Commit | Line | Data |
---|---|---|
01649561 JS |
1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | |
3 | * Fibre Channel Host Bus Adapters. * | |
0d041215 | 4 | * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * |
3e21d1cb | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
01649561 JS |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | |
d080abe0 | 8 | * www.broadcom.com * |
01649561 JS |
9 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
10 | * * | |
11 | * This program is free software; you can redistribute it and/or * | |
12 | * modify it under the terms of version 2 of the GNU General * | |
13 | * Public License as published by the Free Software Foundation. * | |
14 | * This program is distributed in the hope that it will be useful. * | |
15 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | |
16 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | |
18 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | |
19 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | |
20 | * more details, a copy of which can be found in the file COPYING * | |
21 | * included with this package. * | |
22 | ********************************************************************/ | |
23 | #include <linux/pci.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/interrupt.h> | |
26 | #include <linux/delay.h> | |
27 | #include <asm/unaligned.h> | |
28 | #include <linux/crc-t10dif.h> | |
29 | #include <net/checksum.h> | |
30 | ||
31 | #include <scsi/scsi.h> | |
32 | #include <scsi/scsi_device.h> | |
33 | #include <scsi/scsi_eh.h> | |
34 | #include <scsi/scsi_host.h> | |
35 | #include <scsi/scsi_tcq.h> | |
36 | #include <scsi/scsi_transport_fc.h> | |
37 | #include <scsi/fc/fc_fs.h> | |
38 | ||
39 | #include <linux/nvme.h> | |
40 | #include <linux/nvme-fc-driver.h> | |
41 | #include <linux/nvme-fc.h> | |
42 | #include "lpfc_version.h" | |
43 | #include "lpfc_hw4.h" | |
44 | #include "lpfc_hw.h" | |
45 | #include "lpfc_sli.h" | |
46 | #include "lpfc_sli4.h" | |
47 | #include "lpfc_nl.h" | |
48 | #include "lpfc_disc.h" | |
49 | #include "lpfc.h" | |
50 | #include "lpfc_nvme.h" | |
51 | #include "lpfc_scsi.h" | |
52 | #include "lpfc_logmsg.h" | |
53 | #include "lpfc_crtn.h" | |
54 | #include "lpfc_vport.h" | |
bd2cdd5e | 55 | #include "lpfc_debugfs.h" |
01649561 JS |
56 | |
57 | /* NVME initiator-based functions */ | |
58 | ||
c490850a | 59 | static struct lpfc_io_buf * |
cf1a1d3e | 60 | lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
5e5b511d | 61 | int idx, int expedite); |
01649561 JS |
62 | |
63 | static void | |
c490850a | 64 | lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); |
01649561 | 65 | |
81e6a637 | 66 | static struct nvme_fc_port_template lpfc_nvme_template; |
01649561 | 67 | |
fab2e466 CIK |
68 | static union lpfc_wqe128 lpfc_iread_cmd_template; |
69 | static union lpfc_wqe128 lpfc_iwrite_cmd_template; | |
70 | static union lpfc_wqe128 lpfc_icmnd_cmd_template; | |
5fd11085 JS |
71 | |
/**
 * lpfc_nvme_cmd_template - Initialize the static WQE templates for NVME IOs
 *
 * Builds the IREAD, IWRITE and ICMND 128-byte WQE templates once.  Only
 * the fields that are invariant for every IO are programmed here; every
 * word noted as "variable" below is left zero and is filled in per-IO at
 * submission time.  The per-word comments track the SLI-4 WQE layout.
 */
void
lpfc_nvme_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command type, class and RPI-based context */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template - mirrors IREAD but with write opcode/IO direction */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template - non-data command; no IO direction, QOS disabled */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
197 | ||
01649561 JS |
198 | /** |
199 | * lpfc_nvme_create_queue - | |
200 | * @lpfc_pnvme: Pointer to the driver's nvme instance data | |
201 | * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. | |
202 | * @handle: An opaque driver handle used in follow-up calls. | |
203 | * | |
204 | * Driver registers this routine to preallocate and initialize any | |
205 | * internal data structures to bind the @qidx to its internal IO queues. | |
206 | * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. | |
207 | * | |
208 | * Return value : | |
209 | * 0 - Success | |
210 | * -EINVAL - Unsupported input value. | |
211 | * -ENOMEM - Could not alloc necessary memory | |
212 | **/ | |
213 | static int | |
214 | lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, | |
215 | unsigned int qidx, u16 qsize, | |
216 | void **handle) | |
217 | { | |
218 | struct lpfc_nvme_lport *lport; | |
219 | struct lpfc_vport *vport; | |
220 | struct lpfc_nvme_qhandle *qhandle; | |
221 | char *str; | |
222 | ||
c3725bdc JS |
223 | if (!pnvme_lport->private) |
224 | return -ENOMEM; | |
225 | ||
01649561 JS |
226 | lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
227 | vport = lport->vport; | |
228 | qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); | |
229 | if (qhandle == NULL) | |
230 | return -ENOMEM; | |
231 | ||
d6d189ce | 232 | qhandle->cpu_id = raw_smp_processor_id(); |
01649561 JS |
233 | qhandle->qidx = qidx; |
234 | /* | |
235 | * NVME qidx == 0 is the admin queue, so both admin queue | |
236 | * and first IO queue will use MSI-X vector and associated | |
237 | * EQ/CQ/WQ at index 0. After that they are sequentially assigned. | |
238 | */ | |
239 | if (qidx) { | |
240 | str = "IO "; /* IO queue */ | |
241 | qhandle->index = ((qidx - 1) % | |
6a828b0f | 242 | lpfc_nvme_template.max_hw_queues); |
01649561 JS |
243 | } else { |
244 | str = "ADM"; /* Admin queue */ | |
245 | qhandle->index = qidx; | |
246 | } | |
247 | ||
d58734f0 | 248 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
01649561 | 249 | "6073 Binding %s HdwQueue %d (cpu %d) to " |
32350664 | 250 | "hdw_queue %d qhandle x%px\n", str, |
01649561 JS |
251 | qidx, qhandle->cpu_id, qhandle->index, qhandle); |
252 | *handle = (void *)qhandle; | |
253 | return 0; | |
254 | } | |
255 | ||
256 | /** | |
257 | * lpfc_nvme_delete_queue - | |
258 | * @lpfc_pnvme: Pointer to the driver's nvme instance data | |
259 | * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. | |
260 | * @handle: An opaque driver handle from lpfc_nvme_create_queue | |
261 | * | |
262 | * Driver registers this routine to free | |
263 | * any internal data structures to bind the @qidx to its internal | |
264 | * IO queues. | |
265 | * | |
266 | * Return value : | |
267 | * 0 - Success | |
268 | * TODO: What are the failure codes. | |
269 | **/ | |
270 | static void | |
271 | lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, | |
272 | unsigned int qidx, | |
273 | void *handle) | |
274 | { | |
275 | struct lpfc_nvme_lport *lport; | |
276 | struct lpfc_vport *vport; | |
277 | ||
c3725bdc JS |
278 | if (!pnvme_lport->private) |
279 | return; | |
280 | ||
01649561 JS |
281 | lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
282 | vport = lport->vport; | |
283 | ||
284 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, | |
32350664 | 285 | "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", |
01649561 JS |
286 | lport, qidx, handle); |
287 | kfree(handle); | |
288 | } | |
289 | ||
290 | static void | |
291 | lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) | |
292 | { | |
293 | struct lpfc_nvme_lport *lport = localport->private; | |
294 | ||
add9d6be | 295 | lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, |
32350664 | 296 | "6173 localport x%px delete complete\n", |
add9d6be JS |
297 | lport); |
298 | ||
01649561 | 299 | /* release any threads waiting for the unreg to complete */ |
7961cba6 EM |
300 | if (lport->vport->localport) |
301 | complete(lport->lport_unreg_cmp); | |
01649561 JS |
302 | } |
303 | ||
/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Drops the NVME transport's node reference; the rport memory itself
 * is owned and freed by the transport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	/* No node was ever bound to this rport - nothing to release. */
	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete of remoteport x%px\n",
			remoteport);
	/* hbalock protects ndlp->nrport and the upcall flags. */
	spin_lock_irq(&vport->phba->hbalock);

	/* The register rebind might have occurred before the delete
	 * downcall. Guard against this race.
	 */
	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
		ndlp->nrport = NULL;
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
	}
	spin_unlock_irq(&vport->phba->hbalock);

	/* Remove original register reference. The host transport
	 * won't reference this rport/remoteport any further.
	 */
	lpfc_nlp_put(ndlp);

 rport_err:
	return;
}
356 | ||
/* lpfc_nvme_cmpl_gen_req - completion handler for an NVME LS GEN_REQ WQE.
 *
 * @phba:   Pointer to the HBA that issued the WQE.
 * @cmdwqe: The completed command WQE; context1 holds the ndlp reference,
 *          context2 the originating nvmefc_ls_req, context3 the BPL dmabuf.
 * @wcqe:   The work completion entry carrying status/parameter.
 *
 * Updates lport LS statistics, frees the BPL wrapper, invokes the
 * transport's done callback, then releases the node reference and the
 * iocbq.  Resource release order matters: the dmabuf and callback are
 * handled before the ndlp/iocbq are dropped.
 */
static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	/* The localport may already be unregistered; only count stats
	 * when the lport is still present.
	 */
	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %px DID %x Xri: %x status %x reason x%x "
			 "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	/* Free the BPL wrapper allocated at submit time. */
	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	/* Hand the completion status back to the nvme-fc transport. */
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	/* Drop the node reference taken when the WQE was built. */
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}
416 | ||
/* lpfc_nvme_gen_req - build and issue a GEN_REQUEST64 WQE for an NVME LS.
 *
 * @vport:       Pointer to the issuing vport.
 * @bmp:         dmabuf holding the BPL describing request/response buffers.
 * @inp:         dmabuf for the request payload (address reference only).
 * @pnvme_lsreq: The transport LS request; saved for the completion path.
 * @cmpl:        WQE completion handler to invoke.
 * @ndlp:        Destination node; a reference is taken for the WQE lifetime.
 * @num_entry:   Number of BDE entries in the BPL.
 * @tmo:         Timeout in seconds; 0 selects the 3 * RATOV default.
 * @retry:       Retry count stored in the iocbq.
 *
 * Return value :
 *   0 - WQE accepted by the hardware queue
 *   1 - no iocbq available or WQE submission failed
 */
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde.
	 * Walk the BPL until the first non-64-bit BDE; the first entry's
	 * size is also the request payload length.
	 */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 : first BDE points at the request buffer */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 - R_CTL/TYPE mark this as an NVME ELS4 request frame */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
			 "xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	/* NVME LS traffic always rides hardware queue 0. */
	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}
546 | ||
547 | /** | |
548 | * lpfc_nvme_ls_req - Issue an Link Service request | |
549 | * @lpfc_pnvme: Pointer to the driver's nvme instance data | |
550 | * @lpfc_nvme_lport: Pointer to the driver's local port data | |
551 | * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq | |
552 | * | |
553 | * Driver registers this routine to handle any link service request | |
554 | * from the nvme_fc transport to a remote nvme-aware port. | |
555 | * | |
556 | * Return value : | |
557 | * 0 - Success | |
558 | * TODO: What are the failure codes. | |
559 | **/ | |
560 | static int | |
561 | lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, | |
562 | struct nvme_fc_remote_port *pnvme_rport, | |
563 | struct nvmefc_ls_req *pnvme_lsreq) | |
564 | { | |
565 | int ret = 0; | |
566 | struct lpfc_nvme_lport *lport; | |
815a9c43 | 567 | struct lpfc_nvme_rport *rport; |
01649561 JS |
568 | struct lpfc_vport *vport; |
569 | struct lpfc_nodelist *ndlp; | |
570 | struct ulp_bde64 *bpl; | |
571 | struct lpfc_dmabuf *bmp; | |
ba43c4d0 | 572 | uint16_t ntype, nstate; |
01649561 JS |
573 | |
574 | /* there are two dma buf in the request, actually there is one and | |
575 | * the second one is just the start address + cmd size. | |
576 | * Before calling lpfc_nvme_gen_req these buffers need to be wrapped | |
577 | * in a lpfc_dmabuf struct. When freeing we just free the wrapper | |
578 | * because the nvem layer owns the data bufs. | |
579 | * We do not have to break these packets open, we don't care what is in | |
580 | * them. And we do not have to look at the resonse data, we only care | |
581 | * that we got a response. All of the caring is going to happen in the | |
582 | * nvme-fc layer. | |
583 | */ | |
584 | ||
585 | lport = (struct lpfc_nvme_lport *)pnvme_lport->private; | |
815a9c43 | 586 | rport = (struct lpfc_nvme_rport *)pnvme_rport->private; |
66a210ff JS |
587 | if (unlikely(!lport) || unlikely(!rport)) |
588 | return -EINVAL; | |
589 | ||
01649561 JS |
590 | vport = lport->vport; |
591 | ||
3386f4bd JS |
592 | if (vport->load_flag & FC_UNLOADING) |
593 | return -ENODEV; | |
594 | ||
815a9c43 JS |
595 | /* Need the ndlp. It is stored in the driver's rport. */ |
596 | ndlp = rport->ndlp; | |
ba43c4d0 JS |
597 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
598 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, | |
32350664 | 599 | "6051 Remoteport x%px, rport has invalid ndlp. " |
815a9c43 | 600 | "Failing LS Req\n", pnvme_rport); |
ba43c4d0 JS |
601 | return -ENODEV; |
602 | } | |
603 | ||
604 | /* The remote node has to be a mapped nvme target or an | |
605 | * unmapped nvme initiator or it's an error. | |
606 | */ | |
607 | ntype = ndlp->nlp_type; | |
608 | nstate = ndlp->nlp_state; | |
609 | if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || | |
610 | (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { | |
611 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, | |
612 | "6088 DID x%06x not ready for " | |
613 | "IO. State x%x, Type x%x\n", | |
614 | pnvme_rport->port_id, | |
615 | ndlp->nlp_state, ndlp->nlp_type); | |
616 | return -ENODEV; | |
01649561 JS |
617 | } |
618 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | |
619 | if (!bmp) { | |
620 | ||
621 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, | |
622 | "6044 Could not find node for DID %x\n", | |
623 | pnvme_rport->port_id); | |
624 | return 2; | |
625 | } | |
626 | INIT_LIST_HEAD(&bmp->list); | |
627 | bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); | |
628 | if (!bmp->virt) { | |
629 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, | |
630 | "6042 Could not find node for DID %x\n", | |
631 | pnvme_rport->port_id); | |
632 | kfree(bmp); | |
633 | return 3; | |
634 | } | |
635 | bpl = (struct ulp_bde64 *)bmp->virt; | |
636 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); | |
637 | bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); | |
638 | bpl->tus.f.bdeFlags = 0; | |
639 | bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; | |
640 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | |
641 | bpl++; | |
642 | ||
643 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); | |
644 | bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); | |
645 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | |
646 | bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; | |
647 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | |
648 | ||
649 | /* Expand print to include key fields. */ | |
650 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | |
32350664 JS |
651 | "6149 Issue LS Req to DID 0x%06x lport x%px, " |
652 | "rport x%px lsreq x%px rqstlen:%d rsplen:%d " | |
653 | "%pad %pad\n", | |
654 | ndlp->nlp_DID, pnvme_lport, pnvme_rport, | |
01649561 | 655 | pnvme_lsreq, pnvme_lsreq->rqstlen, |
825c6abb AB |
656 | pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, |
657 | &pnvme_lsreq->rspdma); | |
01649561 | 658 | |
66a210ff | 659 | atomic_inc(&lport->fc4NvmeLsRequests); |
01649561 JS |
660 | |
661 | /* Hardcode the wait to 30 seconds. Connections are failing otherwise. | |
662 | * This code allows it all to work. | |
663 | */ | |
664 | ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, | |
665 | pnvme_lsreq, lpfc_nvme_cmpl_gen_req, | |
666 | ndlp, 2, 30, 0); | |
667 | if (ret != WQE_SUCCESS) { | |
4b056682 | 668 | atomic_inc(&lport->xmt_ls_err); |
815a9c43 | 669 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
32350664 JS |
670 | "6052 EXIT. issue ls wqe failed lport x%px, " |
671 | "rport x%px lsreq x%px Status %x DID %x\n", | |
01649561 JS |
672 | pnvme_lport, pnvme_rport, pnvme_lsreq, |
673 | ret, ndlp->nlp_DID); | |
674 | lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); | |
675 | kfree(bmp); | |
676 | return ret; | |
677 | } | |
678 | ||
679 | /* Stub in routine and return 0 for now. */ | |
680 | return ret; | |
681 | } | |
682 | ||
/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport the original LS request was sent to
 * @pnvme_lsreq: Pointer to the LS request to be aborted
 *
 * Driver registers this routine to abort a previously issued
 * link service request to a remote nvme-aware port.  All outstanding
 * NVME LS IOs on the ELS ring that match the target node are aborted.
 *
 * Return value :
 * None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;
	phba = vport->phba;

	/* Nothing to abort once the port is being torn down. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS. The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 * Lock order: hbalock (irq) outer, ring_lock inner.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		atomic_inc(&lport->xmt_ls_abort);
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}
762 | ||
/**
 * lpfc_nvme_adj_fcp_sgls - Fix up the existing sgls for an NVME IO
 * @vport: Virtual port issuing the IO; used to reach the HBA config.
 * @lpfc_ncmd: Driver IO buffer whose embedded WQE and SGL are adjusted.
 * @nCmd: nvme-fc request supplying the CMD/RSP DMA addresses and lengths.
 *
 * Rewrites the first two pre-built SGEs (FCP CMD IU and FCP RSP IU) so
 * they describe the NVME command/response buffers owned by the transport,
 * and initializes WQE words 0-2 (and, in the embedded case, words 16-30)
 * accordingly.  Two layouts are supported, selected by
 * phba->cfg_nvme_embed_cmd:
 *  - embedded:  the CMD IU payload is copied directly into the tail of
 *    the 128-byte WQE (immediate BDE), so no DMA fetch is needed;
 *  - non-embedded: WQE words 0-2 carry an inline 64-bit BDE pointing at
 *    the transport's CMD IU DMA buffer.
 */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes.  Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		/* CMD IU is carried inside the WQE - no DMA address needed */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;		/* Word 1 */
		*wptr++ = *dptr++;		/* Word 2 */
		*wptr++ = *dptr++;		/* Word 3 */
		*wptr++ = *dptr++;		/* Word 4 */
		dptr++;				/* Skip Word 5 in payload */
		*wptr++ = *dptr++;		/* Word 6 */
		*wptr++ = *dptr++;		/* Word 7 */
		dptr += 8;			/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;		/* Word 16 */
		*wptr++ = *dptr++;		/* Word 17 */
		*wptr++ = *dptr++;		/* Word 18 */
		*wptr++ = *dptr++;		/* Word 19 */
		*wptr++ = *dptr++;		/* Word 20 */
		*wptr++ = *dptr++;		/* Word 21 */
		*wptr++ = *dptr++;		/* Word 22 */
		*wptr = *dptr;			/* Word 23 */
	} else {
		/* Point the first SGE at the transport's CMD IU DMA buffer */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	/* RSP SGE is last only when no data SGEs will follow it */
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
859 | ||
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_nvme_ktime - Fold one IO's latency timestamps into HBA-wide stats
 * @phba: HBA whose ktime_seg1..4 total/min/max accumulators are updated.
 * @lpfc_ncmd: Completed IO buffer carrying the five ts_* timestamps.
 *
 * Splits the IO's lifetime into four segments and accumulates each into
 * the running total/min/max kept on @phba.  An incomplete or
 * non-chronological timestamp set is discarded without touching the
 * stats.  The buffer's timestamps are zeroed afterwards so a recycled
 * buffer cannot be accounted twice.
 */
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;
	uint64_t accounted;

	/* Every timestamp must have been recorded for this IO. */
	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;

	/* The timestamps must be in chronological order. */
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start ||
	    lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd ||
	    lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start ||
	    lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput ||
	    lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;

	/*
	 * Segment 1 - previous IO cmpl handed to the NVME layer until
	 *             this command arrived from the NVME layer.
	 * Segment 2 - command arrival until the WQ put was done.
	 * Segment 3 - WQ put until the MSI-X cmpl ISR ran.
	 * Segment 4 - cmpl ISR until the IO was handed back to the
	 *             NVME layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)	/* 5 ms - only meaningful for sequential IO */
		seg1 = 0;

	/* Segments 2-4 are carved out of the span since command start. */
	seg2 = lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start;
	accounted = seg2;

	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
	if (accounted > seg3)
		return;
	seg3 -= accounted;
	accounted += seg3;

	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
	if (accounted > seg4)
		return;
	seg4 -= accounted;

	phba->ktime_data_samples++;

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	/* Consume the timestamps so this buffer is not counted again. */
	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif
942 | ||
/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the HBA whose WQE completed; used for per-queue
 *        stats and for releasing the IO buffer.
 * @pwqeIn: The driver WQE that completed; its context1 carries the
 *          lpfc_io_buf for the IO and its vport field the issuing vport.
 * @wcqe: The work-queue completion entry reported by the hardware.
 *
 * Translates the hardware completion into the nvme-fc request state
 * (transferred_length, rcv_rsplen, status), rebuilding a minimal ERSP IU
 * in the transport's response buffer when the hardware reports
 * CQE_CODE_NVME_ERSP.  Calls nCmd->done() only when the exchange is no
 * longer busy (XB clear); otherwise completion is deferred and the
 * buffer is queued for abort handling by lpfc_release_nvme_buf().
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NODE | LOG_NVME_IOERR,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	/* nvmeCmd may already be NULL if an abort raced this completion */
	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	/* Count error completions on the localport, if still registered */
	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6062 Ignoring NVME cmpl. No ndlp\n");
		/* Jumps into the switch default below to fail the IO */
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NVME_IOERR,
					 "6032 Delay Aborted cmd x%px "
					 "nvme cmd x%px, xri x%x, "
					 "xb %d\n",
					 lpfc_ncmd, nCmd,
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 bf_get(lpfc_wcqe_c_xb, wcqe));
			/* fall through */
		default:
			/* out_err is also the goto target for the missing
			 * ndlp case above; it fails the IO back to the
			 * transport with NVME_SC_INTERNAL.
			 */
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
		uint32_t cpu;
		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			if (lpfc_ncmd->cpu != cpu)
				lpfc_printf_vlog(vport,
						 KERN_INFO, LOG_NVME_IOERR,
						 "6701 CPU Check cmpl: "
						 "cpu %d expect %d\n",
						 cpu, lpfc_ncmd->cpu);
			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
		}
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	/* Only hand the IO back to the transport now when the exchange is
	 * not still busy; the buf_lock must be dropped before done().
	 */
	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		spin_unlock(&lpfc_ncmd->buf_lock);
		nCmd->done(nCmd);
	} else
		spin_unlock(&lpfc_ncmd->buf_lock);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
1177 | ||
1178 | ||
/**
 * lpfc_nvme_prep_io_cmd - Initialize the WQE for an NVME-over-FCP IO
 * @vport: Virtual port that owns the IO.
 * @lpfc_ncmd: IO buffer whose embedded WQE (cur_iocbq.wqe) is initialized.
 * @pnode: Destination remote node for the command.
 * @cstat: Per-hardware-queue FC4 counters; the matching input/output/
 *         control counter is incremented.
 *
 * Copies the appropriate command template (iwrite/iread/icmnd) into the
 * WQE based on transfer direction, then fills in the request-specific
 * words: payload length, optional first-burst length, RPI context tag,
 * XRI, and request/abort tags.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - @pnode is not an active node
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	if (!NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 - first burst: cap at the node's advertised
			 * first-burst size when enabled and negotiated.
			 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			cstat->input_requests++;
		}
	} else {
		/* No data: command-only IO (e.g. admin commands) */
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	/* Set the retry/error-recovery bit for NVME sequence-level
	 * error recovery (NSLER) capable nodes.
	 */
	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}
1286 | ||
1287 | ||
/**
 * lpfc_nvme_prep_io_dma - Build the data SGL for an NVME-over-FCP IO
 * @vport: Virtual port that owns the IO.
 * @lpfc_ncmd: IO buffer whose SGL (dma_sgl) and WQE are populated from
 *             the transport-mapped scatterlist in lpfc_ncmd->nvmeCmd.
 *
 * Fixes up the CMD/RSP SGEs via lpfc_nvme_adj_fcp_sgls(), then walks the
 * transport's pre-DMA-mapped scatterlist and formats one data SGE per
 * segment.  When a hardware SGL page is about to overflow, a chained
 * (LSP) SGE is inserted that points at an extra SGL page obtained from
 * the per-hardware-queue pool, and the walk continues in that page.
 * Optionally mirrors the first data SGE into WQE words 13-15 (PBDE).
 *
 * Return value :
 *   0 - Success
 *   1 - Error (too many segments, scatterlist exhausted early, no spare
 *       SGL page, or non-zero payload with sg_cnt == 0)
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t num_bde = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command. Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries; starts at 2 because
		 * the cmd and rsp SGEs already occupy the first two slots
		 */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if ((num_bde + 1) == nseg) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment: insert a chaining
				 * (LSP) SGE when the current SGL page is
				 * about to run out of entries
				 */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			/* NOTE(review): this bitwise-AND test assumes the
			 * DATA and LSP type encodings are bit-disjoint so
			 * that (type & LPFC_SGE_TYPE_LSP) distinguishes
			 * them; an equality compare against
			 * LPFC_SGE_TYPE_LSP would be more direct - confirm
			 * against the type definitions in lpfc_hw4.h.
			 */
			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
				     LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				/* Normal data SGE: consume one scatterlist
				 * segment and advance within this SGL page.
				 */
				physaddr = data_sg->dma_address;
				dma_len = data_sg->length;
				sgl->addr_lo = cpu_to_le32(
							 putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
							putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				/* LSP SGE: finish it and jump into the extra
				 * SGL page.  i is rewound because this pass
				 * emitted a chain entry, not a data entry.
				 */
				sgl->word2 = cpu_to_le32(sgl->word2);

				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
			/* wqe_pbde is 1 in template */
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}
1473 | ||
1474 | /** | |
1475 | * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO | |
1476 | * @lpfc_pnvme: Pointer to the driver's nvme instance data | |
1477 | * @lpfc_nvme_lport: Pointer to the driver's local port data | |
1478 | * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq | |
1479 | * @lpfc_nvme_fcreq: IO request from nvme fc to driver. | |
1480 | * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue | |
1481 | * | |
1482 | * Driver registers this routine as it io request handler. This | |
1483 | * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq | |
1484 | * data structure to the rport | |
1485 | indicated in @lpfc_nvme_rport. | |
1486 | * | |
1487 | * Return value : | |
1488 | * 0 - Success | |
1489 | * TODO: What are the failure codes. | |
1490 | **/ | |
1491 | static int | |
1492 | lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |
1493 | struct nvme_fc_remote_port *pnvme_rport, | |
1494 | void *hw_queue_handle, | |
1495 | struct nvmefc_fcp_req *pnvme_fcreq) | |
1496 | { | |
1497 | int ret = 0; | |
cf1a1d3e | 1498 | int expedite = 0; |
63df6d63 | 1499 | int idx, cpu; |
01649561 | 1500 | struct lpfc_nvme_lport *lport; |
4c47efc1 | 1501 | struct lpfc_fc4_ctrl_stat *cstat; |
01649561 JS |
1502 | struct lpfc_vport *vport; |
1503 | struct lpfc_hba *phba; | |
1504 | struct lpfc_nodelist *ndlp; | |
c490850a | 1505 | struct lpfc_io_buf *lpfc_ncmd; |
01649561 JS |
1506 | struct lpfc_nvme_rport *rport; |
1507 | struct lpfc_nvme_qhandle *lpfc_queue_info; | |
c3725bdc | 1508 | struct lpfc_nvme_fcpreq_priv *freqpriv; |
cf1a1d3e | 1509 | struct nvme_common_command *sqe; |
bd2cdd5e JS |
1510 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1511 | uint64_t start = 0; | |
1512 | #endif | |
01649561 | 1513 | |
c3725bdc JS |
1514 | /* Validate pointers. LLDD fault handling with transport does |
1515 | * have timing races. | |
1516 | */ | |
01649561 | 1517 | lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
c3725bdc JS |
1518 | if (unlikely(!lport)) { |
1519 | ret = -EINVAL; | |
1520 | goto out_fail; | |
1521 | } | |
1522 | ||
01649561 | 1523 | vport = lport->vport; |
c3725bdc JS |
1524 | |
1525 | if (unlikely(!hw_queue_handle)) { | |
44c2757b JS |
1526 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
1527 | "6117 Fail IO, NULL hw_queue_handle\n"); | |
1528 | atomic_inc(&lport->xmt_fcp_err); | |
cd240071 | 1529 | ret = -EBUSY; |
c3725bdc JS |
1530 | goto out_fail; |
1531 | } | |
1532 | ||
01649561 JS |
1533 | phba = vport->phba; |
1534 | ||
1abcb371 DK |
1535 | if (vport->load_flag & FC_UNLOADING) { |
1536 | ret = -ENODEV; | |
1537 | goto out_fail; | |
1538 | } | |
1539 | ||
1df09449 | 1540 | if (unlikely(vport->load_flag & FC_UNLOADING)) { |
44c2757b JS |
1541 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
1542 | "6124 Fail IO, Driver unload\n"); | |
1543 | atomic_inc(&lport->xmt_fcp_err); | |
3386f4bd JS |
1544 | ret = -ENODEV; |
1545 | goto out_fail; | |
1546 | } | |
1547 | ||
c3725bdc JS |
1548 | freqpriv = pnvme_fcreq->private; |
1549 | if (unlikely(!freqpriv)) { | |
44c2757b JS |
1550 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
1551 | "6158 Fail IO, NULL request data\n"); | |
1552 | atomic_inc(&lport->xmt_fcp_err); | |
c3725bdc | 1553 | ret = -EINVAL; |
b7672ae6 DK |
1554 | goto out_fail; |
1555 | } | |
1556 | ||
bd2cdd5e JS |
1557 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1558 | if (phba->ktime_on) | |
1559 | start = ktime_get_ns(); | |
1560 | #endif | |
01649561 JS |
1561 | rport = (struct lpfc_nvme_rport *)pnvme_rport->private; |
1562 | lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; | |
1563 | ||
1564 | /* | |
1565 | * Catch race where our node has transitioned, but the | |
1566 | * transport is still transitioning. | |
1567 | */ | |
1568 | ndlp = rport->ndlp; | |
1569 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { | |
44c2757b | 1570 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, |
32350664 JS |
1571 | "6053 Busy IO, ndlp not ready: rport x%px " |
1572 | "ndlp x%px, DID x%06x\n", | |
01649561 | 1573 | rport, ndlp, pnvme_rport->port_id); |
44c2757b JS |
1574 | atomic_inc(&lport->xmt_fcp_err); |
1575 | ret = -EBUSY; | |
1576 | goto out_fail; | |
01649561 JS |
1577 | } |
1578 | ||
1579 | /* The remote node has to be a mapped target or it's an error. */ | |
1580 | if ((ndlp->nlp_type & NLP_NVME_TARGET) && | |
1581 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { | |
44c2757b JS |
1582 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, |
1583 | "6036 Fail IO, DID x%06x not ready for " | |
cd240071 | 1584 | "IO. State x%x, Type x%x Flg x%x\n", |
44c2757b | 1585 | pnvme_rport->port_id, |
cd240071 JS |
1586 | ndlp->nlp_state, ndlp->nlp_type, |
1587 | ndlp->upcall_flags); | |
4b056682 | 1588 | atomic_inc(&lport->xmt_fcp_bad_ndlp); |
cd240071 | 1589 | ret = -EBUSY; |
01649561 JS |
1590 | goto out_fail; |
1591 | ||
1592 | } | |
1593 | ||
cf1a1d3e JS |
1594 | /* Currently only NVME Keep alive commands should be expedited |
1595 | * if the driver runs out of a resource. These should only be | |
1596 | * issued on the admin queue, qidx 0 | |
1597 | */ | |
1598 | if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { | |
1599 | sqe = &((struct nvme_fc_cmd_iu *) | |
1600 | pnvme_fcreq->cmdaddr)->sqe.common; | |
1601 | if (sqe->opcode == nvme_admin_keep_alive) | |
1602 | expedite = 1; | |
1603 | } | |
1604 | ||
01649561 JS |
1605 | /* The node is shared with FCP IO, make sure the IO pending count does |
1606 | * not exceed the programmed depth. | |
1607 | */ | |
2a5b7d62 JS |
1608 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
1609 | if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && | |
1610 | !expedite) { | |
1611 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, | |
1612 | "6174 Fail IO, ndlp qdepth exceeded: " | |
1613 | "idx %d DID %x pend %d qdepth %d\n", | |
1614 | lpfc_queue_info->index, ndlp->nlp_DID, | |
1615 | atomic_read(&ndlp->cmd_pending), | |
1616 | ndlp->cmd_qdepth); | |
1617 | atomic_inc(&lport->xmt_fcp_qdepth); | |
1618 | ret = -EBUSY; | |
1619 | goto out_fail; | |
1620 | } | |
01649561 JS |
1621 | } |
1622 | ||
6a828b0f | 1623 | /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ |
45aa312e JS |
1624 | if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { |
1625 | idx = lpfc_queue_info->index; | |
1626 | } else { | |
d6d189ce | 1627 | cpu = raw_smp_processor_id(); |
6a828b0f | 1628 | idx = phba->sli4_hba.cpu_map[cpu].hdwq; |
45aa312e JS |
1629 | } |
1630 | ||
1631 | lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); | |
01649561 | 1632 | if (lpfc_ncmd == NULL) { |
4b056682 | 1633 | atomic_inc(&lport->xmt_fcp_noxri); |
01649561 | 1634 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
44c2757b JS |
1635 | "6065 Fail IO, driver buffer pool is empty: " |
1636 | "idx %d DID %x\n", | |
1637 | lpfc_queue_info->index, ndlp->nlp_DID); | |
cd22d605 | 1638 | ret = -EBUSY; |
01649561 JS |
1639 | goto out_fail; |
1640 | } | |
bd2cdd5e | 1641 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
c8a4ce0b | 1642 | if (start) { |
bd2cdd5e JS |
1643 | lpfc_ncmd->ts_cmd_start = start; |
1644 | lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; | |
c8a4ce0b DK |
1645 | } else { |
1646 | lpfc_ncmd->ts_cmd_start = 0; | |
bd2cdd5e JS |
1647 | } |
1648 | #endif | |
01649561 JS |
1649 | |
1650 | /* | |
1651 | * Store the data needed by the driver to issue, abort, and complete | |
1652 | * an IO. | |
1653 | * Do not let the IO hang out forever. There is no midlayer issuing | |
1654 | * an abort so inform the FW of the maximum IO pending time. | |
1655 | */ | |
bbe3012b | 1656 | freqpriv->nvme_buf = lpfc_ncmd; |
01649561 | 1657 | lpfc_ncmd->nvmeCmd = pnvme_fcreq; |
318083ad | 1658 | lpfc_ncmd->ndlp = ndlp; |
0794d601 | 1659 | lpfc_ncmd->qidx = lpfc_queue_info->qidx; |
01649561 | 1660 | |
01649561 JS |
1661 | /* |
1662 | * Issue the IO on the WQ indicated by index in the hw_queue_handle. | |
1663 | * This identfier was create in our hardware queue create callback | |
1664 | * routine. The driver now is dependent on the IO queue steering from | |
1665 | * the transport. We are trusting the upper NVME layers know which | |
1666 | * index to use and that they have affinitized a CPU to this hardware | |
1667 | * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. | |
1668 | */ | |
66a210ff | 1669 | lpfc_ncmd->cur_iocbq.hba_wqidx = idx; |
4c47efc1 | 1670 | cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; |
66a210ff JS |
1671 | |
1672 | lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); | |
1673 | ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); | |
1674 | if (ret) { | |
44c2757b JS |
1675 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
1676 | "6175 Fail IO, Prep DMA: " | |
1677 | "idx %d DID %x\n", | |
1678 | lpfc_queue_info->index, ndlp->nlp_DID); | |
1679 | atomic_inc(&lport->xmt_fcp_err); | |
66a210ff JS |
1680 | ret = -ENOMEM; |
1681 | goto out_free_nvme_buf; | |
1682 | } | |
1683 | ||
bd2cdd5e JS |
1684 | lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", |
1685 | lpfc_ncmd->cur_iocbq.sli4_xritag, | |
1686 | lpfc_queue_info->index, ndlp->nlp_DID); | |
1687 | ||
1fbf9742 | 1688 | ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); |
01649561 | 1689 | if (ret) { |
4b056682 | 1690 | atomic_inc(&lport->xmt_fcp_wqerr); |
e3246a12 | 1691 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
44c2757b | 1692 | "6113 Fail IO, Could not issue WQE err %x " |
01649561 JS |
1693 | "sid: x%x did: x%x oxid: x%x\n", |
1694 | ret, vport->fc_myDID, ndlp->nlp_DID, | |
1695 | lpfc_ncmd->cur_iocbq.sli4_xritag); | |
01649561 JS |
1696 | goto out_free_nvme_buf; |
1697 | } | |
1698 | ||
c490850a JS |
1699 | if (phba->cfg_xri_rebalancing) |
1700 | lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); | |
1701 | ||
bd2cdd5e | 1702 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
c8a4ce0b | 1703 | if (lpfc_ncmd->ts_cmd_start) |
bd2cdd5e JS |
1704 | lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); |
1705 | ||
1706 | if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { | |
d6d189ce | 1707 | cpu = raw_smp_processor_id(); |
63df6d63 JS |
1708 | if (cpu < LPFC_CHECK_CPU_CNT) { |
1709 | lpfc_ncmd->cpu = cpu; | |
1710 | if (idx != cpu) | |
bd2cdd5e | 1711 | lpfc_printf_vlog(vport, |
63df6d63 | 1712 | KERN_INFO, LOG_NVME_IOERR, |
bd2cdd5e JS |
1713 | "6702 CPU Check cmd: " |
1714 | "cpu %d wq %d\n", | |
1715 | lpfc_ncmd->cpu, | |
1716 | lpfc_queue_info->index); | |
63df6d63 | 1717 | phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++; |
bd2cdd5e | 1718 | } |
bd2cdd5e JS |
1719 | } |
1720 | #endif | |
01649561 JS |
1721 | return 0; |
1722 | ||
1723 | out_free_nvme_buf: | |
2cee7808 JS |
1724 | if (lpfc_ncmd->nvmeCmd->sg_cnt) { |
1725 | if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) | |
4c47efc1 | 1726 | cstat->output_requests--; |
2cee7808 | 1727 | else |
4c47efc1 | 1728 | cstat->input_requests--; |
2cee7808 | 1729 | } else |
4c47efc1 | 1730 | cstat->control_requests--; |
01649561 JS |
1731 | lpfc_release_nvme_buf(phba, lpfc_ncmd); |
1732 | out_fail: | |
1733 | return ret; | |
1734 | } | |
1735 | ||
1736 | /** | |
1737 | * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. | |
1738 | * @phba: Pointer to HBA context object | |
1739 | * @cmdiocb: Pointer to command iocb object. | |
1740 | * @rspiocb: Pointer to response iocb object. | |
1741 | * | |
1742 | * This is the callback function for any NVME FCP IO that was aborted. | |
1743 | * | |
1744 | * Return value: | |
1745 | * None | |
1746 | **/ | |
1747 | void | |
1748 | lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |
1749 | struct lpfc_wcqe_complete *abts_cmpl) | |
1750 | { | |
e3246a12 | 1751 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME, |
01649561 JS |
1752 | "6145 ABORT_XRI_CN completing on rpi x%x " |
1753 | "original iotag x%x, abort cmd iotag x%x " | |
1754 | "req_tag x%x, status x%x, hwstatus x%x\n", | |
1755 | cmdiocb->iocb.un.acxri.abortContextTag, | |
1756 | cmdiocb->iocb.un.acxri.abortIoTag, | |
1757 | cmdiocb->iotag, | |
1758 | bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), | |
1759 | bf_get(lpfc_wcqe_c_status, abts_cmpl), | |
1760 | bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); | |
1761 | lpfc_sli_release_iocbq(phba, cmdiocb); | |
1762 | } | |
1763 | ||
1764 | /** | |
1765 | * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS | |
1766 | * @lpfc_pnvme: Pointer to the driver's nvme instance data | |
1767 | * @lpfc_nvme_lport: Pointer to the driver's local port data | |
1768 | * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq | |
1769 | * @lpfc_nvme_fcreq: IO request from nvme fc to driver. | |
1770 | * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue | |
1771 | * | |
1772 | * Driver registers this routine as its nvme request io abort handler. This | |
1773 | * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq | |
1774 | * data structure to the rport indicated in @lpfc_nvme_rport. This routine | |
1775 | * is executed asynchronously - one the target is validated as "MAPPED" and | |
1776 | * ready for IO, the driver issues the abort request and returns. | |
1777 | * | |
1778 | * Return value: | |
1779 | * None | |
1780 | **/ | |
1781 | static void | |
1782 | lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |
1783 | struct nvme_fc_remote_port *pnvme_rport, | |
1784 | void *hw_queue_handle, | |
1785 | struct nvmefc_fcp_req *pnvme_fcreq) | |
1786 | { | |
1787 | struct lpfc_nvme_lport *lport; | |
1788 | struct lpfc_vport *vport; | |
1789 | struct lpfc_hba *phba; | |
c490850a | 1790 | struct lpfc_io_buf *lpfc_nbuf; |
01649561 JS |
1791 | struct lpfc_iocbq *abts_buf; |
1792 | struct lpfc_iocbq *nvmereq_wqe; | |
c3725bdc | 1793 | struct lpfc_nvme_fcpreq_priv *freqpriv; |
205e8240 | 1794 | union lpfc_wqe128 *abts_wqe; |
01649561 JS |
1795 | unsigned long flags; |
1796 | int ret_val; | |
1797 | ||
c3725bdc JS |
1798 | /* Validate pointers. LLDD fault handling with transport does |
1799 | * have timing races. | |
1800 | */ | |
01649561 | 1801 | lport = (struct lpfc_nvme_lport *)pnvme_lport->private; |
c3725bdc JS |
1802 | if (unlikely(!lport)) |
1803 | return; | |
1804 | ||
01649561 | 1805 | vport = lport->vport; |
c3725bdc JS |
1806 | |
1807 | if (unlikely(!hw_queue_handle)) { | |
1808 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, | |
1809 | "6129 Fail Abort, HW Queue Handle NULL.\n"); | |
1810 | return; | |
1811 | } | |
1812 | ||
01649561 | 1813 | phba = vport->phba; |
c3725bdc | 1814 | freqpriv = pnvme_fcreq->private; |
01649561 | 1815 | |
c3725bdc JS |
1816 | if (unlikely(!freqpriv)) |
1817 | return; | |
3386f4bd JS |
1818 | if (vport->load_flag & FC_UNLOADING) |
1819 | return; | |
1820 | ||
01649561 | 1821 | /* Announce entry to new IO submit field. */ |
86c67379 | 1822 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, |
01649561 | 1823 | "6002 Abort Request to rport DID x%06x " |
32350664 | 1824 | "for nvme_fc_req x%px\n", |
01649561 JS |
1825 | pnvme_rport->port_id, |
1826 | pnvme_fcreq); | |
1827 | ||
01649561 JS |
1828 | /* If the hba is getting reset, this flag is set. It is |
1829 | * cleared when the reset is complete and rings reestablished. | |
1830 | */ | |
1831 | spin_lock_irqsave(&phba->hbalock, flags); | |
1832 | /* driver queued commands are in process of being flushed */ | |
c00f62e6 | 1833 | if (phba->hba_flag & HBA_IOQ_FLUSH) { |
01649561 | 1834 | spin_unlock_irqrestore(&phba->hbalock, flags); |
86c67379 | 1835 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 JS |
1836 | "6139 Driver in reset cleanup - flushing " |
1837 | "NVME Req now. hba_flag x%x\n", | |
1838 | phba->hba_flag); | |
1839 | return; | |
1840 | } | |
1841 | ||
bbe3012b | 1842 | lpfc_nbuf = freqpriv->nvme_buf; |
01649561 JS |
1843 | if (!lpfc_nbuf) { |
1844 | spin_unlock_irqrestore(&phba->hbalock, flags); | |
86c67379 | 1845 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 JS |
1846 | "6140 NVME IO req has no matching lpfc nvme " |
1847 | "io buffer. Skipping abort req.\n"); | |
1848 | return; | |
1849 | } else if (!lpfc_nbuf->nvmeCmd) { | |
1850 | spin_unlock_irqrestore(&phba->hbalock, flags); | |
86c67379 | 1851 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 JS |
1852 | "6141 lpfc NVME IO req has no nvme_fcreq " |
1853 | "io buffer. Skipping abort req.\n"); | |
1854 | return; | |
1855 | } | |
2b7824d0 | 1856 | nvmereq_wqe = &lpfc_nbuf->cur_iocbq; |
01649561 | 1857 | |
c2017260 JS |
1858 | /* Guard against IO completion being called at same time */ |
1859 | spin_lock(&lpfc_nbuf->buf_lock); | |
1860 | ||
01649561 JS |
1861 | /* |
1862 | * The lpfc_nbuf and the mapped nvme_fcreq in the driver's | |
1863 | * state must match the nvme_fcreq passed by the nvme | |
1864 | * transport. If they don't match, it is likely the driver | |
1865 | * has already completed the NVME IO and the nvme transport | |
1866 | * has not seen it yet. | |
1867 | */ | |
1868 | if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { | |
86c67379 | 1869 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 | 1870 | "6143 NVME req mismatch: " |
32350664 JS |
1871 | "lpfc_nbuf x%px nvmeCmd x%px, " |
1872 | "pnvme_fcreq x%px. Skipping Abort xri x%x\n", | |
01649561 | 1873 | lpfc_nbuf, lpfc_nbuf->nvmeCmd, |
2b7824d0 | 1874 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
c2017260 | 1875 | goto out_unlock; |
01649561 JS |
1876 | } |
1877 | ||
1878 | /* Don't abort IOs no longer on the pending queue. */ | |
01649561 | 1879 | if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { |
86c67379 | 1880 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
32350664 | 1881 | "6142 NVME IO req x%px not queued - skipping " |
2b7824d0 JS |
1882 | "abort req xri x%x\n", |
1883 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); | |
c2017260 | 1884 | goto out_unlock; |
01649561 JS |
1885 | } |
1886 | ||
4b056682 | 1887 | atomic_inc(&lport->xmt_fcp_abort); |
bd2cdd5e JS |
1888 | lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", |
1889 | nvmereq_wqe->sli4_xritag, | |
00cefeb9 | 1890 | nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); |
bd2cdd5e | 1891 | |
01649561 JS |
1892 | /* Outstanding abort is in progress */ |
1893 | if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { | |
86c67379 | 1894 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 | 1895 | "6144 Outstanding NVME I/O Abort Request " |
32350664 JS |
1896 | "still pending on nvme_fcreq x%px, " |
1897 | "lpfc_ncmd %px xri x%x\n", | |
2b7824d0 JS |
1898 | pnvme_fcreq, lpfc_nbuf, |
1899 | nvmereq_wqe->sli4_xritag); | |
c2017260 | 1900 | goto out_unlock; |
01649561 JS |
1901 | } |
1902 | ||
1903 | abts_buf = __lpfc_sli_get_iocbq(phba); | |
1904 | if (!abts_buf) { | |
86c67379 | 1905 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 | 1906 | "6136 No available abort wqes. Skipping " |
32350664 | 1907 | "Abts req for nvme_fcreq x%px xri x%x\n", |
2b7824d0 | 1908 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
c2017260 | 1909 | goto out_unlock; |
01649561 JS |
1910 | } |
1911 | ||
1912 | /* Ready - mark outstanding as aborted by driver. */ | |
1913 | nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; | |
1914 | ||
1915 | /* Complete prepping the abort wqe and issue to the FW. */ | |
1916 | abts_wqe = &abts_buf->wqe; | |
1917 | ||
1918 | /* WQEs are reused. Clear stale data and set key fields to | |
1919 | * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. | |
1920 | */ | |
d9f492a1 | 1921 | memset(abts_wqe, 0, sizeof(*abts_wqe)); |
01649561 JS |
1922 | bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); |
1923 | ||
1924 | /* word 7 */ | |
01649561 JS |
1925 | bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
1926 | bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, | |
1927 | nvmereq_wqe->iocb.ulpClass); | |
1928 | ||
1929 | /* word 8 - tell the FW to abort the IO associated with this | |
1930 | * outstanding exchange ID. | |
1931 | */ | |
1932 | abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; | |
1933 | ||
1934 | /* word 9 - this is the iotag for the abts_wqe completion. */ | |
1935 | bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, | |
1936 | abts_buf->iotag); | |
1937 | ||
1938 | /* word 10 */ | |
01649561 JS |
1939 | bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); |
1940 | bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); | |
1941 | ||
1942 | /* word 11 */ | |
1943 | bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); | |
1944 | bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); | |
1945 | bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); | |
1946 | ||
1947 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | |
1948 | abts_buf->iocb_flag |= LPFC_IO_NVME; | |
1949 | abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; | |
1950 | abts_buf->vport = vport; | |
1951 | abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; | |
1fbf9742 | 1952 | ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf); |
c2017260 | 1953 | spin_unlock(&lpfc_nbuf->buf_lock); |
01649561 | 1954 | spin_unlock_irqrestore(&phba->hbalock, flags); |
cd22d605 | 1955 | if (ret_val) { |
86c67379 | 1956 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
01649561 | 1957 | "6137 Failed abts issue_wqe with status x%x " |
32350664 | 1958 | "for nvme_fcreq x%px.\n", |
01649561 JS |
1959 | ret_val, pnvme_fcreq); |
1960 | lpfc_sli_release_iocbq(phba, abts_buf); | |
1961 | return; | |
1962 | } | |
1963 | ||
86c67379 | 1964 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, |
2b7824d0 | 1965 | "6138 Transport Abort NVME Request Issued for " |
01649561 JS |
1966 | "ox_id x%x on reqtag x%x\n", |
1967 | nvmereq_wqe->sli4_xritag, | |
1968 | abts_buf->iotag); | |
c2017260 JS |
1969 | return; |
1970 | ||
1971 | out_unlock: | |
1972 | spin_unlock(&lpfc_nbuf->buf_lock); | |
1973 | spin_unlock_irqrestore(&phba->hbalock, flags); | |
1974 | return; | |
01649561 JS |
1975 | } |
1976 | ||
/* Declare and initialize an instance of the FC NVME template.
 * This is the operations vector the driver registers with the
 * nvme-fc transport via nvme_fc_register_localport().
 */
static struct nvme_fc_port_template lpfc_nvme_template = {
	.module	= THIS_MODULE,

	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,

	/* max_hw_queues and max_sgl_segments are placeholders here;
	 * both are recomputed in lpfc_nvme_create_localport() before
	 * the template is handed to the transport.
	 */
	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
2004 | ||
2005 | /** | |
5e5b511d | 2006 | * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA |
01649561 | 2007 | * @phba: The HBA for which this call is being executed. |
01649561 | 2008 | * |
5e5b511d | 2009 | * This routine removes a nvme buffer from head of @hdwq io_buf_list |
01649561 | 2010 | * and returns to caller. |
01649561 JS |
2011 | * |
2012 | * Return codes: | |
01649561 JS |
2013 | * NULL - Error |
2014 | * Pointer to lpfc_nvme_buf - Success | |
01649561 | 2015 | **/ |
c490850a | 2016 | static struct lpfc_io_buf * |
cf1a1d3e | 2017 | lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
5e5b511d | 2018 | int idx, int expedite) |
01649561 | 2019 | { |
c490850a | 2020 | struct lpfc_io_buf *lpfc_ncmd; |
5e5b511d | 2021 | struct lpfc_sli4_hdw_queue *qp; |
0794d601 | 2022 | struct sli4_sge *sgl; |
01649561 JS |
2023 | struct lpfc_iocbq *pwqeq; |
2024 | union lpfc_wqe128 *wqe; | |
01649561 | 2025 | |
c490850a | 2026 | lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); |
2a5b7d62 | 2027 | |
0794d601 | 2028 | if (lpfc_ncmd) { |
01649561 | 2029 | pwqeq = &(lpfc_ncmd->cur_iocbq); |
205e8240 | 2030 | wqe = &pwqeq->wqe; |
01649561 | 2031 | |
0794d601 JS |
2032 | /* Setup key fields in buffer that may have been changed |
2033 | * if other protocols used this buffer. | |
2034 | */ | |
2035 | pwqeq->iocb_flag = LPFC_IO_NVME; | |
01649561 | 2036 | pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; |
0794d601 JS |
2037 | lpfc_ncmd->start_time = jiffies; |
2038 | lpfc_ncmd->flags = 0; | |
01649561 JS |
2039 | |
2040 | /* Rsp SGE will be filled in when we rcv an IO | |
2041 | * from the NVME Layer to be sent. | |
2042 | * The cmd is going to be embedded so we need a SKIP SGE. | |
2043 | */ | |
0794d601 | 2044 | sgl = lpfc_ncmd->dma_sgl; |
01649561 JS |
2045 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); |
2046 | bf_set(lpfc_sli4_sge_last, sgl, 0); | |
2047 | sgl->word2 = cpu_to_le32(sgl->word2); | |
2048 | /* Fill in word 3 / sgl_len during cmd submission */ | |
2049 | ||
d9f492a1 | 2050 | /* Initialize 64 bytes only */ |
5fd11085 | 2051 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
01649561 | 2052 | |
0794d601 JS |
2053 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
2054 | atomic_inc(&ndlp->cmd_pending); | |
c490850a | 2055 | lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; |
0794d601 | 2056 | } |
01649561 | 2057 | |
c490850a JS |
2058 | } else { |
2059 | qp = &phba->sli4_hba.hdwq[idx]; | |
5e5b511d | 2060 | qp->empty_io_bufs++; |
01649561 | 2061 | } |
2a5b7d62 | 2062 | |
01649561 JS |
2063 | return lpfc_ncmd; |
2064 | } | |
2065 | ||
2066 | /** | |
2067 | * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. | |
2068 | * @phba: The Hba for which this call is being executed. | |
2069 | * @lpfc_ncmd: The nvme buffer which is being released. | |
2070 | * | |
2071 | * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba | |
5e5b511d | 2072 | * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer |
01649561 JS |
2073 | * and cannot be reused for at least RA_TOV amount of time if it was |
2074 | * aborted. | |
2075 | **/ | |
2076 | static void | |
c490850a | 2077 | lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) |
01649561 | 2078 | { |
5e5b511d | 2079 | struct lpfc_sli4_hdw_queue *qp; |
01649561 JS |
2080 | unsigned long iflag = 0; |
2081 | ||
c490850a | 2082 | if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) |
2a5b7d62 JS |
2083 | atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); |
2084 | ||
2a5b7d62 | 2085 | lpfc_ncmd->ndlp = NULL; |
c490850a | 2086 | lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; |
2a5b7d62 | 2087 | |
1fbf9742 | 2088 | qp = lpfc_ncmd->hdwq; |
318083ad | 2089 | if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { |
86c67379 JS |
2090 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
2091 | "6310 XB release deferred for " | |
2092 | "ox_id x%x on reqtag x%x\n", | |
2093 | lpfc_ncmd->cur_iocbq.sli4_xritag, | |
2094 | lpfc_ncmd->cur_iocbq.iotag); | |
2095 | ||
c00f62e6 | 2096 | spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); |
01649561 | 2097 | list_add_tail(&lpfc_ncmd->list, |
c00f62e6 | 2098 | &qp->lpfc_abts_io_buf_list); |
5e5b511d | 2099 | qp->abts_nvme_io_bufs++; |
c00f62e6 | 2100 | spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); |
c490850a JS |
2101 | } else |
2102 | lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); | |
01649561 JS |
2103 | } |
2104 | ||
2105 | /** | |
2106 | * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. | |
2107 | * @pvport - the lpfc_vport instance requesting a localport. | |
2108 | * | |
2109 | * This routine is invoked to create an nvme localport instance to bind | |
2110 | * to the nvme_fc_transport. It is called once during driver load | |
2111 | * like lpfc_create_shost after all other services are initialized. | |
2112 | * It requires a vport, vpi, and wwns at call time. Other localport | |
2113 | * parameters are modified as the driver's FCID and the Fabric WWN | |
2114 | * are established. | |
2115 | * | |
2116 | * Return codes | |
2117 | * 0 - successful | |
2118 | * -ENOMEM - no heap memory available | |
2119 | * other values - from nvme registration upcall | |
2120 | **/ | |
2121 | int | |
2122 | lpfc_nvme_create_localport(struct lpfc_vport *vport) | |
2123 | { | |
166d7211 | 2124 | int ret = 0; |
01649561 JS |
2125 | struct lpfc_hba *phba = vport->phba; |
2126 | struct nvme_fc_port_info nfcp_info; | |
2127 | struct nvme_fc_local_port *localport; | |
2128 | struct lpfc_nvme_lport *lport; | |
01649561 JS |
2129 | |
2130 | /* Initialize this localport instance. The vport wwn usage ensures | |
2131 | * that NPIV is accounted for. | |
2132 | */ | |
2133 | memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); | |
2134 | nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; | |
2135 | nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); | |
2136 | nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); | |
2137 | ||
5b9e70b2 JS |
2138 | /* We need to tell the transport layer + 1 because it takes page |
2139 | * alignment into account. When space for the SGL is allocated we | |
2140 | * allocate + 3, one for cmd, one for rsp and one for this alignment | |
4d4c4a4a | 2141 | */ |
4d4c4a4a | 2142 | lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; |
01649561 | 2143 | |
6a828b0f JS |
2144 | /* Advertise how many hw queues we support based on fcp_io_sched */ |
2145 | if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) | |
2146 | lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; | |
2147 | else | |
2148 | lpfc_nvme_template.max_hw_queues = | |
2149 | phba->sli4_hba.num_present_cpu; | |
66a210ff | 2150 | |
faf5a744 AB |
2151 | if (!IS_ENABLED(CONFIG_NVME_FC)) |
2152 | return ret; | |
2153 | ||
01649561 JS |
2154 | /* localport is allocated from the stack, but the registration |
2155 | * call allocates heap memory as well as the private area. | |
2156 | */ | |
faf5a744 | 2157 | |
01649561 JS |
2158 | ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, |
2159 | &vport->phba->pcidev->dev, &localport); | |
2160 | if (!ret) { | |
2161 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, | |
2162 | "6005 Successfully registered local " | |
32350664 JS |
2163 | "NVME port num %d, localP x%px, private " |
2164 | "x%px, sg_seg %d\n", | |
01649561 JS |
2165 | localport->port_num, localport, |
2166 | localport->private, | |
2167 | lpfc_nvme_template.max_sgl_segments); | |
2168 | ||
2169 | /* Private is our lport size declared in the template. */ | |
2170 | lport = (struct lpfc_nvme_lport *)localport->private; | |
2171 | vport->localport = localport; | |
2172 | lport->vport = vport; | |
01649561 | 2173 | vport->nvmei_support = 1; |
6b486ce9 | 2174 | |
4b056682 JS |
2175 | atomic_set(&lport->xmt_fcp_noxri, 0); |
2176 | atomic_set(&lport->xmt_fcp_bad_ndlp, 0); | |
2177 | atomic_set(&lport->xmt_fcp_qdepth, 0); | |
44c2757b | 2178 | atomic_set(&lport->xmt_fcp_err, 0); |
4b056682 JS |
2179 | atomic_set(&lport->xmt_fcp_wqerr, 0); |
2180 | atomic_set(&lport->xmt_fcp_abort, 0); | |
2181 | atomic_set(&lport->xmt_ls_abort, 0); | |
2182 | atomic_set(&lport->xmt_ls_err, 0); | |
2183 | atomic_set(&lport->cmpl_fcp_xb, 0); | |
2184 | atomic_set(&lport->cmpl_fcp_err, 0); | |
2185 | atomic_set(&lport->cmpl_ls_xb, 0); | |
2186 | atomic_set(&lport->cmpl_ls_err, 0); | |
66a210ff JS |
2187 | atomic_set(&lport->fc4NvmeLsRequests, 0); |
2188 | atomic_set(&lport->fc4NvmeLsCmpls, 0); | |
01649561 JS |
2189 | } |
2190 | ||
01649561 JS |
2191 | return ret; |
2192 | } | |
2193 | ||
d964b3e5 | 2194 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
add9d6be JS |
2195 | /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. |
2196 | * | |
2197 | * The driver has to wait for the host nvme transport to callback | |
2198 | * indicating the localport has successfully unregistered all | |
2199 | * resources. Since this is an uninterruptible wait, loop every ten | |
2200 | * seconds and print a message indicating no progress. | |
2201 | * | |
2202 | * An uninterruptible wait is used because of the risk of transport-to- | |
2203 | * driver state mismatch. | |
2204 | */ | |
3999df75 | 2205 | static void |
add9d6be | 2206 | lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, |
7961cba6 EM |
2207 | struct lpfc_nvme_lport *lport, |
2208 | struct completion *lport_unreg_cmp) | |
add9d6be | 2209 | { |
add9d6be | 2210 | u32 wait_tmo; |
2ab70c21 JS |
2211 | int ret, i, pending = 0; |
2212 | struct lpfc_sli_ring *pring; | |
2213 | struct lpfc_hba *phba = vport->phba; | |
add9d6be JS |
2214 | |
2215 | /* Host transport has to clean up and confirm requiring an indefinite | |
2216 | * wait. Print a message if a 10 second wait expires and renew the | |
2217 | * wait. This is unexpected. | |
2218 | */ | |
2219 | wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); | |
2220 | while (true) { | |
7961cba6 | 2221 | ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); |
add9d6be | 2222 | if (unlikely(!ret)) { |
2ab70c21 JS |
2223 | pending = 0; |
2224 | for (i = 0; i < phba->cfg_hdw_queue; i++) { | |
c00f62e6 | 2225 | pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
2ab70c21 JS |
2226 | if (!pring) |
2227 | continue; | |
2228 | if (pring->txcmplq_cnt) | |
2229 | pending += pring->txcmplq_cnt; | |
2230 | } | |
add9d6be | 2231 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
32350664 | 2232 | "6176 Lport x%px Localport x%px wait " |
2ab70c21 JS |
2233 | "timed out. Pending %d. Renewing.\n", |
2234 | lport, vport->localport, pending); | |
add9d6be JS |
2235 | continue; |
2236 | } | |
2237 | break; | |
2238 | } | |
2239 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, | |
32350664 | 2240 | "6177 Lport x%px Localport x%px Complete Success\n", |
add9d6be | 2241 | lport, vport->localport); |
add9d6be | 2242 | } |
d964b3e5 | 2243 | #endif |
add9d6be | 2244 | |
01649561 JS |
2245 | /** |
2246 | * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. | |
2247 | * @pnvme: pointer to lpfc nvme data structure. | |
2248 | * | |
2249 | * This routine is invoked to destroy all lports bound to the phba. | |
2250 | * The lport memory was allocated by the nvme fc transport and is | |
2251 | * released there. This routine ensures all rports bound to the | |
2252 | * lport have been disconnected. | |
2253 | * | |
2254 | **/ | |
2255 | void | |
2256 | lpfc_nvme_destroy_localport(struct lpfc_vport *vport) | |
2257 | { | |
7d708033 | 2258 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
01649561 JS |
2259 | struct nvme_fc_local_port *localport; |
2260 | struct lpfc_nvme_lport *lport; | |
01649561 | 2261 | int ret; |
7961cba6 | 2262 | DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); |
01649561 JS |
2263 | |
2264 | if (vport->nvmei_support == 0) | |
2265 | return; | |
2266 | ||
2267 | localport = vport->localport; | |
01649561 JS |
2268 | lport = (struct lpfc_nvme_lport *)localport->private; |
2269 | ||
2270 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, | |
32350664 | 2271 | "6011 Destroying NVME localport x%px\n", |
01649561 | 2272 | localport); |
166d7211 | 2273 | |
01649561 JS |
2274 | /* lport's rport list is clear. Unregister |
2275 | * lport and release resources. | |
2276 | */ | |
7961cba6 | 2277 | lport->lport_unreg_cmp = &lport_unreg_cmp; |
01649561 | 2278 | ret = nvme_fc_unregister_localport(localport); |
add9d6be JS |
2279 | |
2280 | /* Wait for completion. This either blocks | |
2281 | * indefinitely or succeeds | |
2282 | */ | |
7961cba6 EM |
2283 | lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); |
2284 | vport->localport = NULL; | |
01649561 JS |
2285 | |
2286 | /* Regardless of the unregister upcall response, clear | |
2287 | * nvmei_support. All rports are unregistered and the | |
2288 | * driver will clean up. | |
2289 | */ | |
2290 | vport->nvmei_support = 0; | |
2291 | if (ret == 0) { | |
2292 | lpfc_printf_vlog(vport, | |
2293 | KERN_INFO, LOG_NVME_DISC, | |
2294 | "6009 Unregistered lport Success\n"); | |
2295 | } else { | |
2296 | lpfc_printf_vlog(vport, | |
2297 | KERN_INFO, LOG_NVME_DISC, | |
2298 | "6010 Unregistered lport " | |
2299 | "Failed, status x%x\n", | |
2300 | ret); | |
2301 | } | |
166d7211 | 2302 | #endif |
01649561 JS |
2303 | } |
2304 | ||
2305 | void | |
2306 | lpfc_nvme_update_localport(struct lpfc_vport *vport) | |
2307 | { | |
4410a67a | 2308 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
01649561 JS |
2309 | struct nvme_fc_local_port *localport; |
2310 | struct lpfc_nvme_lport *lport; | |
2311 | ||
2312 | localport = vport->localport; | |
4410a67a JS |
2313 | if (!localport) { |
2314 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, | |
2315 | "6710 Update NVME fail. No localport\n"); | |
2316 | return; | |
2317 | } | |
01649561 | 2318 | lport = (struct lpfc_nvme_lport *)localport->private; |
4410a67a JS |
2319 | if (!lport) { |
2320 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, | |
32350664 | 2321 | "6171 Update NVME fail. localP x%px, No lport\n", |
4410a67a JS |
2322 | localport); |
2323 | return; | |
2324 | } | |
01649561 | 2325 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
32350664 | 2326 | "6012 Update NVME lport x%px did x%x\n", |
01649561 JS |
2327 | localport, vport->fc_myDID); |
2328 | ||
2329 | localport->port_id = vport->fc_myDID; | |
2330 | if (localport->port_id == 0) | |
2331 | localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; | |
2332 | else | |
2333 | localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; | |
2334 | ||
2335 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | |
32350664 | 2336 | "6030 bound lport x%px to DID x%06x\n", |
01649561 | 2337 | lport, localport->port_id); |
4410a67a | 2338 | #endif |
01649561 JS |
2339 | } |
2340 | ||
2341 | int | |
2342 | lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |
2343 | { | |
7d708033 | 2344 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
01649561 JS |
2345 | int ret = 0; |
2346 | struct nvme_fc_local_port *localport; | |
2347 | struct lpfc_nvme_lport *lport; | |
2348 | struct lpfc_nvme_rport *rport; | |
01466024 | 2349 | struct lpfc_nvme_rport *oldrport; |
01649561 JS |
2350 | struct nvme_fc_remote_port *remote_port; |
2351 | struct nvme_fc_port_info rpinfo; | |
93a3922d | 2352 | struct lpfc_nodelist *prev_ndlp = NULL; |
01649561 JS |
2353 | |
2354 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, | |
2355 | "6006 Register NVME PORT. DID x%06x nlptype x%x\n", | |
2356 | ndlp->nlp_DID, ndlp->nlp_type); | |
2357 | ||
2358 | localport = vport->localport; | |
bb6a8a2c DK |
2359 | if (!localport) |
2360 | return 0; | |
2361 | ||
01649561 JS |
2362 | lport = (struct lpfc_nvme_lport *)localport->private; |
2363 | ||
7a06dcd3 JS |
2364 | /* NVME rports are not preserved across devloss. |
2365 | * Just register this instance. Note, rpinfo->dev_loss_tmo | |
2366 | * is left 0 to indicate accept transport defaults. The | |
2367 | * driver communicates port role capabilities consistent | |
2368 | * with the PRLI response data. | |
2369 | */ | |
2370 | memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); | |
2371 | rpinfo.port_id = ndlp->nlp_DID; | |
2372 | if (ndlp->nlp_type & NLP_NVME_TARGET) | |
2373 | rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; | |
2374 | if (ndlp->nlp_type & NLP_NVME_INITIATOR) | |
2375 | rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; | |
2376 | ||
2377 | if (ndlp->nlp_type & NLP_NVME_DISCOVERY) | |
2378 | rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; | |
2379 | ||
2380 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); | |
2381 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); | |
01466024 | 2382 | |
9e210178 | 2383 | spin_lock_irq(&vport->phba->hbalock); |
01466024 | 2384 | oldrport = lpfc_ndlp_get_nrport(ndlp); |
61184f17 JS |
2385 | if (oldrport) { |
2386 | prev_ndlp = oldrport->ndlp; | |
2387 | spin_unlock_irq(&vport->phba->hbalock); | |
2388 | } else { | |
2389 | spin_unlock_irq(&vport->phba->hbalock); | |
3fd78355 | 2390 | lpfc_nlp_get(ndlp); |
61184f17 | 2391 | } |
3fd78355 | 2392 | |
7a06dcd3 JS |
2393 | ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); |
2394 | if (!ret) { | |
2395 | /* If the ndlp already has an nrport, this is just | |
2396 | * a resume of the existing rport. Else this is a | |
2397 | * new rport. | |
01649561 | 2398 | */ |
b15bd3e6 JS |
2399 | /* Guard against an unregister/reregister |
2400 | * race that leaves the WAIT flag set. | |
2401 | */ | |
2402 | spin_lock_irq(&vport->phba->hbalock); | |
2403 | ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG; | |
2404 | spin_unlock_irq(&vport->phba->hbalock); | |
7a06dcd3 | 2405 | rport = remote_port->private; |
01466024 | 2406 | if (oldrport) { |
93a3922d JS |
2407 | /* New remoteport record does not guarantee valid |
2408 | * host private memory area. | |
2409 | */ | |
01466024 | 2410 | if (oldrport == remote_port->private) { |
93a3922d JS |
2411 | /* Same remoteport - ndlp should match. |
2412 | * Just reuse. | |
2413 | */ | |
3fd78355 JS |
2414 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, |
2415 | LOG_NVME_DISC, | |
6825b7bd | 2416 | "6014 Rebind lport to current " |
32350664 JS |
2417 | "remoteport x%px wwpn 0x%llx, " |
2418 | "Data: x%x x%x x%px x%px x%x " | |
2419 | " x%06x\n", | |
3fd78355 JS |
2420 | remote_port, |
2421 | remote_port->port_name, | |
2422 | remote_port->port_id, | |
2423 | remote_port->port_role, | |
61184f17 | 2424 | oldrport->ndlp, |
3fd78355 JS |
2425 | ndlp, |
2426 | ndlp->nlp_type, | |
2427 | ndlp->nlp_DID); | |
6825b7bd JS |
2428 | |
2429 | /* It's a complete rebind only if the driver | |
2430 | * is registering with the same ndlp. Otherwise | |
2431 | * the driver likely executed a node swap | |
2432 | * prior to this registration and the ndlp to | |
2433 | * remoteport binding needs to be redone. | |
2434 | */ | |
2435 | if (prev_ndlp == ndlp) | |
2436 | return 0; | |
2437 | ||
3fd78355 | 2438 | } |
2b75d0f9 | 2439 | |
3fd78355 JS |
2440 | /* Sever the ndlp<->rport association |
2441 | * before dropping the ndlp ref from | |
2442 | * register. | |
2b75d0f9 | 2443 | */ |
3fd78355 | 2444 | spin_lock_irq(&vport->phba->hbalock); |
2b75d0f9 | 2445 | ndlp->nrport = NULL; |
01466024 | 2446 | ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG; |
3fd78355 | 2447 | spin_unlock_irq(&vport->phba->hbalock); |
2b75d0f9 | 2448 | rport->ndlp = NULL; |
3fd78355 | 2449 | rport->remoteport = NULL; |
b04744ce JS |
2450 | |
2451 | /* Reference only removed if previous NDLP is no longer | |
2452 | * active. It might be just a swap and removing the | |
2453 | * reference would cause a premature cleanup. | |
2454 | */ | |
2455 | if (prev_ndlp && prev_ndlp != ndlp) { | |
2456 | if ((!NLP_CHK_NODE_ACT(prev_ndlp)) || | |
2457 | (!prev_ndlp->nrport)) | |
2458 | lpfc_nlp_put(prev_ndlp); | |
2459 | } | |
01649561 | 2460 | } |
2b75d0f9 DK |
2461 | |
2462 | /* Clean bind the rport to the ndlp. */ | |
2463 | rport->remoteport = remote_port; | |
2464 | rport->lport = lport; | |
3fd78355 JS |
2465 | rport->ndlp = ndlp; |
2466 | spin_lock_irq(&vport->phba->hbalock); | |
2b75d0f9 | 2467 | ndlp->nrport = rport; |
3fd78355 | 2468 | spin_unlock_irq(&vport->phba->hbalock); |
2b75d0f9 DK |
2469 | lpfc_printf_vlog(vport, KERN_INFO, |
2470 | LOG_NVME_DISC | LOG_NODE, | |
6825b7bd JS |
2471 | "6022 Bind lport x%px to remoteport x%px " |
2472 | "rport x%px WWNN 0x%llx, " | |
2b75d0f9 | 2473 | "Rport WWPN 0x%llx DID " |
32350664 | 2474 | "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", |
93a3922d | 2475 | lport, remote_port, rport, |
2b75d0f9 | 2476 | rpinfo.node_name, rpinfo.port_name, |
3fd78355 | 2477 | rpinfo.port_id, rpinfo.port_role, |
93a3922d | 2478 | ndlp, prev_ndlp); |
01649561 | 2479 | } else { |
7a06dcd3 JS |
2480 | lpfc_printf_vlog(vport, KERN_ERR, |
2481 | LOG_NVME_DISC | LOG_NODE, | |
2482 | "6031 RemotePort Registration failed " | |
2483 | "err: %d, DID x%06x\n", | |
2484 | ret, ndlp->nlp_DID); | |
01649561 | 2485 | } |
7a06dcd3 | 2486 | |
01649561 | 2487 | return ret; |
166d7211 JS |
2488 | #else |
2489 | return 0; | |
2490 | #endif | |
01649561 JS |
2491 | } |
2492 | ||
6f2589f4 JS |
2493 | /** |
2494 | * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport | |
2495 | * | |
2496 | * If the ndlp represents an NVME Target, that we are logged into, | |
2497 | * ping the NVME FC Transport layer to initiate a device rescan | |
2498 | * on this remote NPort. | |
2499 | */ | |
2500 | void | |
2501 | lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |
2502 | { | |
2503 | #if (IS_ENABLED(CONFIG_NVME_FC)) | |
a6d10f24 JS |
2504 | struct lpfc_nvme_rport *nrport; |
2505 | struct nvme_fc_remote_port *remoteport = NULL; | |
6f2589f4 | 2506 | |
a6d10f24 JS |
2507 | spin_lock_irq(&vport->phba->hbalock); |
2508 | nrport = lpfc_ndlp_get_nrport(ndlp); | |
2509 | if (nrport) | |
2510 | remoteport = nrport->remoteport; | |
2511 | spin_unlock_irq(&vport->phba->hbalock); | |
6f2589f4 JS |
2512 | |
2513 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | |
2514 | "6170 Rescan NPort DID x%06x type x%x " | |
a6d10f24 JS |
2515 | "state x%x nrport x%px remoteport x%px\n", |
2516 | ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, | |
2517 | nrport, remoteport); | |
2518 | ||
2519 | if (!nrport || !remoteport) | |
2520 | goto rescan_exit; | |
6f2589f4 JS |
2521 | |
2522 | /* Only rescan if we are an NVME target in the MAPPED state */ | |
2523 | if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && | |
2524 | ndlp->nlp_state == NLP_STE_MAPPED_NODE) { | |
2525 | nvme_fc_rescan_remoteport(remoteport); | |
2526 | ||
2527 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, | |
2528 | "6172 NVME rescanned DID x%06x " | |
2529 | "port_state x%x\n", | |
2530 | ndlp->nlp_DID, remoteport->port_state); | |
2531 | } | |
2532 | return; | |
a6d10f24 JS |
2533 | rescan_exit: |
2534 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | |
2535 | "6169 Skip NVME Rport Rescan, NVME remoteport " | |
2536 | "unregistered\n"); | |
6f2589f4 JS |
2537 | #endif |
2538 | } | |
2539 | ||
01649561 JS |
2540 | /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. |
2541 | * | |
2542 | * There is no notion of Devloss or rport recovery from the current | |
2543 | * nvme_transport perspective. Loss of an rport just means IO cannot | |
2544 | * be sent and recovery is completely up to the initator. | |
2545 | * For now, the driver just unbinds the DID and port_role so that | |
2546 | * no further IO can be issued. Changes are planned for later. | |
2547 | * | |
2548 | * Notes - the ndlp reference count is not decremented here since | |
2549 | * since there is no nvme_transport api for devloss. Node ref count | |
2550 | * is only adjusted in driver unload. | |
2551 | */ | |
2552 | void | |
2553 | lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |
2554 | { | |
7d708033 | 2555 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
01649561 JS |
2556 | int ret; |
2557 | struct nvme_fc_local_port *localport; | |
2558 | struct lpfc_nvme_lport *lport; | |
2559 | struct lpfc_nvme_rport *rport; | |
9e210178 | 2560 | struct nvme_fc_remote_port *remoteport = NULL; |
01649561 JS |
2561 | |
2562 | localport = vport->localport; | |
2563 | ||
2564 | /* This is fundamental error. The localport is always | |
2565 | * available until driver unload. Just exit. | |
2566 | */ | |
2567 | if (!localport) | |
2568 | return; | |
2569 | ||
2570 | lport = (struct lpfc_nvme_lport *)localport->private; | |
2571 | if (!lport) | |
2572 | goto input_err; | |
2573 | ||
9e210178 | 2574 | spin_lock_irq(&vport->phba->hbalock); |
01466024 | 2575 | rport = lpfc_ndlp_get_nrport(ndlp); |
9e210178 JS |
2576 | if (rport) |
2577 | remoteport = rport->remoteport; | |
2578 | spin_unlock_irq(&vport->phba->hbalock); | |
2579 | if (!remoteport) | |
01649561 JS |
2580 | goto input_err; |
2581 | ||
01649561 | 2582 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
32350664 | 2583 | "6033 Unreg nvme remoteport x%px, portname x%llx, " |
01649561 JS |
2584 | "port_id x%06x, portstate x%x port type x%x\n", |
2585 | remoteport, remoteport->port_name, | |
2586 | remoteport->port_id, remoteport->port_state, | |
2587 | ndlp->nlp_type); | |
2588 | ||
2589 | /* Sanity check ndlp type. Only call for NVME ports. Don't | |
2590 | * clear any rport state until the transport calls back. | |
2591 | */ | |
3b5bde69 JS |
2592 | |
2593 | if (ndlp->nlp_type & NLP_NVME_TARGET) { | |
7a06dcd3 JS |
2594 | /* No concern about the role change on the nvme remoteport. |
2595 | * The transport will update it. | |
2596 | */ | |
add9d6be | 2597 | ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG; |
7438273f JS |
2598 | |
2599 | /* Don't let the host nvme transport keep sending keep-alives | |
2600 | * on this remoteport. Vport is unloading, no recovery. The | |
2601 | * return values is ignored. The upcall is a courtesy to the | |
2602 | * transport. | |
2603 | */ | |
2604 | if (vport->load_flag & FC_UNLOADING) | |
2605 | (void)nvme_fc_set_remoteport_devloss(remoteport, 0); | |
2606 | ||
01649561 | 2607 | ret = nvme_fc_unregister_remoteport(remoteport); |
3fd78355 JS |
2608 | if (ret != 0) { |
2609 | lpfc_nlp_put(ndlp); | |
01649561 JS |
2610 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
2611 | "6167 NVME unregister failed %d " | |
2612 | "port_state x%x\n", | |
2613 | ret, remoteport->port_state); | |
3fd78355 | 2614 | } |
01649561 JS |
2615 | } |
2616 | return; | |
2617 | ||
2618 | input_err: | |
166d7211 | 2619 | #endif |
01649561 | 2620 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
32350664 | 2621 | "6168 State error: lport x%px, rport x%px FCID x%06x\n", |
01649561 JS |
2622 | vport->localport, ndlp->rport, ndlp->nlp_DID); |
2623 | } | |
318083ad JS |
2624 | |
2625 | /** | |
2626 | * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort | |
2627 | * @phba: pointer to lpfc hba data structure. | |
2628 | * @axri: pointer to the fcp xri abort wcqe structure. | |
c00f62e6 | 2629 | * @lpfc_ncmd: The nvme job structure for the request being aborted. |
318083ad JS |
2630 | * |
2631 | * This routine is invoked by the worker thread to process a SLI4 fast-path | |
952c303b DK |
2632 | * NVME aborted xri. Aborted NVME IO commands are completed to the transport |
2633 | * here. | |
318083ad JS |
2634 | **/ |
2635 | void | |
2636 | lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, | |
c00f62e6 JS |
2637 | struct sli4_wcqe_xri_aborted *axri, |
2638 | struct lpfc_io_buf *lpfc_ncmd) | |
318083ad JS |
2639 | { |
2640 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); | |
952c303b | 2641 | struct nvmefc_fcp_req *nvme_cmd = NULL; |
c00f62e6 | 2642 | struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; |
318083ad | 2643 | |
86c67379 | 2644 | |
c00f62e6 JS |
2645 | if (ndlp) |
2646 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); | |
86c67379 | 2647 | |
c00f62e6 JS |
2648 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
2649 | "6311 nvme_cmd %p xri x%x tag x%x abort complete and " | |
2650 | "xri released\n", | |
2651 | lpfc_ncmd->nvmeCmd, xri, | |
2652 | lpfc_ncmd->cur_iocbq.iotag); | |
2653 | ||
2654 | /* Aborted NVME commands are required to not complete | |
2655 | * before the abort exchange command fully completes. | |
2656 | * Once completed, it is available via the put list. | |
2657 | */ | |
2658 | if (lpfc_ncmd->nvmeCmd) { | |
2659 | nvme_cmd = lpfc_ncmd->nvmeCmd; | |
2660 | nvme_cmd->done(nvme_cmd); | |
2661 | lpfc_ncmd->nvmeCmd = NULL; | |
2662 | } | |
2663 | lpfc_release_nvme_buf(phba, lpfc_ncmd); | |
318083ad | 2664 | } |
c3725bdc JS |
2665 | |
2666 | /** | |
2667 | * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete | |
2668 | * @phba: Pointer to HBA context object. | |
2669 | * | |
2670 | * This function flushes all wqes in the nvme rings and frees all resources | |
2671 | * in the txcmplq. This function does not issue abort wqes for the IO | |
2672 | * commands in txcmplq, they will just be returned with | |
2673 | * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI | |
2674 | * slot has been permanently disabled. | |
2675 | **/ | |
2676 | void | |
2677 | lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) | |
2678 | { | |
2679 | struct lpfc_sli_ring *pring; | |
2680 | u32 i, wait_cnt = 0; | |
2681 | ||
cdb42bec | 2682 | if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) |
c3725bdc JS |
2683 | return; |
2684 | ||
c00f62e6 | 2685 | /* Cycle through all IO rings and make sure all outstanding |
c3725bdc JS |
2686 | * WQEs have been removed from the txcmplqs. |
2687 | */ | |
cdb42bec | 2688 | for (i = 0; i < phba->cfg_hdw_queue; i++) { |
c00f62e6 | 2689 | if (!phba->sli4_hba.hdwq[i].io_wq) |
6a828b0f | 2690 | continue; |
c00f62e6 | 2691 | pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
c3725bdc | 2692 | |
d580c613 JS |
2693 | if (!pring) |
2694 | continue; | |
2695 | ||
c3725bdc JS |
2696 | /* Retrieve everything on the txcmplq */ |
2697 | while (!list_empty(&pring->txcmplq)) { | |
2698 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); | |
2699 | wait_cnt++; | |
2700 | ||
2701 | /* The sleep is 10mS. Every ten seconds, | |
2702 | * dump a message. Something is wrong. | |
2703 | */ | |
2704 | if ((wait_cnt % 1000) == 0) { | |
2705 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | |
2706 | "6178 NVME IO not empty, " | |
2707 | "cnt %d\n", wait_cnt); | |
2708 | } | |
2709 | } | |
2710 | } | |
2711 | } | |
84f2ddf8 JS |
2712 | |
2713 | void | |
2714 | lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn) | |
2715 | { | |
2716 | #if (IS_ENABLED(CONFIG_NVME_FC)) | |
2717 | struct lpfc_io_buf *lpfc_ncmd; | |
2718 | struct nvmefc_fcp_req *nCmd; | |
2719 | struct lpfc_nvme_fcpreq_priv *freqpriv; | |
2720 | ||
2721 | if (!pwqeIn->context1) { | |
2722 | lpfc_sli_release_iocbq(phba, pwqeIn); | |
2723 | return; | |
2724 | } | |
2725 | /* For abort iocb just return, IO iocb will do a done call */ | |
2726 | if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == | |
2727 | CMD_ABORT_XRI_CX) { | |
2728 | lpfc_sli_release_iocbq(phba, pwqeIn); | |
2729 | return; | |
2730 | } | |
2731 | lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1; | |
2732 | ||
2733 | spin_lock(&lpfc_ncmd->buf_lock); | |
2734 | if (!lpfc_ncmd->nvmeCmd) { | |
2735 | spin_unlock(&lpfc_ncmd->buf_lock); | |
2736 | lpfc_release_nvme_buf(phba, lpfc_ncmd); | |
2737 | return; | |
2738 | } | |
2739 | ||
2740 | nCmd = lpfc_ncmd->nvmeCmd; | |
2741 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, | |
2742 | "6194 NVME Cancel xri %x\n", | |
2743 | lpfc_ncmd->cur_iocbq.sli4_xritag); | |
2744 | ||
2745 | nCmd->transferred_length = 0; | |
2746 | nCmd->rcv_rsplen = 0; | |
2747 | nCmd->status = NVME_SC_INTERNAL; | |
2748 | freqpriv = nCmd->private; | |
2749 | freqpriv->nvme_buf = NULL; | |
2750 | lpfc_ncmd->nvmeCmd = NULL; | |
2751 | ||
2752 | spin_unlock(&lpfc_ncmd->buf_lock); | |
2753 | nCmd->done(nCmd); | |
2754 | ||
2755 | /* Call release with XB=1 to queue the IO into the abort list. */ | |
2756 | lpfc_release_nvme_buf(phba, lpfc_ncmd); | |
2757 | #endif | |
2758 | } |