scsi: lpfc: Fix NVMEI driver not decrementing counter causing bad rport state.
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	/* ensure WQE bcopy flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

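/*
 * Editor's worked example of the fullness check in lpfc_sli4_wq_put(): with
 * entry_count = 256, host_index = 255 and hba_index = 0, the next slot is
 * (255 + 1) % 256 = 0 == hba_index, so the WQ is full and the routine
 * returns -ENOMEM. One slot is always sacrificed so that a full ring can be
 * distinguished from an empty one (host_index == hba_index).
 */
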
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

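/*
 * Editor's example of the release arithmetic above: if hba_index is 2 and
 * the hardware reports consumption up to index 5, the loop walks
 * 2 -> 3 -> 4 -> 5 and returns 3, freeing three slots for reuse by
 * lpfc_sli4_wq_put().
 */
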
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

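/*
 * Editor's note (an assumption drawn from the surrounding code, not driver
 * documentation): no target index is passed here, unlike
 * lpfc_sli4_wq_release(), because the driver keeps at most one mailbox
 * command outstanding; a completion therefore always releases exactly one
 * MQE, and the routine unconditionally returns 1.
 */
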
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

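/*
 * Typical EQ service pattern (editor's sketch, condensed from the interrupt
 * handlers later in this file; not a new API): drain the queue, then release
 * the popped entries and rearm with a single doorbell write.
 *
 *	while ((eqe = lpfc_sli4_eq_get(fpeq)))
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
 */
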
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[put_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (put_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((put_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}

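/*
 * Editor's note on the doorbell batching above: the RQ doorbell is only rung
 * when host_index lands on a multiple of entry_repost. With entry_repost = 8,
 * seven consecutive posts return without touching hardware and the eighth
 * rings the doorbell once with num_posted = 8, reducing MMIO writes to one
 * per batch of posted buffers.
 */
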
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held; it acquires the hbalock
 * itself. This function checks if stop_time (ratov from setting rrq
 * active) has been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear each rrq.
 * If @ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

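/*
 * RRQ lifecycle, summarized by the editor from the routines above: an
 * aborted exchange marks its XRI in ndlp->active_rrqs_xri_bitmap and queues
 * an lpfc_node_rrq with rrq_stop_time = now + RATOV + 1. When the rrq timer
 * fires, lpfc_handle_rrq_active() either issues the RRQ ELS (send_rrq set)
 * or calls lpfc_clr_rrq_active() directly; only after the bitmap bit is
 * cleared may that XRI be reused with the same remote port.
 */
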
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

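/*
 * Editor's note: the skip loop above is what enforces the RRQ rule. An XRI
 * whose bit is still set in ndlp->active_rrqs_xri_bitmap (see
 * lpfc_set_rrq_active() above) must not carry new traffic to that DID, so
 * flagged sglqs are rotated to the tail and the search gives up once it
 * cycles back around to start_sglq.
 */
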
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

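/*
 * Editor's sketch of a typical call, with status values assumed from other
 * callers in the driver: failing back every IOCB still queued when the
 * adapter or link is being torn down.
 *
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */
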
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

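/*
 * Editor's examples of the classification above: CMD_ELS_REQUEST64_CR and
 * CMD_FCP_IWRITE64_CR map to LPFC_SOL_IOCB, CMD_ABORT_XRI_CN to
 * LPFC_ABORT_IOCB, CMD_RCV_ELS_REQ64_CX to LPFC_UNSOL_IOCB, and any
 * unrecognized command code falls through to LPFC_UNKNOWN_IOCB.
 */
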
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

e59058c4 1487/**
3621a710 1488 * lpfc_sli_next_iotag - Get an iotag for the iocb
e59058c4
JS
1489 * @phba: Pointer to HBA context object.
1490 * @iocbq: Pointer to driver iocb object.
1491 *
1492 * This function gets an iotag for the iocb. If there is no unused iotag and
1493 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1494 * array and assigns a new iotag.
1495 * The function returns the allocated iotag if successful, else returns zero.
1496 * Zero is not a valid iotag.
1497 * The caller is not required to hold any lock.
1498 **/
604a3e30 1499uint16_t
2e0fef85 1500lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 1501{
2e0fef85
JS
1502 struct lpfc_iocbq **new_arr;
1503 struct lpfc_iocbq **old_arr;
604a3e30
JB
1504 size_t new_len;
1505 struct lpfc_sli *psli = &phba->sli;
1506 uint16_t iotag;
dea3101e 1507
2e0fef85 1508 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1509 iotag = psli->last_iotag;
1510 if(++iotag < psli->iocbq_lookup_len) {
1511 psli->last_iotag = iotag;
1512 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1513 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1514 iocbq->iotag = iotag;
1515 return iotag;
2e0fef85 1516 } else if (psli->iocbq_lookup_len < (0xffff
604a3e30
JB
1517 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1518 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2e0fef85
JS
1519 spin_unlock_irq(&phba->hbalock);
1520 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
604a3e30
JB
1521 GFP_KERNEL);
1522 if (new_arr) {
2e0fef85 1523 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1524 old_arr = psli->iocbq_lookup;
1525 if (new_len <= psli->iocbq_lookup_len) {
1526 /* highly improbable case */
1527 kfree(new_arr);
1528 iotag = psli->last_iotag;
1529 if (++iotag < psli->iocbq_lookup_len) {
1530 psli->last_iotag = iotag;
1531 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1532 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1533 iocbq->iotag = iotag;
1534 return iotag;
1535 }
2e0fef85 1536 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1537 return 0;
1538 }
1539 if (psli->iocbq_lookup)
1540 memcpy(new_arr, old_arr,
1541 ((psli->last_iotag + 1) *
311464ec 1542 sizeof (struct lpfc_iocbq *)));
604a3e30
JB
1543 psli->iocbq_lookup = new_arr;
1544 psli->iocbq_lookup_len = new_len;
1545 psli->last_iotag = iotag;
1546 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1547 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1548 iocbq->iotag = iotag;
1549 kfree(old_arr);
1550 return iotag;
1551 }
8f6d98d2 1552 } else
2e0fef85 1553 spin_unlock_irq(&phba->hbalock);
dea3101e 1554
bc73905a 1555 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
1556 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1557 psli->last_iotag);
dea3101e 1558
604a3e30 1559 return 0;
dea3101e 1560}
1561
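/*
 * Editor's note: a minimal sketch, not driver code, of the
 * drop-lock / allocate / re-check pattern in lpfc_sli_next_iotag()
 * above. The hbalock cannot be held across kzalloc(..., GFP_KERNEL),
 * which may sleep, so the table length must be re-validated once the
 * lock is retaken in case another thread grew the table first.
 */
static void **grow_table_sketch(spinlock_t *lock, void **tbl,
                                size_t *len, size_t incr)
{
        size_t new_len = *len + incr;
        void **new_tbl;

        spin_unlock_irq(lock);                  /* allocation may sleep */
        new_tbl = kzalloc(new_len * sizeof(*new_tbl), GFP_KERNEL);
        spin_lock_irq(lock);
        if (!new_tbl || new_len <= *len) {      /* OOM, or lost the race */
                kfree(new_tbl);                 /* kfree(NULL) is a no-op */
                return tbl;                     /* keep the current table */
        }
        /* a real implementation would also re-read the table pointer here */
        memcpy(new_tbl, tbl, *len * sizeof(*tbl));
        kfree(tbl);
        *len = new_len;
        return new_tbl;
}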
e59058c4 1562/**
3621a710 1563 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
e59058c4
JS
1564 * @phba: Pointer to HBA context object.
1565 * @pring: Pointer to driver SLI ring object.
1566 * @iocb: Pointer to iocb slot in the ring.
1567 * @nextiocb: Pointer to driver iocb object which needs to be
1568 * posted to firmware.
1569 *
1570 * This function is called with hbalock held to post a new iocb to
1571 * the firmware. This function copies the new iocb to ring iocb slot and
1572 * updates the ring pointers. It adds the new iocb to the txcmplq if there is
1573 * a completion callback for this iocb; otherwise the function will free the
1574 * iocb object.
1575 **/
dea3101e 1576static void
1577lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1578 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1579{
1c2ba475 1580 lockdep_assert_held(&phba->hbalock);
dea3101e 1581 /*
604a3e30 1582 * Set up an iotag
dea3101e 1583 */
604a3e30 1584 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 1585
e2a0a9d6 1586
a58cbd52
JS
1587 if (pring->ringno == LPFC_ELS_RING) {
1588 lpfc_debugfs_slow_ring_trc(phba,
1589 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1590 *(((uint32_t *) &nextiocb->iocb) + 4),
1591 *(((uint32_t *) &nextiocb->iocb) + 6),
1592 *(((uint32_t *) &nextiocb->iocb) + 7));
1593 }
1594
dea3101e 1595 /*
1596 * Issue iocb command to adapter
1597 */
92d7f7b0 1598 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e 1599 wmb();
1600 pring->stats.iocb_cmd++;
1601
1602 /*
1603 * If there is no completion routine to call, we can release the
1604 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1605 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1606 */
1607 if (nextiocb->iocb_cmpl)
1608 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 1609 else
2e0fef85 1610 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e 1611
1612 /*
1613 * Let the HBA know what IOCB slot will be the next one the
1614 * driver will put a command into.
1615 */
7e56aa25
JS
1616 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1617 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e 1618}
1619
e59058c4 1620/**
3621a710 1621 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1622 * @phba: Pointer to HBA context object.
1623 * @pring: Pointer to driver SLI ring object.
1624 *
1625 * The caller is not required to hold any lock for calling this function.
1626 * This function updates the chip attention bits for the ring to inform firmware
1627 * that there is pending work to be done for this ring and requests an
1628 * interrupt when there is space available in the ring. This function is
1629 * called when the driver is unable to post more iocbs to the ring due
1630 * to unavailability of space in the ring.
1631 **/
dea3101e 1632static void
2e0fef85 1633lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1634{
1635 int ringno = pring->ringno;
1636
1637 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1638
1639 wmb();
1640
1641 /*
1642 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1643 * The HBA will tell us when an IOCB entry is available.
1644 */
1645 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1646 readl(phba->CAregaddr); /* flush */
1647
1648 pring->stats.iocb_cmd_full++;
1649}
1650
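/*
 * Editor's note: worked example, not driver code. Each ring owns a
 * 4-bit nibble in the Chip Attention register, which is why the bits
 * above are shifted by (ringno * 4). For ring 2, for instance,
 *
 *      (CA_R0ATT | CA_R0CE_REQ) << (2 * 4)
 *
 * moves the ring-0 attention and command-entry-request bits up into
 * the ring-2 nibble (bits 8..11).
 */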
e59058c4 1651/**
3621a710 1652 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1653 * @phba: Pointer to HBA context object.
1654 * @pring: Pointer to driver SLI ring object.
1655 *
1656 * This function updates the chip attention register bit for the
1657 * given ring to inform HBA that there is more work to be done
1658 * in this ring. The caller is not required to hold any lock.
1659 **/
dea3101e 1660static void
2e0fef85 1661lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1662{
1663 int ringno = pring->ringno;
1664
1665 /*
1666 * Tell the HBA that there is work to do in this ring.
1667 */
34b02dcd
JS
1668 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1669 wmb();
1670 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1671 readl(phba->CAregaddr); /* flush */
1672 }
dea3101e 1673}
1674
e59058c4 1675/**
3621a710 1676 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1677 * @phba: Pointer to HBA context object.
1678 * @pring: Pointer to driver SLI ring object.
1679 *
1680 * This function is called with hbalock held to post pending iocbs
1681 * in the txq to the firmware. This function is called when driver
1682 * detects space available in the ring.
1683 **/
dea3101e 1684static void
2e0fef85 1685lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1686{
1687 IOCB_t *iocb;
1688 struct lpfc_iocbq *nextiocb;
1689
1c2ba475
JT
1690 lockdep_assert_held(&phba->hbalock);
1691
dea3101e 1692 /*
1693 * Check to see if:
1694 * (a) there is anything on the txq to send
1695 * (b) link is up
1696 * (c) link attention events can be processed (fcp ring only)
1697 * (d) IOCB processing is not blocked by the outstanding mbox command.
1698 */
0e9bb8d7
JS
1699
1700 if (lpfc_is_link_up(phba) &&
1701 (!list_empty(&pring->txq)) &&
895427bd 1702 (pring->ringno != LPFC_FCP_RING ||
0b727fea 1703 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1704
1705 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1706 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1707 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1708
1709 if (iocb)
1710 lpfc_sli_update_ring(phba, pring);
1711 else
1712 lpfc_sli_update_full_ring(phba, pring);
1713 }
1714
1715 return;
1716}
1717
e59058c4 1718/**
3621a710 1719 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1720 * @phba: Pointer to HBA context object.
1721 * @hbqno: HBQ number.
1722 *
1723 * This function is called with hbalock held to get the next
1724 * available slot for the given HBQ. If there is a free slot
1725 * available for the HBQ, it will return a pointer to the next available
1726 * HBQ entry; else it will return NULL.
1727 **/
a6ababd2 1728static struct lpfc_hbq_entry *
ed957684
JS
1729lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1730{
1731 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1732
1c2ba475
JT
1733 lockdep_assert_held(&phba->hbalock);
1734
ed957684
JS
1735 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1736 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1737 hbqp->next_hbqPutIdx = 0;
1738
1739 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1740 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1741 uint32_t getidx = le32_to_cpu(raw_index);
1742
1743 hbqp->local_hbqGetIdx = getidx;
1744
1745 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1746 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1747 LOG_SLI | LOG_VPORT,
e8b62011 1748 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1749 "%u is greater than hbqp->entry_count %u\n",
e8b62011 1750 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1751 hbqp->entry_count);
1752
1753 phba->link_state = LPFC_HBA_ERROR;
1754 return NULL;
1755 }
1756
1757 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1758 return NULL;
1759 }
1760
51ef4c26
JS
1761 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1762 hbqp->hbqPutIdx;
ed957684
JS
1763}
1764
e59058c4 1765/**
3621a710 1766 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1767 * @phba: Pointer to HBA context object.
1768 *
1769 * This function is called with no lock held to free all the
1770 * hbq buffers while uninitializing the SLI interface. It also
1771 * frees the HBQ buffers returned by the firmware but not yet
1772 * processed by the upper layers.
1773 **/
ed957684
JS
1774void
1775lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1776{
92d7f7b0
JS
1777 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1778 struct hbq_dmabuf *hbq_buf;
3163f725 1779 unsigned long flags;
51ef4c26 1780 int i, hbq_count;
ed957684 1781
51ef4c26 1782 hbq_count = lpfc_sli_hbq_count();
ed957684 1783 /* Return all memory used by all HBQs */
3163f725 1784 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1785 for (i = 0; i < hbq_count; ++i) {
1786 list_for_each_entry_safe(dmabuf, next_dmabuf,
1787 &phba->hbqs[i].hbq_buffer_list, list) {
1788 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1789 list_del(&hbq_buf->dbuf.list);
1790 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1791 }
a8adb832 1792 phba->hbqs[i].buffer_count = 0;
ed957684 1793 }
3163f725
JS
1794
1795 /* Mark the HBQs not in use */
1796 phba->hbq_in_use = 0;
1797 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
1798}
1799
e59058c4 1800/**
3621a710 1801 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
1802 * @phba: Pointer to HBA context object.
1803 * @hbqno: HBQ number.
1804 * @hbq_buf: Pointer to HBQ buffer.
1805 *
1806 * This function is called with the hbalock held to post an
1807 * hbq buffer to the firmware. If the function finds an empty
1808 * slot in the HBQ, it will post the buffer. The function will return
1809 * zero if it successfully posts the buffer, else it will return
1810 * an error.
1811 **/
3772a991 1812static int
ed957684 1813lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 1814 struct hbq_dmabuf *hbq_buf)
3772a991 1815{
1c2ba475 1816 lockdep_assert_held(&phba->hbalock);
3772a991
JS
1817 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1818}
1819
1820/**
1821 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1822 * @phba: Pointer to HBA context object.
1823 * @hbqno: HBQ number.
1824 * @hbq_buf: Pointer to HBQ buffer.
1825 *
1826 * This function is called with the hbalock held to post an hbq buffer to the
1827 * firmware. If the function finds an empty slot in the HBQ, it will post the
1828 * buffer and place it on the hbq_buffer_list. The function will return zero if
1829 * it successfully posts the buffer; else it will return an error.
1830 **/
1831static int
1832lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1833 struct hbq_dmabuf *hbq_buf)
ed957684
JS
1834{
1835 struct lpfc_hbq_entry *hbqe;
92d7f7b0 1836 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684 1837
1c2ba475 1838 lockdep_assert_held(&phba->hbalock);
ed957684
JS
1839 /* Get next HBQ entry slot to use */
1840 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1841 if (hbqe) {
1842 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1843
92d7f7b0
JS
1844 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1845 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
895427bd 1846 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
ed957684 1847 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
1848 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1849 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1850 /* Sync SLIM */
ed957684
JS
1851 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1852 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 1853 /* flush */
ed957684 1854 readl(phba->hbq_put + hbqno);
51ef4c26 1855 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
1856 return 0;
1857 } else
1858 return -ENOMEM;
ed957684
JS
1859}
1860
4f774513
JS
1861/**
1862 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1863 * @phba: Pointer to HBA context object.
1864 * @hbqno: HBQ number.
1865 * @hbq_buf: Pointer to HBQ buffer.
1866 *
1867 * This function is called with the hbalock held to post an RQE to the SLI4
1868 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1869 * the hbq_buffer_list and return zero, otherwise it will return an error.
1870 **/
1871static int
1872lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1873 struct hbq_dmabuf *hbq_buf)
1874{
1875 int rc;
1876 struct lpfc_rqe hrqe;
1877 struct lpfc_rqe drqe;
895427bd
JS
1878 struct lpfc_queue *hrq;
1879 struct lpfc_queue *drq;
1880
1881 if (hbqno != LPFC_ELS_HBQ)
1882 return 1;
1883 hrq = phba->sli4_hba.hdr_rq;
1884 drq = phba->sli4_hba.dat_rq;
4f774513 1885
1c2ba475 1886 lockdep_assert_held(&phba->hbalock);
4f774513
JS
1887 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1888 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1889 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1890 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
895427bd 1891 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
4f774513
JS
1892 if (rc < 0)
1893 return rc;
895427bd 1894 hbq_buf->tag = (rc | (hbqno << 16));
4f774513
JS
1895 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1896 return 0;
1897}
1898
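/*
 * Editor's note: a minimal sketch, not driver code, of the buffer-tag
 * encoding used above and decoded by lpfc_sli_hbqbuf_find(): the HBQ
 * number lives in the upper 16 bits, and the per-HBQ buffer index (or
 * the RQE index returned by lpfc_sli4_rq_put() on SLI4) in the lower
 * 16 bits.
 */
static inline uint32_t hbq_tag_pack_sketch(uint32_t hbqno, uint32_t idx)
{
        return (hbqno << 16) | (idx & 0xffff);
}

static inline uint32_t hbq_tag_to_hbqno_sketch(uint32_t tag)
{
        return tag >> 16;
}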
e59058c4 1899/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
1900static struct lpfc_hbq_init lpfc_els_hbq = {
1901 .rn = 1,
def9c7a9 1902 .entry_count = 256,
92d7f7b0
JS
1903 .mask_count = 0,
1904 .profile = 0,
51ef4c26 1905 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 1906 .buffer_count = 0,
a257bf90
JS
1907 .init_count = 40,
1908 .add_count = 40,
92d7f7b0 1909};
ed957684 1910
e59058c4 1911/* Array of HBQs */
78b2d852 1912struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0
JS
1913 &lpfc_els_hbq,
1914};
ed957684 1915
e59058c4 1916/**
3621a710 1917 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
1918 * @phba: Pointer to HBA context object.
1919 * @hbqno: HBQ number.
1920 * @count: Number of HBQ buffers to be posted.
1921 *
d7c255b2
JS
1922 * This function is called with no lock held to post more hbq buffers to the
1923 * given HBQ. The function returns the number of HBQ buffers successfully
1924 * posted.
e59058c4 1925 **/
311464ec 1926static int
92d7f7b0 1927lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 1928{
d7c255b2 1929 uint32_t i, posted = 0;
3163f725 1930 unsigned long flags;
92d7f7b0 1931 struct hbq_dmabuf *hbq_buffer;
d7c255b2 1932 LIST_HEAD(hbq_buf_list);
eafe1df9 1933 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 1934 return 0;
51ef4c26 1935
d7c255b2
JS
1936 if ((phba->hbqs[hbqno].buffer_count + count) >
1937 lpfc_hbq_defs[hbqno]->entry_count)
1938 count = lpfc_hbq_defs[hbqno]->entry_count -
1939 phba->hbqs[hbqno].buffer_count;
1940 if (!count)
1941 return 0;
1942 /* Allocate HBQ entries */
1943 for (i = 0; i < count; i++) {
1944 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1945 if (!hbq_buffer)
1946 break;
1947 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1948 }
3163f725
JS
1949 /* Check whether HBQ is still in use */
1950 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 1951 if (!phba->hbq_in_use)
d7c255b2
JS
1952 goto err;
1953 while (!list_empty(&hbq_buf_list)) {
1954 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1955 dbuf.list);
1956 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1957 (hbqno << 16));
3772a991 1958 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 1959 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
1960 posted++;
1961 } else
51ef4c26 1962 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 1963 }
3163f725 1964 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1965 return posted;
1966err:
eafe1df9 1967 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1968 while (!list_empty(&hbq_buf_list)) {
1969 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1970 dbuf.list);
1971 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1972 }
1973 return 0;
ed957684
JS
1974}
1975
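/*
 * Editor's note: a minimal sketch, not driver code, of the
 * stage-then-commit pattern used by lpfc_sli_hbqbuf_fill_hbqs()
 * above: allocate (possibly sleeping) onto a private list with no
 * lock held, then take the lock once, re-check that the target is
 * still live, and commit; anything left over is freed.
 */
static int stage_then_commit_sketch(spinlock_t *lock, bool *live,
                                    struct list_head *target, int count)
{
        LIST_HEAD(staged);
        struct list_head *pos, *tmp;
        unsigned long flags;
        int i, posted = 0;

        for (i = 0; i < count; i++) {
                struct list_head *item = kzalloc(sizeof(*item), GFP_KERNEL);

                if (!item)
                        break;
                list_add_tail(item, &staged);
        }

        spin_lock_irqsave(lock, flags);
        if (*live) {
                list_for_each_safe(pos, tmp, &staged) {
                        list_move_tail(pos, target);
                        posted++;
                }
        }
        spin_unlock_irqrestore(lock, flags);

        /* whatever was not committed is torn back down */
        list_for_each_safe(pos, tmp, &staged) {
                list_del(pos);
                kfree(pos);
        }
        return posted;
}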
e59058c4 1976/**
3621a710 1977 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
1978 * @phba: Pointer to HBA context object.
1979 * @qno: HBQ number.
1980 *
1981 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
1982 * is called with no lock held. The function returns the number of HBQ entries
1983 * successfully posted.
e59058c4 1984 **/
92d7f7b0
JS
1985int
1986lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 1987{
def9c7a9
JS
1988 if (phba->sli_rev == LPFC_SLI_REV4)
1989 return 0;
1990 else
1991 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1992 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 1993}
ed957684 1994
e59058c4 1995/**
3621a710 1996 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
1997 * @phba: Pointer to HBA context object.
1998 * @qno: HBQ queue number.
1999 *
2000 * This function is called from SLI initialization code path with
2001 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 2002 * function returns the number of HBQ entries successfully allocated.
e59058c4 2003 **/
a6ababd2 2004static int
92d7f7b0
JS
2005lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2006{
def9c7a9
JS
2007 if (phba->sli_rev == LPFC_SLI_REV4)
2008 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 2009 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
2010 else
2011 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2012 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
2013}
2014
3772a991
JS
2015/**
2016 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2017 * @phba: Pointer to HBA context object.
2018 * @hbqno: HBQ number.
2019 *
2020 * This function removes the first hbq buffer on an hbq list and returns a
2021 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2022 **/
2023static struct hbq_dmabuf *
2024lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2025{
2026 struct lpfc_dmabuf *d_buf;
2027
2028 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2029 if (!d_buf)
2030 return NULL;
2031 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2032}
2033
2d7dbc4c
JS
2034/**
2035 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2036 * @phba: Pointer to HBA context object.
2037 * @hbqno: HBQ number.
2038 *
2039 * This function removes the first RQ buffer on an RQ buffer list and returns a
2040 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2041 **/
2042static struct rqb_dmabuf *
2043lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2044{
2045 struct lpfc_dmabuf *h_buf;
2046 struct lpfc_rqb *rqbp;
2047
2048 rqbp = hrq->rqbp;
2049 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2050 struct lpfc_dmabuf, list);
2051 if (!h_buf)
2052 return NULL;
2053 rqbp->buffer_count--;
2054 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2055}
2056
e59058c4 2057/**
3621a710 2058 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
2059 * @phba: Pointer to HBA context object.
2060 * @tag: Tag of the hbq buffer.
2061 *
71892418
SH
2062 * This function searches for the hbq buffer associated with the given tag in
2063 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2064 * otherwise it returns NULL.
e59058c4 2065 **/
a6ababd2 2066static struct hbq_dmabuf *
92d7f7b0 2067lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 2068{
92d7f7b0
JS
2069 struct lpfc_dmabuf *d_buf;
2070 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
2071 uint32_t hbqno;
2072
2073 hbqno = tag >> 16;
a0a74e45 2074 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 2075 return NULL;
ed957684 2076
3772a991 2077 spin_lock_irq(&phba->hbalock);
51ef4c26 2078 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 2079 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 2080 if (hbq_buf->tag == tag) {
3772a991 2081 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2082 return hbq_buf;
ed957684
JS
2083 }
2084 }
3772a991 2085 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2086 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2087 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2088 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2089 return NULL;
ed957684
JS
2090}
2091
e59058c4 2092/**
3621a710 2093 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2094 * @phba: Pointer to HBA context object.
2095 * @hbq_buffer: Pointer to HBQ buffer.
2096 *
2097 * This function is called with the hbalock held. It gives back
2098 * the hbq buffer to the firmware. If the HBQ does not have space to
2099 * post the buffer, it will free the buffer.
2100 **/
ed957684 2101void
51ef4c26 2102lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2103{
2104 uint32_t hbqno;
2105
51ef4c26
JS
2106 if (hbq_buffer) {
2107 hbqno = hbq_buffer->tag >> 16;
3772a991 2108 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2109 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2110 }
2111}
2112
e59058c4 2113/**
3621a710 2114 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2115 * @mbxCommand: mailbox command code.
2116 *
2117 * This function is called by the mailbox event handler function to verify
2118 * that the completed mailbox command is a legitimate mailbox command. If the
2119 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2120 * and the mailbox event handler will take the HBA offline.
2121 **/
dea3101e 2122static int
2123lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2124{
2125 uint8_t ret;
2126
2127 switch (mbxCommand) {
2128 case MBX_LOAD_SM:
2129 case MBX_READ_NV:
2130 case MBX_WRITE_NV:
a8adb832 2131 case MBX_WRITE_VPARMS:
dea3101e 2132 case MBX_RUN_BIU_DIAG:
2133 case MBX_INIT_LINK:
2134 case MBX_DOWN_LINK:
2135 case MBX_CONFIG_LINK:
2136 case MBX_CONFIG_RING:
2137 case MBX_RESET_RING:
2138 case MBX_READ_CONFIG:
2139 case MBX_READ_RCONFIG:
2140 case MBX_READ_SPARM:
2141 case MBX_READ_STATUS:
2142 case MBX_READ_RPI:
2143 case MBX_READ_XRI:
2144 case MBX_READ_REV:
2145 case MBX_READ_LNK_STAT:
2146 case MBX_REG_LOGIN:
2147 case MBX_UNREG_LOGIN:
dea3101e 2148 case MBX_CLEAR_LA:
2149 case MBX_DUMP_MEMORY:
2150 case MBX_DUMP_CONTEXT:
2151 case MBX_RUN_DIAGS:
2152 case MBX_RESTART:
2153 case MBX_UPDATE_CFG:
2154 case MBX_DOWN_LOAD:
2155 case MBX_DEL_LD_ENTRY:
2156 case MBX_RUN_PROGRAM:
2157 case MBX_SET_MASK:
09372820 2158 case MBX_SET_VARIABLE:
dea3101e 2159 case MBX_UNREG_D_ID:
41415862 2160 case MBX_KILL_BOARD:
dea3101e 2161 case MBX_CONFIG_FARP:
41415862 2162 case MBX_BEACON:
dea3101e 2163 case MBX_LOAD_AREA:
2164 case MBX_RUN_BIU_DIAG64:
2165 case MBX_CONFIG_PORT:
2166 case MBX_READ_SPARM64:
2167 case MBX_READ_RPI64:
2168 case MBX_REG_LOGIN64:
76a95d75 2169 case MBX_READ_TOPOLOGY:
09372820 2170 case MBX_WRITE_WWN:
dea3101e 2171 case MBX_SET_DEBUG:
2172 case MBX_LOAD_EXP_ROM:
57127f15 2173 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2174 case MBX_REG_VPI:
2175 case MBX_UNREG_VPI:
858c9f6c 2176 case MBX_HEARTBEAT:
84774a4d
JS
2177 case MBX_PORT_CAPABILITIES:
2178 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2179 case MBX_SLI4_CONFIG:
2180 case MBX_SLI4_REQ_FTRS:
2181 case MBX_REG_FCFI:
2182 case MBX_UNREG_FCFI:
2183 case MBX_REG_VFI:
2184 case MBX_UNREG_VFI:
2185 case MBX_INIT_VPI:
2186 case MBX_INIT_VFI:
2187 case MBX_RESUME_RPI:
c7495937
JS
2188 case MBX_READ_EVENT_LOG_STATUS:
2189 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2190 case MBX_SECURITY_MGMT:
2191 case MBX_AUTH_PORT:
940eb687 2192 case MBX_ACCESS_VDATA:
dea3101e 2193 ret = mbxCommand;
2194 break;
2195 default:
2196 ret = MBX_SHUTDOWN;
2197 break;
2198 }
2e0fef85 2199 return ret;
dea3101e 2200}
e59058c4
JS
2201
2202/**
3621a710 2203 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2204 * @phba: Pointer to HBA context object.
2205 * @pmboxq: Pointer to mailbox command.
2206 *
2207 * This is the completion handler function for mailbox commands issued from
2208 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2209 * mailbox event handler function with no lock held. This function
2210 * will wake up the thread waiting on the wait queue pointed to by context1
2211 * of the mailbox.
2212 **/
04c68496 2213void
2e0fef85 2214lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2215{
2216 wait_queue_head_t *pdone_q;
858c9f6c 2217 unsigned long drvr_flag;
dea3101e 2218
2219 /*
2220 * If pdone_q is empty, the driver thread gave up waiting and
2221 * continued running.
2222 */
7054a606 2223 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2224 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 2225 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2226 if (pdone_q)
2227 wake_up_interruptible(pdone_q);
858c9f6c 2228 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2229 return;
2230}
2231
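/*
 * Editor's note: a hypothetical sketch, not driver code, of the
 * waiter side that pairs with lpfc_sli_wake_mbox_wait() above,
 * approximating what lpfc_sli_issue_mbox_wait() does: park a wait
 * queue head in context1, issue the mailbox, then sleep until the
 * completion handler sets LPFC_MBX_WAKE (the real routine also
 * serializes access to context1 under the hbalock).
 */
static int issue_mbox_and_wait_sketch(struct lpfc_hba *phba,
                                      LPFC_MBOXQ_t *pmboxq,
                                      uint32_t timeout_ms)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
        pmboxq->context1 = &done_q;
        pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;

        if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
                return MBX_NOT_FINISHED;

        wait_event_interruptible_timeout(done_q,
                        pmboxq->mbox_flag & LPFC_MBX_WAKE,
                        msecs_to_jiffies(timeout_ms));

        return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? MBX_SUCCESS : MBX_TIMEOUT;
}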
e59058c4
JS
2232
2233/**
3621a710 2234 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2235 * @phba: Pointer to HBA context object.
2236 * @pmb: Pointer to mailbox object.
2237 *
2238 * This function is the default mailbox completion handler. It
2239 * frees the memory resources associated with the completed mailbox
2240 * command. If the completed command is a REG_LOGIN mailbox command,
2241 * this function will issue a UREG_LOGIN to re-claim the RPI.
2242 **/
dea3101e 2243void
2e0fef85 2244lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2245{
d439d286 2246 struct lpfc_vport *vport = pmb->vport;
dea3101e 2247 struct lpfc_dmabuf *mp;
d439d286 2248 struct lpfc_nodelist *ndlp;
5af5eee7 2249 struct Scsi_Host *shost;
04c68496 2250 uint16_t rpi, vpi;
7054a606
JS
2251 int rc;
2252
dea3101e 2253 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 2254
dea3101e 2255 if (mp) {
2256 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2257 kfree(mp);
2258 }
7054a606
JS
2259
2260 /*
2261 * If a REG_LOGIN succeeded after node is destroyed or node
2262 * is in re-discovery driver need to cleanup the RPI.
2263 */
2e0fef85 2264 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2265 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2266 !pmb->u.mb.mbxStatus) {
2267 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2268 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2269 lpfc_unreg_login(phba, vpi, rpi, pmb);
de96e9c5 2270 pmb->vport = vport;
92d7f7b0 2271 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2272 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2273 if (rc != MBX_NOT_FINISHED)
2274 return;
2275 }
2276
695a814e
JS
2277 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2278 !(phba->pport->load_flag & FC_UNLOADING) &&
2279 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2280 shost = lpfc_shost_from_vport(vport);
2281 spin_lock_irq(shost->host_lock);
2282 vport->vpi_state |= LPFC_VPI_REGISTERED;
2283 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2284 spin_unlock_irq(shost->host_lock);
695a814e
JS
2285 }
2286
d439d286
JS
2287 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2288 ndlp = (struct lpfc_nodelist *)pmb->context2;
2289 lpfc_nlp_put(ndlp);
2290 pmb->context2 = NULL;
2291 }
2292
dcf2a4e0
JS
2293 /* Check security permission status on INIT_LINK mailbox command */
2294 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2295 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2296 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2297 "2860 SLI authentication is required "
2298 "for INIT_LINK but has not done yet\n");
2299
04c68496
JS
2300 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2301 lpfc_sli4_mbox_cmd_free(phba, pmb);
2302 else
2303 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2304}
be6bb941
JS
2305/**
2306 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2307 * @phba: Pointer to HBA context object.
2308 * @pmb: Pointer to mailbox object.
2309 *
2310 * This function is the unreg rpi mailbox completion handler. It
2311 * frees the memory resources associated with the completed mailbox
2312 * command. An additional reference is put on the ndlp to prevent
2313 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2314 * the unreg mailbox command completes; this routine puts the
2315 * reference back.
2316 *
2317 **/
2318void
2319lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2320{
2321 struct lpfc_vport *vport = pmb->vport;
2322 struct lpfc_nodelist *ndlp;
2323
2324 ndlp = pmb->context1;
2325 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2326 if (phba->sli_rev == LPFC_SLI_REV4 &&
2327 (bf_get(lpfc_sli_intf_if_type,
2328 &phba->sli4_hba.sli_intf) ==
2329 LPFC_SLI_INTF_IF_TYPE_2)) {
2330 if (ndlp) {
2331 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2332 "0010 UNREG_LOGIN vpi:%x "
2333 "rpi:%x DID:%x map:%x %p\n",
2334 vport->vpi, ndlp->nlp_rpi,
2335 ndlp->nlp_DID,
2336 ndlp->nlp_usg_map, ndlp);
7c5e518c 2337 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
be6bb941
JS
2338 lpfc_nlp_put(ndlp);
2339 }
2340 }
2341 }
2342
2343 mempool_free(pmb, phba->mbox_mem_pool);
2344}
dea3101e 2345
e59058c4 2346/**
3621a710 2347 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2348 * @phba: Pointer to HBA context object.
2349 *
2350 * This function is called with no lock held. This function processes all
2351 * the completed mailbox commands and gives them to the upper layers. The
2352 * interrupt service routine processes the mailbox completion interrupt, adds
2353 * completed mailbox commands to the mboxq_cmpl queue, and signals the worker
2354 * thread. The worker thread calls lpfc_sli_handle_mb_event, which returns the
2355 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2356 * function returns the mailbox commands to the upper layer by calling the
2357 * completion handler function of each mailbox.
2358 **/
dea3101e 2359int
2e0fef85 2360lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2361{
92d7f7b0 2362 MAILBOX_t *pmbox;
dea3101e 2363 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2364 int rc;
2365 LIST_HEAD(cmplq);
dea3101e 2366
2367 phba->sli.slistat.mbox_event++;
2368
92d7f7b0
JS
2369 /* Get all completed mailbox buffers into the cmplq */
2370 spin_lock_irq(&phba->hbalock);
2371 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2372 spin_unlock_irq(&phba->hbalock);
dea3101e 2373
92d7f7b0
JS
2374 /* Get a Mailbox buffer to setup mailbox commands for callback */
2375 do {
2376 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2377 if (pmb == NULL)
2378 break;
2e0fef85 2379
04c68496 2380 pmbox = &pmb->u.mb;
dea3101e 2381
858c9f6c
JS
2382 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2383 if (pmb->vport) {
2384 lpfc_debugfs_disc_trc(pmb->vport,
2385 LPFC_DISC_TRC_MBOX_VPORT,
2386 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2387 (uint32_t)pmbox->mbxCommand,
2388 pmbox->un.varWords[0],
2389 pmbox->un.varWords[1]);
2390 }
2391 else {
2392 lpfc_debugfs_disc_trc(phba->pport,
2393 LPFC_DISC_TRC_MBOX,
2394 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2395 (uint32_t)pmbox->mbxCommand,
2396 pmbox->un.varWords[0],
2397 pmbox->un.varWords[1]);
2398 }
2399 }
2400
dea3101e 2401 /*
2402 * It is a fatal error if unknown mbox command completion.
2403 */
2404 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2405 MBX_SHUTDOWN) {
af901ca1 2406 /* Unknown mailbox command compl */
92d7f7b0 2407 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2408 "(%d):0323 Unknown Mailbox command "
a183a15f 2409 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2410 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2411 pmbox->mbxCommand,
a183a15f
JS
2412 lpfc_sli_config_mbox_subsys_get(phba,
2413 pmb),
2414 lpfc_sli_config_mbox_opcode_get(phba,
2415 pmb));
2e0fef85 2416 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2417 phba->work_hs = HS_FFER3;
2418 lpfc_handle_eratt(phba);
92d7f7b0 2419 continue;
dea3101e 2420 }
2421
dea3101e 2422 if (pmbox->mbxStatus) {
2423 phba->sli.slistat.mbox_stat_err++;
2424 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2425 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2426 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2427 LOG_MBOX | LOG_SLI,
2428 "(%d):0305 Mbox cmd cmpl "
2429 "error - RETRYing Data: x%x "
2430 "(x%x/x%x) x%x x%x x%x\n",
2431 pmb->vport ? pmb->vport->vpi : 0,
2432 pmbox->mbxCommand,
2433 lpfc_sli_config_mbox_subsys_get(phba,
2434 pmb),
2435 lpfc_sli_config_mbox_opcode_get(phba,
2436 pmb),
2437 pmbox->mbxStatus,
2438 pmbox->un.varWords[0],
2439 pmb->vport->port_state);
dea3101e 2440 pmbox->mbxStatus = 0;
2441 pmbox->mbxOwner = OWN_HOST;
dea3101e 2442 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2443 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2444 continue;
dea3101e 2445 }
2446 }
2447
2448 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2449 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 2450 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
e74c03c8
JS
2451 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2452 "x%x x%x x%x\n",
92d7f7b0 2453 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2454 pmbox->mbxCommand,
a183a15f
JS
2455 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2456 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2457 pmb->mbox_cmpl,
2458 *((uint32_t *) pmbox),
2459 pmbox->un.varWords[0],
2460 pmbox->un.varWords[1],
2461 pmbox->un.varWords[2],
2462 pmbox->un.varWords[3],
2463 pmbox->un.varWords[4],
2464 pmbox->un.varWords[5],
2465 pmbox->un.varWords[6],
e74c03c8
JS
2466 pmbox->un.varWords[7],
2467 pmbox->un.varWords[8],
2468 pmbox->un.varWords[9],
2469 pmbox->un.varWords[10]);
dea3101e 2470
92d7f7b0 2471 if (pmb->mbox_cmpl)
dea3101e 2472 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2473 } while (1);
2474 return 0;
2475}
dea3101e 2476
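/*
 * Editor's note: a minimal sketch, not driver code, of the
 * splice-then-process idiom used by lpfc_sli_handle_mb_event()
 * above: empty the shared completion list onto a local list under
 * the lock, then walk it with the lock dropped so that completion
 * handlers are free to sleep or issue new mailbox commands.
 */
static void drain_mbox_cmpl_sketch(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb;
        LIST_HEAD(cmplq);

        spin_lock_irq(&phba->hbalock);
        list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
        spin_unlock_irq(&phba->hbalock);

        do {
                list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
                if (!pmb)
                        break;
                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba, pmb);
        } while (1);
}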
e59058c4 2477/**
3621a710 2478 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2479 * @phba: Pointer to HBA context object.
2480 * @pring: Pointer to driver SLI ring object.
2481 * @tag: buffer tag.
2482 *
2483 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2484 * is set in the tag, the buffer was posted for a particular exchange,
2485 * and the function will return the buffer without replacing it.
2486 * If the buffer is for unsolicited ELS or CT traffic, this function
2487 * returns the buffer and also posts another buffer to the firmware.
2488 **/
76bb24ef
JS
2489static struct lpfc_dmabuf *
2490lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2491 struct lpfc_sli_ring *pring,
2492 uint32_t tag)
76bb24ef 2493{
9f1e1b50
JS
2494 struct hbq_dmabuf *hbq_entry;
2495
76bb24ef
JS
2496 if (tag & QUE_BUFTAG_BIT)
2497 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2498 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2499 if (!hbq_entry)
2500 return NULL;
2501 return &hbq_entry->dbuf;
76bb24ef 2502}
57127f15 2503
3772a991
JS
2504/**
2505 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2506 * @phba: Pointer to HBA context object.
2507 * @pring: Pointer to driver SLI ring object.
2508 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2509 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2510 * @fch_type: the type for the first frame of the sequence.
2511 *
2512 * This function is called with no lock held. This function uses the r_ctl and
2513 * type of the received sequence to find the correct callback function to call
2514 * to process the sequence.
2515 **/
2516static int
2517lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2518 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2519 uint32_t fch_type)
2520{
2521 int i;
2522
f358dd0c
JS
2523 switch (fch_type) {
2524 case FC_TYPE_NVME:
d613b6a7 2525 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
f358dd0c
JS
2526 return 1;
2527 default:
2528 break;
2529 }
2530
3772a991
JS
2531 /* unSolicited Responses */
2532 if (pring->prt[0].profile) {
2533 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2534 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2535 saveq);
2536 return 1;
2537 }
2538 /* We must search, based on rctl / type,
2539 * for the right routine */
2540 for (i = 0; i < pring->num_mask; i++) {
2541 if ((pring->prt[i].rctl == fch_r_ctl) &&
2542 (pring->prt[i].type == fch_type)) {
2543 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2544 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2545 (phba, pring, saveq);
2546 return 1;
2547 }
2548 }
2549 return 0;
2550}
e59058c4
JS
2551
2552/**
3621a710 2553 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2554 * @phba: Pointer to HBA context object.
2555 * @pring: Pointer to driver SLI ring object.
2556 * @saveq: Pointer to the unsolicited iocb.
2557 *
2558 * This function is called with no lock held by the ring event handler
2559 * when there is an unsolicited iocb posted to the response ring by the
2560 * firmware. This function gets the buffer associated with the iocbs
2561 * and calls the event handler for the ring. This function handles both
2562 * qring buffers and hbq buffers.
2563 * When the function returns 1, the caller can free the iocb object; otherwise
2564 * upper layer functions will free the iocb objects.
2565 **/
dea3101e 2566static int
2567lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2568 struct lpfc_iocbq *saveq)
2569{
2570 IOCB_t * irsp;
2571 WORD5 * w5p;
2572 uint32_t Rctl, Type;
76bb24ef 2573 struct lpfc_iocbq *iocbq;
3163f725 2574 struct lpfc_dmabuf *dmzbuf;
dea3101e 2575
dea3101e 2576 irsp = &(saveq->iocb);
57127f15
JS
2577
2578 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2579 if (pring->lpfc_sli_rcv_async_status)
2580 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2581 else
2582 lpfc_printf_log(phba,
2583 KERN_WARNING,
2584 LOG_SLI,
2585 "0316 Ring %d handler: unexpected "
2586 "ASYNC_STATUS iocb received evt_code "
2587 "0x%x\n",
2588 pring->ringno,
2589 irsp->un.asyncstat.evt_code);
2590 return 1;
2591 }
2592
3163f725
JS
2593 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2594 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2595 if (irsp->ulpBdeCount > 0) {
2596 dmzbuf = lpfc_sli_get_buff(phba, pring,
2597 irsp->un.ulpWord[3]);
2598 lpfc_in_buf_free(phba, dmzbuf);
2599 }
2600
2601 if (irsp->ulpBdeCount > 1) {
2602 dmzbuf = lpfc_sli_get_buff(phba, pring,
2603 irsp->unsli3.sli3Words[3]);
2604 lpfc_in_buf_free(phba, dmzbuf);
2605 }
2606
2607 if (irsp->ulpBdeCount > 2) {
2608 dmzbuf = lpfc_sli_get_buff(phba, pring,
2609 irsp->unsli3.sli3Words[7]);
2610 lpfc_in_buf_free(phba, dmzbuf);
2611 }
2612
2613 return 1;
2614 }
2615
92d7f7b0 2616 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2617 if (irsp->ulpBdeCount != 0) {
2618 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2619 irsp->un.ulpWord[3]);
2620 if (!saveq->context2)
2621 lpfc_printf_log(phba,
2622 KERN_ERR,
2623 LOG_SLI,
2624 "0341 Ring %d Cannot find buffer for "
2625 "an unsolicited iocb. tag 0x%x\n",
2626 pring->ringno,
2627 irsp->un.ulpWord[3]);
76bb24ef
JS
2628 }
2629 if (irsp->ulpBdeCount == 2) {
2630 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2631 irsp->unsli3.sli3Words[7]);
2632 if (!saveq->context3)
2633 lpfc_printf_log(phba,
2634 KERN_ERR,
2635 LOG_SLI,
2636 "0342 Ring %d Cannot find buffer for an"
2637 " unsolicited iocb. tag 0x%x\n",
2638 pring->ringno,
2639 irsp->unsli3.sli3Words[7]);
2640 }
2641 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2642 irsp = &(iocbq->iocb);
76bb24ef
JS
2643 if (irsp->ulpBdeCount != 0) {
2644 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2645 irsp->un.ulpWord[3]);
9c2face6 2646 if (!iocbq->context2)
76bb24ef
JS
2647 lpfc_printf_log(phba,
2648 KERN_ERR,
2649 LOG_SLI,
2650 "0343 Ring %d Cannot find "
2651 "buffer for an unsolicited iocb"
2652 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2653 irsp->un.ulpWord[3]);
76bb24ef
JS
2654 }
2655 if (irsp->ulpBdeCount == 2) {
2656 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2657 irsp->unsli3.sli3Words[7]);
9c2face6 2658 if (!iocbq->context3)
76bb24ef
JS
2659 lpfc_printf_log(phba,
2660 KERN_ERR,
2661 LOG_SLI,
2662 "0344 Ring %d Cannot find "
2663 "buffer for an unsolicited "
2664 "iocb. tag 0x%x\n",
2665 pring->ringno,
2666 irsp->unsli3.sli3Words[7]);
2667 }
2668 }
92d7f7b0 2669 }
9c2face6
JS
2670 if (irsp->ulpBdeCount != 0 &&
2671 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2672 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2673 int found = 0;
2674
2675 /* search continue save q for same XRI */
2676 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2677 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2678 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2679 list_add_tail(&saveq->list, &iocbq->list);
2680 found = 1;
2681 break;
2682 }
2683 }
2684 if (!found)
2685 list_add_tail(&saveq->clist,
2686 &pring->iocb_continue_saveq);
2687 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2688 list_del_init(&iocbq->clist);
2689 saveq = iocbq;
2690 irsp = &(saveq->iocb);
2691 } else
2692 return 0;
2693 }
2694 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2695 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2696 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2697 Rctl = FC_RCTL_ELS_REQ;
2698 Type = FC_TYPE_ELS;
9c2face6
JS
2699 } else {
2700 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2701 Rctl = w5p->hcsw.Rctl;
2702 Type = w5p->hcsw.Type;
2703
2704 /* Firmware Workaround */
2705 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2706 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2707 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2708 Rctl = FC_RCTL_ELS_REQ;
2709 Type = FC_TYPE_ELS;
9c2face6
JS
2710 w5p->hcsw.Rctl = Rctl;
2711 w5p->hcsw.Type = Type;
2712 }
2713 }
92d7f7b0 2714
3772a991 2715 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2716 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2717 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2718 "Type x%x received\n",
e8b62011 2719 pring->ringno, Rctl, Type);
3772a991 2720
92d7f7b0 2721 return 1;
dea3101e 2722}
2723
e59058c4 2724/**
3621a710 2725 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2726 * @phba: Pointer to HBA context object.
2727 * @pring: Pointer to driver SLI ring object.
2728 * @prspiocb: Pointer to response iocb object.
2729 *
2730 * This function looks up the iocb_lookup table to get the command iocb
2731 * corresponding to the given response iocb using the iotag of the
2732 * response iocb. This function is called with the hbalock held.
2733 * This function returns the command iocb object if it finds the command
2734 * iocb else returns NULL.
2735 **/
dea3101e 2736static struct lpfc_iocbq *
2e0fef85
JS
2737lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2738 struct lpfc_sli_ring *pring,
2739 struct lpfc_iocbq *prspiocb)
dea3101e 2740{
dea3101e 2741 struct lpfc_iocbq *cmd_iocb = NULL;
2742 uint16_t iotag;
1c2ba475 2743 lockdep_assert_held(&phba->hbalock);
dea3101e 2744
604a3e30
JB
2745 iotag = prspiocb->iocb.ulpIoTag;
2746
2747 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2748 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6 2749 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
89533e9b
JS
2750 /* remove from txcmpl queue list */
2751 list_del_init(&cmd_iocb->list);
4f2e66c6 2752 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
89533e9b 2753 return cmd_iocb;
2a9bf3d0 2754 }
dea3101e 2755 }
2756
dea3101e 2757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
89533e9b 2758 "0317 iotag x%x is out of "
604a3e30 2759 "range: max iotag x%x wd0 x%x\n",
e8b62011 2760 iotag, phba->sli.last_iotag,
604a3e30 2761 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 2762 return NULL;
2763}
2764
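/*
 * Editor's note: a minimal sketch, not driver code, of the lookup
 * contract implemented above: the response's ulpIoTag indexes
 * directly into iocbq_lookup (O(1), no list walk), and a hit is only
 * returned while the command is still on the txcmplq, which guards
 * against stale or duplicate completions for a recycled iotag.
 */
static struct lpfc_iocbq *iotag_lookup_sketch(struct lpfc_sli *psli,
                                              uint16_t iotag)
{
        struct lpfc_iocbq *cmd = NULL;

        if (iotag != 0 && iotag <= psli->last_iotag)
                cmd = psli->iocbq_lookup[iotag];
        if (cmd && (cmd->iocb_flag & LPFC_IO_ON_TXCMPLQ))
                return cmd;
        return NULL;
}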
3772a991
JS
2765/**
2766 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2767 * @phba: Pointer to HBA context object.
2768 * @pring: Pointer to driver SLI ring object.
2769 * @iotag: IOCB tag.
2770 *
2771 * This function looks up the iocb_lookup table to get the command iocb
2772 * corresponding to the given iotag. This function is called with the
2773 * hbalock held.
2774 * This function returns the command iocb object if it finds the command
2775 * iocb else returns NULL.
2776 **/
2777static struct lpfc_iocbq *
2778lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2779 struct lpfc_sli_ring *pring, uint16_t iotag)
2780{
895427bd 2781 struct lpfc_iocbq *cmd_iocb = NULL;
3772a991 2782
1c2ba475 2783 lockdep_assert_held(&phba->hbalock);
3772a991
JS
2784 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2785 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
2786 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2787 /* remove from txcmpl queue list */
2788 list_del_init(&cmd_iocb->list);
2789 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4f2e66c6 2790 return cmd_iocb;
2a9bf3d0 2791 }
3772a991 2792 }
89533e9b 2793
3772a991 2794 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd
JS
2795 "0372 iotag x%x lookup error: max iotag (x%x) "
2796 "iocb_flag x%x\n",
2797 iotag, phba->sli.last_iotag,
2798 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3772a991
JS
2799 return NULL;
2800}
2801
e59058c4 2802/**
3621a710 2803 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
2804 * @phba: Pointer to HBA context object.
2805 * @pring: Pointer to driver SLI ring object.
2806 * @saveq: Pointer to the response iocb to be processed.
2807 *
2808 * This function is called by the ring event handler for non-fcp
2809 * rings when there is a new response iocb in the response ring.
2810 * The caller is not required to hold any locks. This function
2811 * gets the command iocb associated with the response iocb and
2812 * calls the completion handler for the command iocb. If there
2813 * is no completion handler, the function will free the resources
2814 * associated with the command iocb. If the response iocb is for
2815 * an already aborted command iocb, the status of the completion
2816 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2817 * This function always returns 1.
2818 **/
dea3101e 2819static int
2e0fef85 2820lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 2821 struct lpfc_iocbq *saveq)
2822{
2e0fef85 2823 struct lpfc_iocbq *cmdiocbp;
dea3101e 2824 int rc = 1;
2825 unsigned long iflag;
2826
2827 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 2828 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 2829 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2e0fef85
JS
2830 spin_unlock_irqrestore(&phba->hbalock, iflag);
2831
dea3101e 2832 if (cmdiocbp) {
2833 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
2834 /*
2835 * If an ELS command failed send an event to mgmt
2836 * application.
2837 */
2838 if (saveq->iocb.ulpStatus &&
2839 (pring->ringno == LPFC_ELS_RING) &&
2840 (cmdiocbp->iocb.ulpCommand ==
2841 CMD_ELS_REQUEST64_CR))
2842 lpfc_send_els_failure_event(phba,
2843 cmdiocbp, saveq);
2844
dea3101e 2845 /*
2846 * Post all ELS completions to the worker thread.
2847 * All other are passed to the completion callback.
2848 */
2849 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
2850 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2851 (cmdiocbp->iocb_flag &
2852 LPFC_DRIVER_ABORTED)) {
2853 spin_lock_irqsave(&phba->hbalock,
2854 iflag);
07951076
JS
2855 cmdiocbp->iocb_flag &=
2856 ~LPFC_DRIVER_ABORTED;
341af102
JS
2857 spin_unlock_irqrestore(&phba->hbalock,
2858 iflag);
07951076
JS
2859 saveq->iocb.ulpStatus =
2860 IOSTAT_LOCAL_REJECT;
2861 saveq->iocb.un.ulpWord[4] =
2862 IOERR_SLI_ABORTED;
0ff10d46
JS
2863
2864 /* Firmware could still be in progress
2865 * of DMAing payload, so don't free data
2866 * buffer till after a hbeat.
2867 */
341af102
JS
2868 spin_lock_irqsave(&phba->hbalock,
2869 iflag);
0ff10d46 2870 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
2871 spin_unlock_irqrestore(&phba->hbalock,
2872 iflag);
2873 }
0f65ff68
JS
2874 if (phba->sli_rev == LPFC_SLI_REV4) {
2875 if (saveq->iocb_flag &
2876 LPFC_EXCHANGE_BUSY) {
2877 /* Set cmdiocb flag for the
2878 * exchange busy so sgl (xri)
2879 * will not be released until
2880 * the abort xri is received
2881 * from hba.
2882 */
2883 spin_lock_irqsave(
2884 &phba->hbalock, iflag);
2885 cmdiocbp->iocb_flag |=
2886 LPFC_EXCHANGE_BUSY;
2887 spin_unlock_irqrestore(
2888 &phba->hbalock, iflag);
2889 }
2890 if (cmdiocbp->iocb_flag &
2891 LPFC_DRIVER_ABORTED) {
2892 /*
2893 * Clear LPFC_DRIVER_ABORTED
2894 * bit in case it was driver
2895 * initiated abort.
2896 */
2897 spin_lock_irqsave(
2898 &phba->hbalock, iflag);
2899 cmdiocbp->iocb_flag &=
2900 ~LPFC_DRIVER_ABORTED;
2901 spin_unlock_irqrestore(
2902 &phba->hbalock, iflag);
2903 cmdiocbp->iocb.ulpStatus =
2904 IOSTAT_LOCAL_REJECT;
2905 cmdiocbp->iocb.un.ulpWord[4] =
2906 IOERR_ABORT_REQUESTED;
2907 /*
2908 * For SLI4, irsiocb contains
2909 * NO_XRI in sli_xritag, it
2910 * shall not affect releasing
2911 * sgl (xri) process.
2912 */
2913 saveq->iocb.ulpStatus =
2914 IOSTAT_LOCAL_REJECT;
2915 saveq->iocb.un.ulpWord[4] =
2916 IOERR_SLI_ABORTED;
2917 spin_lock_irqsave(
2918 &phba->hbalock, iflag);
2919 saveq->iocb_flag |=
2920 LPFC_DELAY_MEM_FREE;
2921 spin_unlock_irqrestore(
2922 &phba->hbalock, iflag);
2923 }
07951076 2924 }
dea3101e 2925 }
2e0fef85 2926 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
2927 } else
2928 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 2929 } else {
2930 /*
2931 * Unknown initiating command based on the response iotag.
2932 * This could be the case on the ELS ring because of
2933 * lpfc_els_abort().
2934 */
2935 if (pring->ringno != LPFC_ELS_RING) {
2936 /*
2937 * Ring <ringno> handler: unexpected completion IoTag
2938 * <IoTag>
2939 */
a257bf90 2940 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
2941 "0322 Ring %d handler: "
2942 "unexpected completion IoTag x%x "
2943 "Data: x%x x%x x%x x%x\n",
2944 pring->ringno,
2945 saveq->iocb.ulpIoTag,
2946 saveq->iocb.ulpStatus,
2947 saveq->iocb.un.ulpWord[4],
2948 saveq->iocb.ulpCommand,
2949 saveq->iocb.ulpContext);
dea3101e 2950 }
2951 }
68876920 2952
dea3101e 2953 return rc;
2954}
2955
e59058c4 2956/**
3621a710 2957 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
2958 * @phba: Pointer to HBA context object.
2959 * @pring: Pointer to driver SLI ring object.
2960 *
2961 * This function is called from the iocb ring event handlers when
2962 * put pointer is ahead of the get pointer for a ring. This function signal
2963 * an error attention condition to the worker thread and the worker
2964 * thread will transition the HBA to offline state.
2965 **/
2e0fef85
JS
2966static void
2967lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 2968{
34b02dcd 2969 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 2970 /*
025dfdaf 2971 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
2972 * rsp ring <portRspMax>
2973 */
2974 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2975 "0312 Ring %d handler: portRspPut %d "
025dfdaf 2976 "is bigger than rsp ring %d\n",
e8b62011 2977 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 2978 pring->sli.sli3.numRiocb);
875fbdfe 2979
2e0fef85 2980 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
2981
2982 /*
2983 * All error attention handlers are posted to
2984 * worker thread
2985 */
2986 phba->work_ha |= HA_ERATT;
2987 phba->work_hs = HS_FFER3;
92d7f7b0 2988
5e9d9b82 2989 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
2990
2991 return;
2992}
2993
9399627f 2994/**
3621a710 2995 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
2996 * @ptr: Pointer to address of HBA context object.
2997 *
2998 * This function is invoked by the Error Attention polling timer when the
2999 * timer times out. It will check the SLI Error Attention register for
3000 * possible attention events. If so, it will post an Error Attention event
3001 * and wake up worker thread to process it. Otherwise, it will set up the
3002 * Error Attention polling timer for the next poll.
3003 **/
3004void lpfc_poll_eratt(unsigned long ptr)
3005{
3006 struct lpfc_hba *phba;
eb016566 3007 uint32_t eratt = 0;
aa6fbb75 3008 uint64_t sli_intr, cnt;
9399627f
JS
3009
3010 phba = (struct lpfc_hba *)ptr;
3011
aa6fbb75
JS
3012 /* Here we will also keep track of interrupts per sec of the hba */
3013 sli_intr = phba->sli.slistat.sli_intr;
3014
3015 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3016 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3017 sli_intr);
3018 else
3019 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3020
65791f1f
JS
3021 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3022 do_div(cnt, phba->eratt_poll_interval);
aa6fbb75
JS
3023 phba->sli.slistat.sli_ips = cnt;
3024
3025 phba->sli.slistat.sli_prev_intr = sli_intr;
3026
9399627f
JS
3027 /* Check chip HA register for error event */
3028 eratt = lpfc_sli_check_eratt(phba);
3029
3030 if (eratt)
3031 /* Tell the worker thread there is work to do */
3032 lpfc_worker_wake_up(phba);
3033 else
3034 /* Restart the timer for next eratt poll */
256ec0d0
JS
3035 mod_timer(&phba->eratt_poll,
3036 jiffies +
65791f1f 3037 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9399627f
JS
3038 return;
3039}
3040
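/*
 * Editor's note: worked example, not driver code, for the wrap-safe
 * interrupt-counter delta computed above. If the 64-bit counter has
 * wrapped (prev > now), the distance is what remained before the
 * counter's maximum plus the new count, e.g.
 *
 *      prev  = 0xfffffffffffffff0, now = 0x10
 *      delta = ((uint64_t)(-1) - prev) + now = 0xf + 0x10 = 0x1f
 *
 * (one tick short of the true distance across the wrap, which is
 * harmless for a statistics counter).
 */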
875fbdfe 3041
e59058c4 3042/**
3621a710 3043 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
3044 * @phba: Pointer to HBA context object.
3045 * @pring: Pointer to driver SLI ring object.
3046 * @mask: Host attention register mask for this ring.
3047 *
3048 * This function is called from the interrupt context when there is a ring
3049 * event for the fcp ring. The caller does not hold any lock.
3050 * The function processes each response iocb in the response ring until it
25985edc 3051 * finds an iocb with the LE bit set and chains all the iocbs up to the iocb with
e59058c4
JS
3052 * the LE bit set. The function will call the completion handler of the command iocb
3053 * if the response iocb indicates a completion for a command iocb or it is
3054 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3055 * function if this is an unsolicited iocb.
dea3101e 3056 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
3057 * to check it explicitly.
3058 */
3059int
2e0fef85
JS
3060lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3061 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3062{
34b02dcd 3063 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 3064 IOCB_t *irsp = NULL;
87f6eaff 3065 IOCB_t *entry = NULL;
dea3101e 3066 struct lpfc_iocbq *cmdiocbq = NULL;
3067 struct lpfc_iocbq rspiocbq;
dea3101e 3068 uint32_t status;
3069 uint32_t portRspPut, portRspMax;
3070 int rc = 1;
3071 lpfc_iocb_type type;
3072 unsigned long iflag;
3073 uint32_t rsp_cmpl = 0;
dea3101e 3074
2e0fef85 3075 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3076 pring->stats.iocb_event++;
3077
dea3101e 3078 /*
3079 * The next available response entry should never exceed the maximum
3080 * entries. If it does, treat it as an adapter hardware error.
3081 */
7e56aa25 3082 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3083 portRspPut = le32_to_cpu(pgp->rspPutInx);
3084 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 3085 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 3086 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3087 return 1;
3088 }
45ed1190
JS
3089 if (phba->fcp_ring_in_use) {
3090 spin_unlock_irqrestore(&phba->hbalock, iflag);
3091 return 1;
3092 } else
3093 phba->fcp_ring_in_use = 1;
dea3101e 3094
3095 rmb();
7e56aa25 3096 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
3097 /*
3098 * Fetch an entry off the ring and copy it into a local data
3099 * structure. The copy involves a byte-swap since the
3100 * network byte order and pci byte orders are different.
3101 */
ed957684 3102 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 3103 phba->last_completion_time = jiffies;
875fbdfe 3104
7e56aa25
JS
3105 if (++pring->sli.sli3.rspidx >= portRspMax)
3106 pring->sli.sli3.rspidx = 0;
875fbdfe 3107
87f6eaff
JSEC
3108 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3109 (uint32_t *) &rspiocbq.iocb,
ed957684 3110 phba->iocb_rsp_size);
a4bc3379 3111 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
3112 irsp = &rspiocbq.iocb;
3113
dea3101e 3114 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3115 pring->stats.iocb_rsp++;
3116 rsp_cmpl++;
3117
3118 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3119 /*
3120 * If resource errors reported from HBA, reduce
3121 * queuedepths of the SCSI device.
3122 */
3123 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3124 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3125 IOERR_NO_RESOURCES)) {
92d7f7b0 3126 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3127 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3128 spin_lock_irqsave(&phba->hbalock, iflag);
3129 }
3130
dea3101e 3131 /* Rsp ring <ringno> error: IOCB */
3132 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3133 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3134 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3135 pring->ringno,
92d7f7b0
JS
3136 irsp->un.ulpWord[0],
3137 irsp->un.ulpWord[1],
3138 irsp->un.ulpWord[2],
3139 irsp->un.ulpWord[3],
3140 irsp->un.ulpWord[4],
3141 irsp->un.ulpWord[5],
d7c255b2
JS
3142 *(uint32_t *)&irsp->un1,
3143 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3144 }
3145
3146 switch (type) {
3147 case LPFC_ABORT_IOCB:
3148 case LPFC_SOL_IOCB:
3149 /*
3150 * Idle exchange closed via ABTS from port. No iocb
3151 * resources need to be recovered.
3152 */
3153 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3154 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3155 "0333 IOCB cmd 0x%x"
dca9479b 3156 " processed. Skipping"
92d7f7b0 3157 " completion\n",
dca9479b 3158 irsp->ulpCommand);
dea3101e 3159 break;
3160 }
3161
604a3e30
JB
3162 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3163 &rspiocbq);
0f65ff68
JS
3164 if (unlikely(!cmdiocbq))
3165 break;
3166 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3167 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3168 if (cmdiocbq->iocb_cmpl) {
3169 spin_unlock_irqrestore(&phba->hbalock, iflag);
3170 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3171 &rspiocbq);
3172 spin_lock_irqsave(&phba->hbalock, iflag);
3173 }
dea3101e 3174 break;
a4bc3379 3175 case LPFC_UNSOL_IOCB:
2e0fef85 3176 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3177 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3178 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3179 break;
dea3101e 3180 default:
3181 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3182 char adaptermsg[LPFC_MAX_ADPTMSG];
3183 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3184 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3185 MAX_MSG_DATA);
898eb71c
JP
3186 dev_warn(&((phba->pcidev)->dev),
3187 "lpfc%d: %s\n",
dea3101e 3188 phba->brd_no, adaptermsg);
3189 } else {
3190 /* Unknown IOCB command */
3191 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3192 "0334 Unknown IOCB command "
92d7f7b0 3193 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3194 type, irsp->ulpCommand,
92d7f7b0
JS
3195 irsp->ulpStatus,
3196 irsp->ulpIoTag,
3197 irsp->ulpContext);
dea3101e 3198 }
3199 break;
3200 }
3201
3202 /*
3203 * The response IOCB has been processed. Update the ring
3204 * pointer in SLIM. If the port response put pointer has not
3205 * been updated, sync the pgp->rspPutInx and fetch the new port
3206 * response put pointer.
3207 */
7e56aa25
JS
3208 writel(pring->sli.sli3.rspidx,
3209 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3210
7e56aa25 3211 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3212 portRspPut = le32_to_cpu(pgp->rspPutInx);
3213 }
3214
3215 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3216 pring->stats.iocb_rsp_full++;
3217 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3218 writel(status, phba->CAregaddr);
3219 readl(phba->CAregaddr);
3220 }
3221 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3222 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3223 pring->stats.iocb_cmd_empty++;
3224
3225 /* Force update of the local copy of cmdGetInx */
7e56aa25 3226 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3227 lpfc_sli_resume_iocb(phba, pring);
3228
3229 if ((pring->lpfc_sli_cmd_available))
3230 (pring->lpfc_sli_cmd_available) (phba, pring);
3231
3232 }
3233
45ed1190 3234 phba->fcp_ring_in_use = 0;
2e0fef85 3235 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3236 return rc;
3237}
3238
e59058c4 3239/**
3772a991
JS
3240 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3241 * @phba: Pointer to HBA context object.
3242 * @pring: Pointer to driver SLI ring object.
3243 * @rspiocbp: Pointer to driver response IOCB object.
3244 *
3245 * This function is called from the worker thread when there is a slow-path
3246 * response IOCB to process. This function chains all the response iocbs until
3247 * seeing the iocb with the LE bit set. The function will call the
3248 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3249 * completion of a command iocb. The function will call the
3250 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3251 * The function frees the resources or calls the completion handler if this
3252 * iocb is an abort completion. The function returns NULL when the response
3253 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3254 * this function shall chain the iocb on to the iocb_continueq and return the
3255 * response iocb passed in.
3256 **/
3257static struct lpfc_iocbq *
3258lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3259 struct lpfc_iocbq *rspiocbp)
3260{
3261 struct lpfc_iocbq *saveq;
3262 struct lpfc_iocbq *cmdiocbp;
3263 struct lpfc_iocbq *next_iocb;
3264 IOCB_t *irsp = NULL;
3265 uint32_t free_saveq;
3266 uint8_t iocb_cmd_type;
3267 lpfc_iocb_type type;
3268 unsigned long iflag;
3269 int rc;
3270
3271 spin_lock_irqsave(&phba->hbalock, iflag);
3272 /* First add the response iocb to the continueq list */
3273 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3274 pring->iocb_continueq_cnt++;
3275
70f23fd6 3276 /* Now, determine whether the list is completed for processing */
3772a991
JS
3277 irsp = &rspiocbp->iocb;
3278 if (irsp->ulpLe) {
3279 /*
3280 * By default, the driver expects to free all resources
3281 * associated with this iocb completion.
3282 */
3283 free_saveq = 1;
3284 saveq = list_get_first(&pring->iocb_continueq,
3285 struct lpfc_iocbq, list);
3286 irsp = &(saveq->iocb);
3287 list_del_init(&pring->iocb_continueq);
3288 pring->iocb_continueq_cnt = 0;
3289
3290 pring->stats.iocb_rsp++;
3291
3292 /*
3293 * If resource errors reported from HBA, reduce
3294 * queuedepths of the SCSI device.
3295 */
3296 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3297 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3298 IOERR_NO_RESOURCES)) {
3772a991
JS
3299 spin_unlock_irqrestore(&phba->hbalock, iflag);
3300 phba->lpfc_rampdown_queue_depth(phba);
3301 spin_lock_irqsave(&phba->hbalock, iflag);
3302 }
3303
3304 if (irsp->ulpStatus) {
3305 /* Rsp ring <ringno> error: IOCB */
3306 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3307 "0328 Rsp Ring %d error: "
3308 "IOCB Data: "
3309 "x%x x%x x%x x%x "
3310 "x%x x%x x%x x%x "
3311 "x%x x%x x%x x%x "
3312 "x%x x%x x%x x%x\n",
3313 pring->ringno,
3314 irsp->un.ulpWord[0],
3315 irsp->un.ulpWord[1],
3316 irsp->un.ulpWord[2],
3317 irsp->un.ulpWord[3],
3318 irsp->un.ulpWord[4],
3319 irsp->un.ulpWord[5],
3320 *(((uint32_t *) irsp) + 6),
3321 *(((uint32_t *) irsp) + 7),
3322 *(((uint32_t *) irsp) + 8),
3323 *(((uint32_t *) irsp) + 9),
3324 *(((uint32_t *) irsp) + 10),
3325 *(((uint32_t *) irsp) + 11),
3326 *(((uint32_t *) irsp) + 12),
3327 *(((uint32_t *) irsp) + 13),
3328 *(((uint32_t *) irsp) + 14),
3329 *(((uint32_t *) irsp) + 15));
3330 }
3331
3332 /*
3333 * Fetch the IOCB command type and call the correct completion
3334 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3335 * get freed back to the lpfc_iocb_list by the discovery
3336 * kernel thread.
3337 */
3338 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3339 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3340 switch (type) {
3341 case LPFC_SOL_IOCB:
3342 spin_unlock_irqrestore(&phba->hbalock, iflag);
3343 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3344 spin_lock_irqsave(&phba->hbalock, iflag);
3345 break;
3346
3347 case LPFC_UNSOL_IOCB:
3348 spin_unlock_irqrestore(&phba->hbalock, iflag);
3349 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3350 spin_lock_irqsave(&phba->hbalock, iflag);
3351 if (!rc)
3352 free_saveq = 0;
3353 break;
3354
3355 case LPFC_ABORT_IOCB:
3356 cmdiocbp = NULL;
3357 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3358 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3359 saveq);
3360 if (cmdiocbp) {
3361 /* Call the specified completion routine */
3362 if (cmdiocbp->iocb_cmpl) {
3363 spin_unlock_irqrestore(&phba->hbalock,
3364 iflag);
3365 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3366 saveq);
3367 spin_lock_irqsave(&phba->hbalock,
3368 iflag);
3369 } else
3370 __lpfc_sli_release_iocbq(phba,
3371 cmdiocbp);
3372 }
3373 break;
3374
3375 case LPFC_UNKNOWN_IOCB:
3376 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3377 char adaptermsg[LPFC_MAX_ADPTMSG];
3378 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3379 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3380 MAX_MSG_DATA);
3381 dev_warn(&((phba->pcidev)->dev),
3382 "lpfc%d: %s\n",
3383 phba->brd_no, adaptermsg);
3384 } else {
3385 /* Unknown IOCB command */
3386 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3387 "0335 Unknown IOCB "
3388 "command Data: x%x "
3389 "x%x x%x x%x\n",
3390 irsp->ulpCommand,
3391 irsp->ulpStatus,
3392 irsp->ulpIoTag,
3393 irsp->ulpContext);
3394 }
3395 break;
3396 }
3397
3398 if (free_saveq) {
3399 list_for_each_entry_safe(rspiocbp, next_iocb,
3400 &saveq->list, list) {
61f35bff 3401 list_del_init(&rspiocbp->list);
3772a991
JS
3402 __lpfc_sli_release_iocbq(phba, rspiocbp);
3403 }
3404 __lpfc_sli_release_iocbq(phba, saveq);
3405 }
3406 rspiocbp = NULL;
3407 }
3408 spin_unlock_irqrestore(&phba->hbalock, iflag);
3409 return rspiocbp;
3410}
3411
3412/**
3413 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3414 * @phba: Pointer to HBA context object.
3415 * @pring: Pointer to driver SLI ring object.
3416 * @mask: Host attention register mask for this ring.
3417 *
3772a991
JS
3418 * This routine wraps the actual slow_ring event process routine, invoked
3419 * through the API jump table function pointer in the lpfc_hba struct.
e59058c4 3420 **/
3772a991 3421void
2e0fef85
JS
3422lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3423 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3424{
3425 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3426}
3427
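/*
 * Illustrative sketch (assumption, not a quote of the driver): wrappers
 * like lpfc_sli_handle_slow_ring_event dispatch through a per-SLI-revision
 * function pointer that is filled in once at API-table setup time, so the
 * hot path never branches on the SLI revision. Roughly:
 */
static void example_setup_slow_ring_handler(struct lpfc_hba *phba)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s3;
	else
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s4;
}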
3428/**
3429 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3430 * @phba: Pointer to HBA context object.
3431 * @pring: Pointer to driver SLI ring object.
3432 * @mask: Host attention register mask for this ring.
3433 *
3434 * This function is called from the worker thread when there is a ring event
3435 * for non-fcp rings. The caller does not hold any lock. The function
3436 * removes each response iocb from the response ring and calls the handle
3437 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3438 **/
3439static void
3440lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3441 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3442{
34b02dcd 3443 struct lpfc_pgp *pgp;
dea3101e 3444 IOCB_t *entry;
3445 IOCB_t *irsp = NULL;
3446 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3447 uint32_t portRspPut, portRspMax;
dea3101e 3448 unsigned long iflag;
3772a991 3449 uint32_t status;
dea3101e 3450
34b02dcd 3451 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3452 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3453 pring->stats.iocb_event++;
3454
dea3101e 3455 /*
3456 * The next available response entry should never exceed the maximum
3457 * entries. If it does, treat it as an adapter hardware error.
3458 */
7e56aa25 3459 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3460 portRspPut = le32_to_cpu(pgp->rspPutInx);
3461 if (portRspPut >= portRspMax) {
3462 /*
025dfdaf 3463 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3464 * rsp ring <portRspMax>
3465 */
ed957684 3466 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3467 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3468 "is bigger than rsp ring %d\n",
e8b62011 3469 pring->ringno, portRspPut, portRspMax);
dea3101e 3470
2e0fef85
JS
3471 phba->link_state = LPFC_HBA_ERROR;
3472 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3473
3474 phba->work_hs = HS_FFER3;
3475 lpfc_handle_eratt(phba);
3476
3772a991 3477 return;
dea3101e 3478 }
3479
3480 rmb();
7e56aa25 3481 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3482 /*
3483 * Build a completion list and call the appropriate handler.
3484 * The process is to get the next available response iocb, get
3485 * a free iocb from the list, copy the response data into the
3486 * free iocb, insert to the continuation list, and update the
3487 * next response index to slim. This process makes response
3488 * iocbs in the ring available to DMA as fast as possible but
3489 * pays a penalty for a copy operation. Since the iocb is
3490 * only 32 bytes, this penalty is considered small relative to
3491 * the PCI reads for register values and a slim write. When
3492 * the ulpLe field is set, the entire Command has been
3493 * received.
3494 */
ed957684
JS
3495 entry = lpfc_resp_iocb(phba, pring);
3496
858c9f6c 3497 phba->last_completion_time = jiffies;
2e0fef85 3498 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3499 if (rspiocbp == NULL) {
3500 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3501 "completion.\n", __func__);
dea3101e 3502 break;
3503 }
3504
ed957684
JS
3505 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3506 phba->iocb_rsp_size);
dea3101e 3507 irsp = &rspiocbp->iocb;
3508
7e56aa25
JS
3509 if (++pring->sli.sli3.rspidx >= portRspMax)
3510 pring->sli.sli3.rspidx = 0;
dea3101e 3511
a58cbd52
JS
3512 if (pring->ringno == LPFC_ELS_RING) {
3513 lpfc_debugfs_slow_ring_trc(phba,
3514 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3515 *(((uint32_t *) irsp) + 4),
3516 *(((uint32_t *) irsp) + 6),
3517 *(((uint32_t *) irsp) + 7));
3518 }
3519
7e56aa25
JS
3520 writel(pring->sli.sli3.rspidx,
3521 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3522
3772a991
JS
3523 spin_unlock_irqrestore(&phba->hbalock, iflag);
3524 /* Handle the response IOCB */
3525 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3526 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3527
3528 /*
3529 * If the port response put pointer has not been updated, sync
3530 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3531 * response put pointer.
3532 */
7e56aa25 3533 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3534 portRspPut = le32_to_cpu(pgp->rspPutInx);
3535 }
7e56aa25 3536 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3537
92d7f7b0 3538 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3539 /* At least one response entry has been freed */
3540 pring->stats.iocb_rsp_full++;
3541 /* SET RxRE_RSP in Chip Att register */
3542 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3543 writel(status, phba->CAregaddr);
3544 readl(phba->CAregaddr); /* flush */
3545 }
3546 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3547 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3548 pring->stats.iocb_cmd_empty++;
3549
3550 /* Force update of the local copy of cmdGetInx */
7e56aa25 3551 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3552 lpfc_sli_resume_iocb(phba, pring);
3553
3554 if ((pring->lpfc_sli_cmd_available))
3555 (pring->lpfc_sli_cmd_available) (phba, pring);
3556
3557 }
3558
2e0fef85 3559 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3560 return;
dea3101e 3561}
3562
4f774513
JS
3563/**
3564 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3565 * @phba: Pointer to HBA context object.
3566 * @pring: Pointer to driver SLI ring object.
3567 * @mask: Host attention register mask for this ring.
3568 *
3569 * This function is called from the worker thread when there is a pending
3570 * ELS response iocb on the driver internal slow-path response iocb worker
3571 * queue. The caller does not hold any lock. The function removes each
3572 * response iocb from the response worker queue and calls the handle
3573 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3574 **/
3575static void
3576lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3577 struct lpfc_sli_ring *pring, uint32_t mask)
3578{
3579 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3580 struct hbq_dmabuf *dmabuf;
3581 struct lpfc_cq_event *cq_event;
4f774513
JS
3582 unsigned long iflag;
3583
45ed1190
JS
3584 spin_lock_irqsave(&phba->hbalock, iflag);
3585 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3586 spin_unlock_irqrestore(&phba->hbalock, iflag);
3587 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3588 /* Get the response iocb from the head of work queue */
3589 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3590 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3591 cq_event, struct lpfc_cq_event, list);
4f774513 3592 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3593
3594 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3595 case CQE_CODE_COMPL_WQE:
3596 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3597 cq_event);
45ed1190
JS
3598 /* Translate ELS WCQE to response IOCBQ */
3599 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3600 irspiocbq);
3601 if (irspiocbq)
3602 lpfc_sli_sp_handle_rspiocb(phba, pring,
3603 irspiocbq);
4d9ab994
JS
3604 break;
3605 case CQE_CODE_RECEIVE:
7851fe2c 3606 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3607 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3608 cq_event);
3609 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3610 break;
3611 default:
3612 break;
3613 }
4f774513
JS
3614 }
3615}
3616
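/*
 * Illustrative sketch (hypothetical wrapper type): the dispatch above
 * recovers the object that embeds the lpfc_cq_event with container_of(),
 * which subtracts the member's offset from the member pointer:
 */
struct example_evt_wrapper {
	int payload;
	struct lpfc_cq_event cq_event;
};

static struct example_evt_wrapper *
example_from_cq_event(struct lpfc_cq_event *evt)
{
	return container_of(evt, struct example_evt_wrapper, cq_event);
}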
e59058c4 3617/**
3621a710 3618 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3619 * @phba: Pointer to HBA context object.
3620 * @pring: Pointer to driver SLI ring object.
3621 *
3622 * This function aborts all iocbs in the given ring and frees all the iocb
3623 * objects in txq. This function issues an abort iocb for all the iocb commands
3624 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3625 * the return of this function. The caller is not required to hold any locks.
3626 **/
2e0fef85 3627void
dea3101e 3628lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3629{
2534ba75 3630 LIST_HEAD(completions);
dea3101e 3631 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3632
92d7f7b0
JS
3633 if (pring->ringno == LPFC_ELS_RING) {
3634 lpfc_fabric_abort_hba(phba);
3635 }
3636
dea3101e 3637 /* Error everything on txq and txcmplq
3638 * First do the txq.
3639 */
db55fba8
JS
3640 if (phba->sli_rev >= LPFC_SLI_REV4) {
3641 spin_lock_irq(&pring->ring_lock);
3642 list_splice_init(&pring->txq, &completions);
3643 pring->txq_cnt = 0;
3644 spin_unlock_irq(&pring->ring_lock);
dea3101e 3645
db55fba8
JS
3646 spin_lock_irq(&phba->hbalock);
3647 /* Next issue ABTS for everything on the txcmplq */
3648 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3649 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3650 spin_unlock_irq(&phba->hbalock);
3651 } else {
3652 spin_lock_irq(&phba->hbalock);
3653 list_splice_init(&pring->txq, &completions);
3654 pring->txq_cnt = 0;
dea3101e 3655
db55fba8
JS
3656 /* Next issue ABTS for everything on the txcmplq */
3657 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3658 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3659 spin_unlock_irq(&phba->hbalock);
3660 }
dea3101e 3661
a257bf90
JS
3662 /* Cancel all the IOCBs from the completions list */
3663 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3664 IOERR_SLI_ABORTED);
dea3101e 3665}
3666
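/*
 * Illustrative sketch (hypothetical helper): the abort and flush routines
 * above share one idiom - splice the whole queue onto a private list while
 * holding the lock, then complete the entries with the lock dropped so the
 * completion handlers are free to take it themselves.
 */
static void example_drain_txq(struct lpfc_hba *phba,
			      struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Complete everything outside the lock with a local reject status */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}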
895427bd
JS
3667/**
3668 * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3669 * @phba: Pointer to HBA context object.
3670 * @pring: Pointer to driver SLI ring object.
3671 *
3672 * This function issues an abort wqe for each outstanding command on the
3673 * given ring's txcmplq (the txq is not touched). The commands in the
3674 * txcmplq are not guaranteed to complete before
3675 * the return of this function. The caller is not required to hold any locks.
3676 **/
3677void
3678lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3679{
3680 LIST_HEAD(completions);
3681 struct lpfc_iocbq *iocb, *next_iocb;
3682
3683 if (pring->ringno == LPFC_ELS_RING)
3684 lpfc_fabric_abort_hba(phba);
3685
3686 spin_lock_irq(&phba->hbalock);
3687 /* Next issue ABTS for everything on the txcmplq */
3688 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3689 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3690 spin_unlock_irq(&phba->hbalock);
3691}
3692
3693
db55fba8
JS
3694/**
3695 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3696 * @phba: Pointer to HBA context object.
3698 *
3699 * This function aborts all iocbs in FCP rings and frees all the iocb
3700 * objects in txq. This function issues an abort iocb for all the iocb commands
3701 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3702 * the return of this function. The caller is not required to hold any locks.
3703 **/
3704void
3705lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3706{
3707 struct lpfc_sli *psli = &phba->sli;
3708 struct lpfc_sli_ring *pring;
3709 uint32_t i;
3710
3711 /* Look on all the FCP Rings for the iotag */
3712 if (phba->sli_rev >= LPFC_SLI_REV4) {
3713 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
895427bd 3714 pring = phba->sli4_hba.fcp_wq[i]->pring;
db55fba8
JS
3715 lpfc_sli_abort_iocb_ring(phba, pring);
3716 }
3717 } else {
895427bd 3718 pring = &psli->sli3_ring[LPFC_FCP_RING];
db55fba8
JS
3719 lpfc_sli_abort_iocb_ring(phba, pring);
3720 }
3721}
3722
895427bd
JS
3723/**
3724 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3725 * @phba: Pointer to HBA context object.
3726 *
3727 * This function aborts all wqes in NVME rings. This function issues an
3728 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3729 * the txcmplq are not guaranteed to complete before the return of this
3730 * function. The caller is not required to hold any locks.
3731 **/
3732void
3733lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3734{
3735 struct lpfc_sli_ring *pring;
3736 uint32_t i;
3737
3738 if (phba->sli_rev < LPFC_SLI_REV4)
3739 return;
3740
3741 /* Abort all IO on each NVME ring. */
3742 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3743 pring = phba->sli4_hba.nvme_wq[i]->pring;
3744 lpfc_sli_abort_wqe_ring(phba, pring);
3745 }
3746}
3747
db55fba8 3748
a8e497d5 3749/**
3621a710 3750 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
3751 * @phba: Pointer to HBA context object.
3752 *
3753 * This function flushes all iocbs in the fcp ring and frees all the iocb
3754 * objects in txq and txcmplq. This function will not issue abort iocbs
3755 * for the iocb commands in txcmplq; they will just be returned with
3756 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3757 * slot has been permanently disabled.
3758 **/
3759void
3760lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3761{
3762 LIST_HEAD(txq);
3763 LIST_HEAD(txcmplq);
a8e497d5
JS
3764 struct lpfc_sli *psli = &phba->sli;
3765 struct lpfc_sli_ring *pring;
db55fba8 3766 uint32_t i;
a8e497d5
JS
3767
3768 spin_lock_irq(&phba->hbalock);
4f2e66c6
JS
3769 /* Indicate the I/O queues are flushed */
3770 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
3771 spin_unlock_irq(&phba->hbalock);
3772
db55fba8
JS
3773 /* Look on all the FCP Rings for the iotag */
3774 if (phba->sli_rev >= LPFC_SLI_REV4) {
3775 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
895427bd 3776 pring = phba->sli4_hba.fcp_wq[i]->pring;
db55fba8
JS
3777
3778 spin_lock_irq(&pring->ring_lock);
3779 /* Retrieve everything on txq */
3780 list_splice_init(&pring->txq, &txq);
3781 /* Retrieve everything on the txcmplq */
3782 list_splice_init(&pring->txcmplq, &txcmplq);
3783 pring->txq_cnt = 0;
3784 pring->txcmplq_cnt = 0;
3785 spin_unlock_irq(&pring->ring_lock);
3786
3787 /* Flush the txq */
3788 lpfc_sli_cancel_iocbs(phba, &txq,
3789 IOSTAT_LOCAL_REJECT,
3790 IOERR_SLI_DOWN);
3791 /* Flush the txcmplq */
3792 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3793 IOSTAT_LOCAL_REJECT,
3794 IOERR_SLI_DOWN);
3795 }
3796 } else {
895427bd 3797 pring = &psli->sli3_ring[LPFC_FCP_RING];
a8e497d5 3798
db55fba8
JS
3799 spin_lock_irq(&phba->hbalock);
3800 /* Retrieve everything on txq */
3801 list_splice_init(&pring->txq, &txq);
3802 /* Retrieve everything on the txcmplq */
3803 list_splice_init(&pring->txcmplq, &txcmplq);
3804 pring->txq_cnt = 0;
3805 pring->txcmplq_cnt = 0;
3806 spin_unlock_irq(&phba->hbalock);
3807
3808 /* Flush the txq */
3809 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3810 IOERR_SLI_DOWN);
3811 /* Flush the txcmplq */
3812 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3813 IOERR_SLI_DOWN);
3814 }
a8e497d5
JS
3815}
3816
895427bd
JS
3817/**
3818 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
3819 * @phba: Pointer to HBA context object.
3820 *
3821 * This function flushes all wqes in the nvme rings and frees all resources
3822 * in the txcmplq. This function does not issue abort wqes for the IO
3823 * commands in txcmplq; they will just be returned with
3824 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3825 * slot has been permanently disabled.
3826 **/
3827void
3828lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
3829{
3830 LIST_HEAD(txcmplq);
3831 struct lpfc_sli_ring *pring;
3832 uint32_t i;
3833
3834 if (phba->sli_rev < LPFC_SLI_REV4)
3835 return;
3836
3837 /* Hint to other driver operations that a flush is in progress. */
3838 spin_lock_irq(&phba->hbalock);
3839 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
3840 spin_unlock_irq(&phba->hbalock);
3841
3842 /* Cycle through all NVME rings and complete each IO with
3843 * a local driver reason code. This is a flush so no
3844 * abort exchange to FW.
3845 */
3846 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3847 pring = phba->sli4_hba.nvme_wq[i]->pring;
3848
3849 /* Retrieve everything on the txcmplq */
3850 spin_lock_irq(&pring->ring_lock);
3851 list_splice_init(&pring->txcmplq, &txcmplq);
3852 pring->txcmplq_cnt = 0;
3853 spin_unlock_irq(&pring->ring_lock);
3854
3855 /* Flush the txcmplq */
3856 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3857 IOSTAT_LOCAL_REJECT,
3858 IOERR_SLI_DOWN);
3859 }
3860}
3861
e59058c4 3862/**
3772a991 3863 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
3864 * @phba: Pointer to HBA context object.
3865 * @mask: Bit mask to be checked.
3866 *
3867 * This function reads the host status register and compares it
3868 * with the provided bit mask to check if the HBA completed
3869 * the restart. This function will wait in a loop for the
3870 * HBA to complete the restart. If the HBA does not restart within
3871 * 15 iterations, the function will reset the HBA again. The
3872 * function returns 1 when the HBA fails to restart; otherwise it
3873 * returns zero.
3874 **/
3772a991
JS
3875static int
3876lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 3877{
41415862
JW
3878 uint32_t status;
3879 int i = 0;
3880 int retval = 0;
dea3101e 3881
41415862 3882 /* Read the HBA Host Status Register */
9940b97b
JS
3883 if (lpfc_readl(phba->HSregaddr, &status))
3884 return 1;
dea3101e 3885
41415862
JW
3886 /*
3887 * Check the status register every 100ms for 5 retries, then every
3888 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
3889 * check every 2.5 sec for 4 more.
3890 * Break out of the loop if errors occurred during init.
3891 */
3892 while (((status & mask) != mask) &&
3893 !(status & HS_FFERM) &&
3894 i++ < 20) {
dea3101e 3895
41415862
JW
3896 if (i <= 5)
3897 msleep(10);
3898 else if (i <= 10)
3899 msleep(500);
3900 else
3901 msleep(2500);
dea3101e 3902
41415862 3903 if (i == 15) {
2e0fef85 3904 /* Do post */
92d7f7b0 3905 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
3906 lpfc_sli_brdrestart(phba);
3907 }
3908 /* Read the HBA Host Status Register */
9940b97b
JS
3909 if (lpfc_readl(phba->HSregaddr, &status)) {
3910 retval = 1;
3911 break;
3912 }
41415862 3913 }
dea3101e 3914
41415862
JW
3915 /* Check to see if any errors occurred during init */
3916 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
3917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3918 "2751 Adapter failed to restart, "
3919 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3920 status,
3921 readl(phba->MBslimaddr + 0xa8),
3922 readl(phba->MBslimaddr + 0xac));
2e0fef85 3923 phba->link_state = LPFC_HBA_ERROR;
41415862 3924 retval = 1;
dea3101e 3925 }
dea3101e 3926
41415862
JW
3927 return retval;
3928}
dea3101e 3929
da0436e9
JS
3930/**
3931 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3932 * @phba: Pointer to HBA context object.
3933 * @mask: Bit mask to be checked.
3934 *
3935 * This function checks the host status register to see if the HBA is
3936 * ready. This function will wait in a loop for the HBA to become ready.
3937 * If the HBA is not ready, the function will reset the HBA PCI
3938 * function again. The function returns 1 when the HBA fails to become
3939 * ready; otherwise it returns zero.
3940 **/
3941static int
3942lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3943{
3944 uint32_t status;
3945 int retval = 0;
3946
3947 /* Read the HBA Host Status Register */
3948 status = lpfc_sli4_post_status_check(phba);
3949
3950 if (status) {
3951 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3952 lpfc_sli_brdrestart(phba);
3953 status = lpfc_sli4_post_status_check(phba);
3954 }
3955
3956 /* Check to see if any errors occurred during init */
3957 if (status) {
3958 phba->link_state = LPFC_HBA_ERROR;
3959 retval = 1;
3960 } else
3961 phba->sli4_hba.intr_enable = 0;
3962
3963 return retval;
3964}
3965
3966/**
3967 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3968 * @phba: Pointer to HBA context object.
3969 * @mask: Bit mask to be checked.
3970 *
3971 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3972 * invoked through the API jump table function pointer in the lpfc_hba struct.
3973 **/
3974int
3975lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3976{
3977 return phba->lpfc_sli_brdready(phba, mask);
3978}
3979
9290831f
JS
3980#define BARRIER_TEST_PATTERN (0xdeadbeef)
3981
e59058c4 3982/**
3621a710 3983 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
3984 * @phba: Pointer to HBA context object.
3985 *
1b51197d
JS
3986 * This function is called before resetting an HBA. It is called with
3987 * hbalock held and requests the HBA to quiesce DMAs before the reset.
e59058c4 3988 **/
2e0fef85 3989void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 3990{
65a29c16
JS
3991 uint32_t __iomem *resp_buf;
3992 uint32_t __iomem *mbox_buf;
9290831f 3993 volatile uint32_t mbox;
9940b97b 3994 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
3995 int i;
3996 uint8_t hdrtype;
3997
1c2ba475
JT
3998 lockdep_assert_held(&phba->hbalock);
3999
9290831f
JS
4000 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4001 if (hdrtype != 0x80 ||
4002 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4003 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4004 return;
4005
4006 /*
4007 * Tell the other part of the chip to suspend temporarily all
4008 * its DMA activity.
4009 */
65a29c16 4010 resp_buf = phba->MBslimaddr;
9290831f
JS
4011
4012 /* Disable the error attention */
9940b97b
JS
4013 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4014 return;
9290831f
JS
4015 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4016 readl(phba->HCregaddr); /* flush */
2e0fef85 4017 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 4018
9940b97b
JS
4019 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4020 return;
4021 if (ha_copy & HA_ERATT) {
9290831f
JS
4022 /* Clear Chip error bit */
4023 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4024 phba->pport->stopped = 1;
9290831f
JS
4025 }
4026
4027 mbox = 0;
4028 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4029 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4030
4031 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 4032 mbox_buf = phba->MBslimaddr;
9290831f
JS
4033 writel(mbox, mbox_buf);
4034
9940b97b
JS
4035 for (i = 0; i < 50; i++) {
4036 if (lpfc_readl((resp_buf + 1), &resp_data))
4037 return;
4038 if (resp_data != ~(BARRIER_TEST_PATTERN))
4039 mdelay(1);
4040 else
4041 break;
4042 }
4043 resp_data = 0;
4044 if (lpfc_readl((resp_buf + 1), &resp_data))
4045 return;
4046 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 4047 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 4048 phba->pport->stopped)
9290831f
JS
4049 goto restore_hc;
4050 else
4051 goto clear_errat;
4052 }
4053
4054 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
4055 resp_data = 0;
4056 for (i = 0; i < 500; i++) {
4057 if (lpfc_readl(resp_buf, &resp_data))
4058 return;
4059 if (resp_data != mbox)
4060 mdelay(1);
4061 else
4062 break;
4063 }
9290831f
JS
4064
4065clear_errat:
4066
9940b97b
JS
4067 while (++i < 500) {
4068 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4069 return;
4070 if (!(ha_copy & HA_ERATT))
4071 mdelay(1);
4072 else
4073 break;
4074 }
9290831f
JS
4075
4076 if (readl(phba->HAregaddr) & HA_ERATT) {
4077 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4078 phba->pport->stopped = 1;
9290831f
JS
4079 }
4080
4081restore_hc:
2e0fef85 4082 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
4083 writel(hc_copy, phba->HCregaddr);
4084 readl(phba->HCregaddr); /* flush */
4085}
4086
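/*
 * Illustrative sketch (hypothetical helper): the barrier above repeats a
 * read-compare-mdelay() cycle on a SLIM word until the expected pattern
 * shows up or a millisecond budget runs out. Factored out, the polling
 * loop looks roughly like this:
 */
static int example_poll_mmio_word(uint32_t __iomem *reg, uint32_t want,
				  int ms_budget)
{
	uint32_t val;

	while (ms_budget--) {
		if (lpfc_readl(reg, &val))
			return -EIO;	/* readback failed, give up */
		if (val == want)
			return 0;
		mdelay(1);
	}
	return -ETIMEDOUT;
}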
e59058c4 4087/**
3621a710 4088 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
4089 * @phba: Pointer to HBA context object.
4090 *
4091 * This function issues a kill_board mailbox command and waits for
4092 * the error attention interrupt. This function is called to stop
4093 * the firmware processing. The caller is not required to hold any
4094 * locks. This function calls the lpfc_hba_down_post function to free
4095 * any pending commands after the kill. The function returns 1 when it
4096 * fails to kill the board; otherwise it returns 0.
4097 **/
41415862 4098int
2e0fef85 4099lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
4100{
4101 struct lpfc_sli *psli;
4102 LPFC_MBOXQ_t *pmb;
4103 uint32_t status;
4104 uint32_t ha_copy;
4105 int retval;
4106 int i = 0;
dea3101e 4107
41415862 4108 psli = &phba->sli;
dea3101e 4109
41415862 4110 /* Kill HBA */
ed957684 4111 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
4112 "0329 Kill HBA Data: x%x x%x\n",
4113 phba->pport->port_state, psli->sli_flag);
41415862 4114
98c9ea5c
JS
4115 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4116 if (!pmb)
41415862 4117 return 1;
41415862
JW
4118
4119 /* Disable the error attention */
2e0fef85 4120 spin_lock_irq(&phba->hbalock);
9940b97b
JS
4121 if (lpfc_readl(phba->HCregaddr, &status)) {
4122 spin_unlock_irq(&phba->hbalock);
4123 mempool_free(pmb, phba->mbox_mem_pool);
4124 return 1;
4125 }
41415862
JW
4126 status &= ~HC_ERINT_ENA;
4127 writel(status, phba->HCregaddr);
4128 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
4129 phba->link_flag |= LS_IGNORE_ERATT;
4130 spin_unlock_irq(&phba->hbalock);
41415862
JW
4131
4132 lpfc_kill_board(phba, pmb);
4133 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4134 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4135
4136 if (retval != MBX_SUCCESS) {
4137 if (retval != MBX_BUSY)
4138 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
4139 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4140 "2752 KILL_BOARD command failed retval %d\n",
4141 retval);
2e0fef85
JS
4142 spin_lock_irq(&phba->hbalock);
4143 phba->link_flag &= ~LS_IGNORE_ERATT;
4144 spin_unlock_irq(&phba->hbalock);
41415862
JW
4145 return 1;
4146 }
4147
f4b4c68f
JS
4148 spin_lock_irq(&phba->hbalock);
4149 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4150 spin_unlock_irq(&phba->hbalock);
9290831f 4151
41415862
JW
4152 mempool_free(pmb, phba->mbox_mem_pool);
4153
4154 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4155 * attention every 100ms for 3 seconds. If we don't get ERATT after
4156 * 3 seconds we still set HBA_ERROR state because the status of the
4157 * board is now undefined.
4158 */
9940b97b
JS
4159 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4160 return 1;
41415862
JW
4161 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4162 mdelay(100);
9940b97b
JS
4163 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4164 return 1;
41415862
JW
4165 }
4166
4167 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
4168 if (ha_copy & HA_ERATT) {
4169 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4170 phba->pport->stopped = 1;
9290831f 4171 }
2e0fef85 4172 spin_lock_irq(&phba->hbalock);
41415862 4173 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 4174 psli->mbox_active = NULL;
2e0fef85
JS
4175 phba->link_flag &= ~LS_IGNORE_ERATT;
4176 spin_unlock_irq(&phba->hbalock);
41415862 4177
41415862 4178 lpfc_hba_down_post(phba);
2e0fef85 4179 phba->link_state = LPFC_HBA_ERROR;
41415862 4180
2e0fef85 4181 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 4182}
4183
e59058c4 4184/**
3772a991 4185 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
4186 * @phba: Pointer to HBA context object.
4187 *
4188 * This function resets the HBA by writing HC_INITFF to the control
4189 * register. After the HBA resets, this function resets all the iocb ring
4190 * indices. This function disables PCI layer parity checking during
4191 * the reset.
4192 * This function always returns 0.
4193 * The caller is not required to hold any locks.
4194 **/
41415862 4195int
2e0fef85 4196lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 4197{
41415862 4198 struct lpfc_sli *psli;
dea3101e 4199 struct lpfc_sli_ring *pring;
41415862 4200 uint16_t cfg_value;
dea3101e 4201 int i;
dea3101e 4202
41415862 4203 psli = &phba->sli;
dea3101e 4204
41415862
JW
4205 /* Reset HBA */
4206 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4207 "0325 Reset HBA Data: x%x x%x\n",
4492b739
JS
4208 (phba->pport) ? phba->pport->port_state : 0,
4209 psli->sli_flag);
dea3101e 4210
4211 /* perform board reset */
4212 phba->fc_eventTag = 0;
4d9ab994 4213 phba->link_events = 0;
4492b739
JS
4214 if (phba->pport) {
4215 phba->pport->fc_myDID = 0;
4216 phba->pport->fc_prevDID = 0;
4217 }
dea3101e 4218
41415862
JW
4219 /* Turn off parity checking and serr during the physical reset */
4220 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4221 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4222 (cfg_value &
4223 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4224
3772a991
JS
4225 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4226
41415862
JW
4227 /* Now toggle INITFF bit in the Host Control Register */
4228 writel(HC_INITFF, phba->HCregaddr);
4229 mdelay(1);
4230 readl(phba->HCregaddr); /* flush */
4231 writel(0, phba->HCregaddr);
4232 readl(phba->HCregaddr); /* flush */
4233
4234 /* Restore PCI cmd register */
4235 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 4236
4237 /* Initialize relevant SLI info */
41415862 4238 for (i = 0; i < psli->num_rings; i++) {
895427bd 4239 pring = &psli->sli3_ring[i];
dea3101e 4240 pring->flag = 0;
7e56aa25
JS
4241 pring->sli.sli3.rspidx = 0;
4242 pring->sli.sli3.next_cmdidx = 0;
4243 pring->sli.sli3.local_getidx = 0;
4244 pring->sli.sli3.cmdidx = 0;
dea3101e 4245 pring->missbufcnt = 0;
4246 }
dea3101e 4247
2e0fef85 4248 phba->link_state = LPFC_WARM_START;
41415862
JW
4249 return 0;
4250}
4251
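/*
 * Illustrative sketch (hypothetical helpers): both brdreset paths bracket
 * the physical reset with the same PCI_COMMAND dance - save the register,
 * mask parity/SERR reporting so the reset cannot fault the bus, restore
 * afterwards.
 */
static uint16_t example_pci_mask_parity(struct pci_dev *pdev)
{
	uint16_t cfg_value;

	pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
	return cfg_value;	/* caller restores this after the reset */
}

static void example_pci_restore_cmd(struct pci_dev *pdev, uint16_t cfg_value)
{
	pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
}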
e59058c4 4252/**
da0436e9
JS
4253 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4254 * @phba: Pointer to HBA context object.
4255 *
4256 * This function resets a SLI4 HBA. This function disables PCI layer parity
4257 * checking while it resets the device. The caller is not required to hold
4258 * any locks.
4259 *
4260 * This function returns 0 always.
4261 **/
4262int
4263lpfc_sli4_brdreset(struct lpfc_hba *phba)
4264{
4265 struct lpfc_sli *psli = &phba->sli;
4266 uint16_t cfg_value;
0293635e 4267 int rc = 0;
da0436e9
JS
4268
4269 /* Reset HBA */
4270 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0293635e
JS
4271 "0295 Reset HBA Data: x%x x%x x%x\n",
4272 phba->pport->port_state, psli->sli_flag,
4273 phba->hba_flag);
da0436e9
JS
4274
4275 /* perform board reset */
4276 phba->fc_eventTag = 0;
4d9ab994 4277 phba->link_events = 0;
da0436e9
JS
4278 phba->pport->fc_myDID = 0;
4279 phba->pport->fc_prevDID = 0;
4280
da0436e9
JS
4281 spin_lock_irq(&phba->hbalock);
4282 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4283 phba->fcf.fcf_flag = 0;
da0436e9
JS
4284 spin_unlock_irq(&phba->hbalock);
4285
0293635e
JS
4286 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4287 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4288 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4289 return rc;
4290 }
4291
da0436e9
JS
4292 /* Now physically reset the device */
4293 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4294 "0389 Performing PCI function reset!\n");
be858b65
JS
4295
4296 /* Turn off parity checking and serr during the physical reset */
4297 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4298 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4299 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4300
88318816 4301 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4302 rc = lpfc_pci_function_reset(phba);
88318816 4303 lpfc_sli4_queue_destroy(phba);
da0436e9 4304
be858b65
JS
4305 /* Restore PCI cmd register */
4306 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4307
27b01b82 4308 return rc;
da0436e9
JS
4309}
4310
4311/**
4312 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4313 * @phba: Pointer to HBA context object.
4314 *
4315 * This function is called in the SLI initialization code path to
4316 * restart the HBA. The caller is not required to hold any lock.
4317 * This function writes MBX_RESTART mailbox command to the SLIM and
4318 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4319 * function to free any pending commands. The function enables
4320 * POST only during the first initialization. The function returns zero.
4321 * The function does not guarantee completion of MBX_RESTART mailbox
4322 * command before the return of this function.
4323 **/
da0436e9
JS
4324static int
4325lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4326{
4327 MAILBOX_t *mb;
4328 struct lpfc_sli *psli;
41415862
JW
4329 volatile uint32_t word0;
4330 void __iomem *to_slim;
0d878419 4331 uint32_t hba_aer_enabled;
41415862 4332
2e0fef85 4333 spin_lock_irq(&phba->hbalock);
41415862 4334
0d878419
JS
4335 /* Take PCIe device Advanced Error Reporting (AER) state */
4336 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4337
41415862
JW
4338 psli = &phba->sli;
4339
4340 /* Restart HBA */
4341 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4342 "0337 Restart HBA Data: x%x x%x\n",
4492b739
JS
4343 (phba->pport) ? phba->pport->port_state : 0,
4344 psli->sli_flag);
41415862
JW
4345
4346 word0 = 0;
4347 mb = (MAILBOX_t *) &word0;
4348 mb->mbxCommand = MBX_RESTART;
4349 mb->mbxHc = 1;
4350
9290831f
JS
4351 lpfc_reset_barrier(phba);
4352
41415862
JW
4353 to_slim = phba->MBslimaddr;
4354 writel(*(uint32_t *) mb, to_slim);
4355 readl(to_slim); /* flush */
4356
4357 /* Only skip post after fc_ffinit is completed */
4492b739 4358 if (phba->pport && phba->pport->port_state)
41415862 4359 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4360 else
41415862 4361 word0 = 0; /* This is really setting up word1 */
65a29c16 4362 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4363 writel(*(uint32_t *) mb, to_slim);
4364 readl(to_slim); /* flush */
dea3101e 4365
41415862 4366 lpfc_sli_brdreset(phba);
4492b739
JS
4367 if (phba->pport)
4368 phba->pport->stopped = 0;
2e0fef85 4369 phba->link_state = LPFC_INIT_START;
da0436e9 4370 phba->hba_flag = 0;
2e0fef85 4371 spin_unlock_irq(&phba->hbalock);
41415862 4372
64ba8818
JS
4373 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4374 psli->stats_start = get_seconds();
4375
eaf15d5b
JS
4376 /* Give the INITFF and Post time to settle. */
4377 mdelay(100);
41415862 4378
0d878419
JS
4379 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4380 if (hba_aer_enabled)
4381 pci_disable_pcie_error_reporting(phba->pcidev);
4382
41415862 4383 lpfc_hba_down_post(phba);
dea3101e 4384
4385 return 0;
4386}
4387
da0436e9
JS
4388/**
4389 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4390 * @phba: Pointer to HBA context object.
4391 *
4392 * This function is called in the SLI initialization code path to restart
4393 * a SLI4 HBA. The caller is not required to hold any lock.
4394 * At the end of the function, it calls lpfc_hba_down_post function to
4395 * free any pending commands.
4396 **/
4397static int
4398lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4399{
4400 struct lpfc_sli *psli = &phba->sli;
75baf696 4401 uint32_t hba_aer_enabled;
27b01b82 4402 int rc;
da0436e9
JS
4403
4404 /* Restart HBA */
4405 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4406 "0296 Restart HBA Data: x%x x%x\n",
4407 phba->pport->port_state, psli->sli_flag);
4408
75baf696
JS
4409 /* Take PCIe device Advanced Error Reporting (AER) state */
4410 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4411
27b01b82 4412 rc = lpfc_sli4_brdreset(phba);
da0436e9
JS
4413
4414 spin_lock_irq(&phba->hbalock);
4415 phba->pport->stopped = 0;
4416 phba->link_state = LPFC_INIT_START;
4417 phba->hba_flag = 0;
4418 spin_unlock_irq(&phba->hbalock);
4419
4420 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4421 psli->stats_start = get_seconds();
4422
75baf696
JS
4423 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4424 if (hba_aer_enabled)
4425 pci_disable_pcie_error_reporting(phba->pcidev);
4426
da0436e9
JS
4427 lpfc_hba_down_post(phba);
4428
27b01b82 4429 return rc;
da0436e9
JS
4430}
4431
4432/**
4433 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4434 * @phba: Pointer to HBA context object.
4435 *
4436 * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
4437 * through the API jump table function pointer in the lpfc_hba struct.
4438**/
4439int
4440lpfc_sli_brdrestart(struct lpfc_hba *phba)
4441{
4442 return phba->lpfc_sli_brdrestart(phba);
4443}
4444
e59058c4 4445/**
3621a710 4446 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
4447 * @phba: Pointer to HBA context object.
4448 *
4449 * This function is called after an HBA restart to wait for successful
4450 * restart of the HBA. Successful restart of the HBA is indicated by the
4451 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4452 * iterations, the function will restart the HBA again. The function returns
4453 * zero if the HBA successfully restarted, else a negative error code.
4454 **/
4492b739 4455int
dea3101e 4456lpfc_sli_chipset_init(struct lpfc_hba *phba)
4457{
4458 uint32_t status, i = 0;
4459
4460 /* Read the HBA Host Status Register */
9940b97b
JS
4461 if (lpfc_readl(phba->HSregaddr, &status))
4462 return -EIO;
dea3101e 4463
4464 /* Check status register to see what current state is */
4465 i = 0;
4466 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4467
dcf2a4e0
JS
4468 /* Check every 10ms for 10 retries, then every 100ms for 90
4469 * retries, then every 1 sec for 50 retries, for a total of
4470 * ~60 seconds before resetting the board again and checking every
4471 * 1 sec for 50 retries. The up-to-60-second wait before the
4472 * board is ready is required for the Falcon FIPS zeroization to
4473 * complete; any board reset in between restarts the zeroization,
4474 * further delaying board readiness.
dea3101e 4475 */
dcf2a4e0 4476 if (i++ >= 200) {
dea3101e 4477 /* Adapter failed to init, timeout, status reg
4478 <status> */
ed957684 4479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4480 "0436 Adapter failed to init, "
09372820
JS
4481 "timeout, status reg x%x, "
4482 "FW Data: A8 x%x AC x%x\n", status,
4483 readl(phba->MBslimaddr + 0xa8),
4484 readl(phba->MBslimaddr + 0xac));
2e0fef85 4485 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4486 return -ETIMEDOUT;
4487 }
4488
4489 /* Check to see if any errors occurred during init */
4490 if (status & HS_FFERM) {
4491 /* ERROR: During chipset initialization */
4492 /* Adapter failed to init, chipset, status reg
4493 <status> */
ed957684 4494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4495 "0437 Adapter failed to init, "
09372820
JS
4496 "chipset, status reg x%x, "
4497 "FW Data: A8 x%x AC x%x\n", status,
4498 readl(phba->MBslimaddr + 0xa8),
4499 readl(phba->MBslimaddr + 0xac));
2e0fef85 4500 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4501 return -EIO;
4502 }
4503
dcf2a4e0 4504 if (i <= 10)
dea3101e 4505 msleep(10);
dcf2a4e0
JS
4506 else if (i <= 100)
4507 msleep(100);
4508 else
4509 msleep(1000);
dea3101e 4510
dcf2a4e0
JS
4511 if (i == 150) {
4512 /* Do post */
92d7f7b0 4513 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4514 lpfc_sli_brdrestart(phba);
dea3101e 4515 }
4516 /* Read the HBA Host Status Register */
9940b97b
JS
4517 if (lpfc_readl(phba->HSregaddr, &status))
4518 return -EIO;
dea3101e 4519 }
4520
4521 /* Check to see if any errors occurred during init */
4522 if (status & HS_FFERM) {
4523 /* ERROR: During chipset initialization */
4524 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4526 "0438 Adapter failed to init, chipset, "
09372820
JS
4527 "status reg x%x, "
4528 "FW Data: A8 x%x AC x%x\n", status,
4529 readl(phba->MBslimaddr + 0xa8),
4530 readl(phba->MBslimaddr + 0xac));
2e0fef85 4531 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4532 return -EIO;
4533 }
4534
4535 /* Clear all interrupt enable conditions */
4536 writel(0, phba->HCregaddr);
4537 readl(phba->HCregaddr); /* flush */
4538
4539 /* setup host attn register */
4540 writel(0xffffffff, phba->HAregaddr);
4541 readl(phba->HAregaddr); /* flush */
4542 return 0;
4543}
4544
e59058c4 4545/**
3621a710 4546 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
4547 *
4548 * This function calculates and returns the number of HBQs required to be
4549 * configured.
4550 **/
78b2d852 4551int
ed957684
JS
4552lpfc_sli_hbq_count(void)
4553{
92d7f7b0 4554 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
4555}
4556
e59058c4 4557/**
3621a710 4558 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
4559 *
4560 * This function adds the number of hbq entries in every HBQ to get
4561 * the total number of hbq entries required for the HBA and returns
4562 * the total count.
4563 **/
ed957684
JS
4564static int
4565lpfc_sli_hbq_entry_count(void)
4566{
4567 int hbq_count = lpfc_sli_hbq_count();
4568 int count = 0;
4569 int i;
4570
4571 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4572 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
4573 return count;
4574}
4575
e59058c4 4576/**
3621a710 4577 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
4578 *
4579 * This function calculates amount of memory required for all hbq entries
4580 * to be configured and returns the total memory required.
4581 **/
dea3101e 4582int
ed957684
JS
4583lpfc_sli_hbq_size(void)
4584{
4585 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4586}
4587
e59058c4 4588/**
3621a710 4589 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
4590 * @phba: Pointer to HBA context object.
4591 *
4592 * This function is called during the SLI initialization to configure
4593 * all the HBQs and post buffers to the HBQ. The caller is not
4594 * required to hold any locks. This function will return zero if successful
4595 * else it will return negative error code.
4596 **/
ed957684
JS
4597static int
4598lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4599{
4600 int hbq_count = lpfc_sli_hbq_count();
4601 LPFC_MBOXQ_t *pmb;
4602 MAILBOX_t *pmbox;
4603 uint32_t hbqno;
4604 uint32_t hbq_entry_index;
ed957684 4605
92d7f7b0
JS
4606 /* Get a Mailbox buffer to setup mailbox
4607 * commands for HBA initialization
4608 */
ed957684
JS
4609 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4610
4611 if (!pmb)
4612 return -ENOMEM;
4613
04c68496 4614 pmbox = &pmb->u.mb;
ed957684
JS
4615
4616 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4617 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4618 phba->hbq_in_use = 1;
ed957684
JS
4619
4620 hbq_entry_index = 0;
4621 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4622 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4623 phba->hbqs[hbqno].hbqPutIdx = 0;
4624 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4625 phba->hbqs[hbqno].entry_count =
92d7f7b0 4626 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
4627 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4628 hbq_entry_index, pmb);
ed957684
JS
4629 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4630
4631 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4632 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4633 mbxStatus <status>, ring <num> */
4634
4635 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4636 LOG_SLI | LOG_VPORT,
e8b62011 4637 "1805 Adapter failed to init. "
ed957684 4638 "Data: x%x x%x x%x\n",
e8b62011 4639 pmbox->mbxCommand,
ed957684
JS
4640 pmbox->mbxStatus, hbqno);
4641
4642 phba->link_state = LPFC_HBA_ERROR;
4643 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 4644 return -ENXIO;
ed957684
JS
4645 }
4646 }
4647 phba->hbq_count = hbq_count;
4648
ed957684
JS
4649 mempool_free(pmb, phba->mbox_mem_pool);
4650
92d7f7b0 4651 /* Initially populate or replenish the HBQs */
d7c255b2
JS
4652 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4653 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
4654 return 0;
4655}
4656
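/*
 * Illustrative sketch (hypothetical command fill): the mailbox life cycle
 * used by the setup path above - allocate from the mempool, have a
 * lpfc_config_* helper fill in the command, issue it in polling mode, and
 * free the buffer whatever the outcome.
 */
static int example_issue_mbox_poll(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	/* ... fill pmb->u.mb with the desired mailbox command here ... */

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -ENXIO;
}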
4f774513
JS
4657/**
4658 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4659 * @phba: Pointer to HBA context object.
4660 *
4661 * This function is called during the SLI initialization to configure
4662 * all the HBQs and post buffers to the HBQ. The caller is not
4663 * required to hold any locks. This function will return zero if successful
4664 * else it will return negative error code.
4665 **/
4666static int
4667lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4668{
4669 phba->hbq_in_use = 1;
895427bd
JS
4670 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4671 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4f774513 4672 phba->hbq_count = 1;
895427bd 4673 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4f774513 4674 /* Initially populate or replenish the HBQs */
4f774513
JS
4675 return 0;
4676}
4677
e59058c4 4678/**
3621a710 4679 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
4680 * @phba: Pointer to HBA context object.
4681 * @sli_mode: sli mode - 2/3
4682 *
183b8021 4683 * This function is called by the sli initialization code path
e59058c4
JS
4684 * to issue config_port mailbox command. This function restarts the
4685 * HBA firmware and issues a config_port mailbox command to configure
4686 * the SLI interface in the sli mode specified by sli_mode
4687 * variable. The caller is not required to hold any locks.
4688 * The function returns 0 if successful, else returns negative error
4689 * code.
4690 **/
9399627f
JS
4691int
4692lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e 4693{
4694 LPFC_MBOXQ_t *pmb;
4695 uint32_t resetcount = 0, rc = 0, done = 0;
4696
4697 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4698 if (!pmb) {
2e0fef85 4699 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4700 return -ENOMEM;
4701 }
4702
ed957684 4703 phba->sli_rev = sli_mode;
dea3101e 4704 while (resetcount < 2 && !done) {
2e0fef85 4705 spin_lock_irq(&phba->hbalock);
1c067a42 4706 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4707 spin_unlock_irq(&phba->hbalock);
92d7f7b0 4708 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4709 lpfc_sli_brdrestart(phba);
dea3101e 4710 rc = lpfc_sli_chipset_init(phba);
4711 if (rc)
4712 break;
4713
2e0fef85 4714 spin_lock_irq(&phba->hbalock);
1c067a42 4715 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4716 spin_unlock_irq(&phba->hbalock);
dea3101e 4717 resetcount++;
4718
ed957684
JS
4719 /* Call pre CONFIG_PORT mailbox command initialization. A
4720 * value of 0 means the call was successful. Any other
4721 * nonzero value is a failure, but if ERESTART is returned,
4722 * the driver may reset the HBA and try again.
4723 */
dea3101e 4724 rc = lpfc_config_port_prep(phba);
4725 if (rc == -ERESTART) {
ed957684 4726 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 4727 continue;
34b02dcd 4728 } else if (rc)
dea3101e 4729 break;
6d368e53 4730
2e0fef85 4731 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 4732 lpfc_config_port(phba, pmb);
4733 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4734 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4735 LPFC_SLI3_HBQ_ENABLED |
4736 LPFC_SLI3_CRP_ENABLED |
4737 LPFC_SLI3_BG_ENABLED |
4738 LPFC_SLI3_DSS_ENABLED);
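		/* re-enabled below only for features the port actually grants */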
ed957684 4739 if (rc != MBX_SUCCESS) {
dea3101e 4740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4741 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 4742 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 4743 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 4744 spin_lock_irq(&phba->hbalock);
04c68496 4745 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4746 spin_unlock_irq(&phba->hbalock);
4747 rc = -ENXIO;
4748 } else {
4749 /* Allow asynchronous mailbox command to go through */
4750 spin_lock_irq(&phba->hbalock);
4751 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4752 spin_unlock_irq(&phba->hbalock);
ed957684 4753 done = 1;
4754
4755 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4756 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4757 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4758 "3110 Port did not grant ASABT\n");
04c68496 4759 }
dea3101e 4760 }
4761 if (!done) {
4762 rc = -EINVAL;
4763 goto do_prep_failed;
4764 }
4765 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4766 if (!pmb->u.mb.un.varCfgPort.cMA) {
4767 rc = -ENXIO;
4768 goto do_prep_failed;
4769 }
04c68496 4770 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 4771 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4772 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4773 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4774 phba->max_vpi : phba->max_vports;
4775
4776 } else
4777 phba->max_vpi = 0;
4778 phba->fips_level = 0;
4779 phba->fips_spec_rev = 0;
4780 if (pmb->u.mb.un.varCfgPort.gdss) {
04c68496 4781 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4782 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4783 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4784 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4785 "2850 Security Crypto Active. FIPS x%d "
4786 "(Spec Rev: x%d)",
4787 phba->fips_level, phba->fips_spec_rev);
4788 }
4789 if (pmb->u.mb.un.varCfgPort.sec_err) {
4790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4791 "2856 Config Port Security Crypto "
4792 "Error: x%x ",
4793 pmb->u.mb.un.varCfgPort.sec_err);
4794 }
04c68496 4795 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 4796 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 4797 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 4798 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4799
4800 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4801 phba->port_gp = phba->mbox->us.s3_pgp.port;
4802
4803 if (phba->cfg_enable_bg) {
04c68496 4804 if (pmb->u.mb.un.varCfgPort.gbg)
4805 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4806 else
4807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4808 "0443 Adapter did not grant "
4809 "BlockGuard\n");
4810 }
34b02dcd 4811 } else {
8f34f4ce 4812 phba->hbq_get = NULL;
34b02dcd 4813 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 4814 phba->max_vpi = 0;
ed957684 4815 }
92d7f7b0 4816do_prep_failed:
4817 mempool_free(pmb, phba->mbox_mem_pool);
4818 return rc;
4819}
4820
4821
4822/**
183b8021 4823 * lpfc_sli_hba_setup - SLI initialization function
4824 * @phba: Pointer to HBA context object.
4825 *
4826 * This function is the main SLI initialization function. This function
4827 * is called by the HBA initialization code, HBA reset code and HBA
4828 * error attention handler code. Caller is not required to hold any
4829 * locks. This function issues config_port mailbox command to configure
4830 * the SLI, setup iocb rings and HBQ rings. In the end the function
4831 * calls the config_port_post function to issue init_link mailbox
4832 * command and to start the discovery. The function will return zero
 4833 * if successful, else it will return a negative error code.
4834 **/
4835int
4836lpfc_sli_hba_setup(struct lpfc_hba *phba)
4837{
4838 uint32_t rc;
4839 int mode = 3, i;
4840 int longs;
ed957684 4841
12247e81 4842 switch (phba->cfg_sli_mode) {
ed957684 4843 case 2:
78b2d852 4844 if (phba->cfg_enable_npiv) {
92d7f7b0 4845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
12247e81 4846 "1824 NPIV enabled: Override sli_mode "
92d7f7b0 4847 "parameter (%d) to auto (0).\n",
12247e81 4848 phba->cfg_sli_mode);
4849 break;
4850 }
4851 mode = 2;
4852 break;
4853 case 0:
4854 case 3:
4855 break;
4856 default:
92d7f7b0 4857 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4858 "1819 Unrecognized sli_mode parameter: %d.\n",
4859 phba->cfg_sli_mode);
4860
4861 break;
4862 }
b5c53958 4863 phba->fcp_embed_io = 0; /* SLI4 FC support only */
ed957684 4864
4865 rc = lpfc_sli_config_port(phba, mode);
4866
12247e81 4867 if (rc && phba->cfg_sli_mode == 3)
92d7f7b0 4868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4869 "1820 Unable to select SLI-3. "
4870 "Not supported by adapter.\n");
ed957684 4871 if (rc && mode != 2)
9399627f 4872 rc = lpfc_sli_config_port(phba, 2);
4873 else if (rc && mode == 2)
4874 rc = lpfc_sli_config_port(phba, 3);
ed957684 4875 if (rc)
dea3101e 4876 goto lpfc_sli_hba_setup_error;
4877
4878 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4879 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4880 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4881 if (!rc) {
4882 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4883 "2709 This device supports "
4884 "Advanced Error Reporting (AER)\n");
4885 spin_lock_irq(&phba->hbalock);
4886 phba->hba_flag |= HBA_AER_ENABLED;
4887 spin_unlock_irq(&phba->hbalock);
4888 } else {
4889 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4890 "2708 This device does not support "
4891 "Advanced Error Reporting (AER): %d\n",
4892 rc);
4893 phba->cfg_aer_support = 0;
4894 }
4895 }
4896
4897 if (phba->sli_rev == 3) {
4898 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4899 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4900 } else {
4901 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4902 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 4903 phba->sli3_options = 0;
4904 }
4905
4906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4907 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4908 phba->sli_rev, phba->max_vpi);
ed957684 4909 rc = lpfc_sli_ring_map(phba);
dea3101e 4910
4911 if (rc)
4912 goto lpfc_sli_hba_setup_error;
4913
4914 /* Initialize VPIs. */
4915 if (phba->sli_rev == LPFC_SLI_REV3) {
4916 /*
4917 * The VPI bitmask and physical ID array are allocated
4918 * and initialized once only - at driver load. A port
4919 * reset doesn't need to reinitialize this memory.
4920 */
4921 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4922 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4923 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4924 GFP_KERNEL);
4925 if (!phba->vpi_bmask) {
4926 rc = -ENOMEM;
4927 goto lpfc_sli_hba_setup_error;
4928 }
4929
4930 phba->vpi_ids = kzalloc(
4931 (phba->max_vpi+1) * sizeof(uint16_t),
4932 GFP_KERNEL);
4933 if (!phba->vpi_ids) {
4934 kfree(phba->vpi_bmask);
4935 rc = -ENOMEM;
4936 goto lpfc_sli_hba_setup_error;
4937 }
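			/* SLI-3 has no vpi translation; use an identity map */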
4938 for (i = 0; i < phba->max_vpi; i++)
4939 phba->vpi_ids[i] = i;
4940 }
4941 }
4942
9399627f 4943 /* Init HBQs */
4944 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4945 rc = lpfc_sli_hbq_setup(phba);
4946 if (rc)
4947 goto lpfc_sli_hba_setup_error;
4948 }
04c68496 4949 spin_lock_irq(&phba->hbalock);
dea3101e 4950 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 4951 spin_unlock_irq(&phba->hbalock);
dea3101e 4952
4953 rc = lpfc_config_port_post(phba);
4954 if (rc)
4955 goto lpfc_sli_hba_setup_error;
4956
4957 return rc;
4958
92d7f7b0 4959lpfc_sli_hba_setup_error:
2e0fef85 4960 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 4961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4962 "0445 Firmware initialization failed\n");
dea3101e 4963 return rc;
4964}
4965
e59058c4 4966/**
4967 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4968 * @phba: Pointer to HBA context object.
4969 * @mboxq: mailbox pointer.
 4970 * This function issues a dump mailbox command to read config region
 4971 * 23, parses the records in the region, and populates the driver
 4972 * data structure.
e59058c4 4973 **/
da0436e9 4974static int
ff78d8f9 4975lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 4976{
ff78d8f9 4977 LPFC_MBOXQ_t *mboxq;
4978 struct lpfc_dmabuf *mp;
4979 struct lpfc_mqe *mqe;
4980 uint32_t data_length;
4981 int rc;
dea3101e 4982
4983 /* Program the default value of vlan_id and fc_map */
4984 phba->valid_vlan = 0;
4985 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4986 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4987 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 4988
4989 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4990 if (!mboxq)
4991 return -ENOMEM;
4992
4993 mqe = &mboxq->u.mqe;
4994 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4995 rc = -ENOMEM;
4996 goto out_free_mboxq;
4997 }
4998
4999 mp = (struct lpfc_dmabuf *) mboxq->context1;
5000 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5001
5002 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5003 "(%d):2571 Mailbox cmd x%x Status x%x "
5004 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5005 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5006 "CQ: x%x x%x x%x x%x\n",
5007 mboxq->vport ? mboxq->vport->vpi : 0,
5008 bf_get(lpfc_mqe_command, mqe),
5009 bf_get(lpfc_mqe_status, mqe),
5010 mqe->un.mb_words[0], mqe->un.mb_words[1],
5011 mqe->un.mb_words[2], mqe->un.mb_words[3],
5012 mqe->un.mb_words[4], mqe->un.mb_words[5],
5013 mqe->un.mb_words[6], mqe->un.mb_words[7],
5014 mqe->un.mb_words[8], mqe->un.mb_words[9],
5015 mqe->un.mb_words[10], mqe->un.mb_words[11],
5016 mqe->un.mb_words[12], mqe->un.mb_words[13],
5017 mqe->un.mb_words[14], mqe->un.mb_words[15],
5018 mqe->un.mb_words[16], mqe->un.mb_words[50],
5019 mboxq->mcqe.word0,
5020 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5021 mboxq->mcqe.trailer);
5022
5023 if (rc) {
5024 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5025 kfree(mp);
5026 rc = -EIO;
5027 goto out_free_mboxq;
5028 }
5029 data_length = mqe->un.mb_words[5];
a0c87cbd 5030 if (data_length > DMP_RGN23_SIZE) {
5031 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5032 kfree(mp);
5033 rc = -EIO;
5034 goto out_free_mboxq;
d11e31dd 5035 }
dea3101e 5036
5037 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5038 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5039 kfree(mp);
5040 rc = 0;
5041
5042out_free_mboxq:
5043 mempool_free(mboxq, phba->mbox_mem_pool);
5044 return rc;
da0436e9 5045}
5046
5047/**
5048 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5049 * @phba: pointer to lpfc hba data structure.
5050 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5051 * @vpd: pointer to the memory to hold resulting port vpd data.
5052 * @vpd_size: On input, the number of bytes allocated to @vpd.
5053 * On output, the number of data bytes in @vpd.
e59058c4 5054 *
5055 * This routine executes a READ_REV SLI4 mailbox command. In
5056 * addition, this routine gets the port vpd data.
5057 *
5058 * Return codes
af901ca1 5059 * 0 - successful
d439d286 5060 * -ENOMEM - could not allocate memory.
e59058c4 5061 **/
5062static int
5063lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5064 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 5065{
5066 int rc = 0;
5067 uint32_t dma_size;
5068 struct lpfc_dmabuf *dmabuf;
5069 struct lpfc_mqe *mqe;
dea3101e 5070
5071 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5072 if (!dmabuf)
5073 return -ENOMEM;
5074
5075 /*
5076 * Get a DMA buffer for the vpd data resulting from the READ_REV
5077 * mailbox command.
a257bf90 5078 */
da0436e9 5079 dma_size = *vpd_size;
5080 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5081 &dmabuf->phys, GFP_KERNEL);
5082 if (!dmabuf->virt) {
5083 kfree(dmabuf);
5084 return -ENOMEM;
5085 }
5086
5087 /*
5088 * The SLI4 implementation of READ_REV conflicts at word1,
5089 * bits 31:16 and SLI4 adds vpd functionality not present
5090 * in SLI3. This code corrects the conflicts.
1dcb58e5 5091 */
5092 lpfc_read_rev(phba, mboxq);
5093 mqe = &mboxq->u.mqe;
5094 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5095 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5096 mqe->un.read_rev.word1 &= 0x0000FFFF;
5097 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5098 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5099
5100 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5101 if (rc) {
5102 dma_free_coherent(&phba->pcidev->dev, dma_size,
5103 dmabuf->virt, dmabuf->phys);
def9c7a9 5104 kfree(dmabuf);
5105 return -EIO;
5106 }
1dcb58e5 5107
5108 /*
5109 * The available vpd length cannot be bigger than the
 5110 * DMA buffer passed to the port. Catch the case where it is
 5111 * smaller and update the caller's size.
5112 */
5113 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5114 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 5115
5116 memcpy(vpd, dmabuf->virt, *vpd_size);
5117
5118 dma_free_coherent(&phba->pcidev->dev, dma_size,
5119 dmabuf->virt, dmabuf->phys);
5120 kfree(dmabuf);
5121 return 0;
dea3101e 5122}
5123
5124/**
5125 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5126 * @phba: pointer to lpfc hba data structure.
5127 *
 5128 * This routine retrieves the SLI4 device physical port name that this
 5129 * PCI function is attached to.
5130 *
5131 * Return codes
4907cb7b 5132 * 0 - successful
5133 * otherwise - failed to retrieve physical port name
5134 **/
5135static int
5136lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5137{
5138 LPFC_MBOXQ_t *mboxq;
5139 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5140 struct lpfc_controller_attribute *cntl_attr;
5141 struct lpfc_mbx_get_port_name *get_port_name;
5142 void *virtaddr = NULL;
5143 uint32_t alloclen, reqlen;
5144 uint32_t shdr_status, shdr_add_status;
5145 union lpfc_sli4_cfg_shdr *shdr;
5146 char cport_name = 0;
5147 int rc;
5148
5149 /* We assume nothing at this point */
5150 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5151 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5152
5153 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5154 if (!mboxq)
5155 return -ENOMEM;
cd1c8301 5156 /* obtain link type and link number via READ_CONFIG */
5157 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5158 lpfc_sli4_read_config(phba);
5159 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5160 goto retrieve_ppname;
5161
5162 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5163 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5164 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5165 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5166 LPFC_SLI4_MBX_NEMBED);
5167 if (alloclen < reqlen) {
5168 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5169 "3084 Allocated DMA memory size (%d) is "
5170 "less than the requested DMA memory size "
5171 "(%d)\n", alloclen, reqlen);
5172 rc = -ENOMEM;
5173 goto out_free_mboxq;
5174 }
5175 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5176 virtaddr = mboxq->sge_array->addr[0];
5177 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5178 shdr = &mbx_cntl_attr->cfg_shdr;
5179 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5180 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5181 if (shdr_status || shdr_add_status || rc) {
5182 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5183 "3085 Mailbox x%x (x%x/x%x) failed, "
5184 "rc:x%x, status:x%x, add_status:x%x\n",
5185 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5186 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5187 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5188 rc, shdr_status, shdr_add_status);
5189 rc = -ENXIO;
5190 goto out_free_mboxq;
5191 }
5192 cntl_attr = &mbx_cntl_attr->cntl_attr;
5193 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5194 phba->sli4_hba.lnk_info.lnk_tp =
5195 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5196 phba->sli4_hba.lnk_info.lnk_no =
5197 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5198 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5199 "3086 lnk_type:%d, lnk_numb:%d\n",
5200 phba->sli4_hba.lnk_info.lnk_tp,
5201 phba->sli4_hba.lnk_info.lnk_no);
5202
5203retrieve_ppname:
5204 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5205 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5206 sizeof(struct lpfc_mbx_get_port_name) -
5207 sizeof(struct lpfc_sli4_cfg_mhdr),
5208 LPFC_SLI4_MBX_EMBED);
5209 get_port_name = &mboxq->u.mqe.un.get_port_name;
5210 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5211 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5212 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5213 phba->sli4_hba.lnk_info.lnk_tp);
5214 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5215 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5216 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5217 if (shdr_status || shdr_add_status || rc) {
5218 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5219 "3087 Mailbox x%x (x%x/x%x) failed: "
5220 "rc:x%x, status:x%x, add_status:x%x\n",
5221 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5222 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5223 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5224 rc, shdr_status, shdr_add_status);
5225 rc = -ENXIO;
5226 goto out_free_mboxq;
5227 }
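	/* The response packs one name character per link; pick ours below */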
5228 switch (phba->sli4_hba.lnk_info.lnk_no) {
5229 case LPFC_LINK_NUMBER_0:
5230 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5231 &get_port_name->u.response);
5232 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5233 break;
5234 case LPFC_LINK_NUMBER_1:
5235 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5236 &get_port_name->u.response);
5237 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5238 break;
5239 case LPFC_LINK_NUMBER_2:
5240 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5241 &get_port_name->u.response);
5242 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5243 break;
5244 case LPFC_LINK_NUMBER_3:
5245 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5246 &get_port_name->u.response);
5247 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5248 break;
5249 default:
5250 break;
5251 }
5252
5253 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5254 phba->Port[0] = cport_name;
5255 phba->Port[1] = '\0';
5256 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5257 "3091 SLI get port name: %s\n", phba->Port);
5258 }
5259
5260out_free_mboxq:
5261 if (rc != MBX_TIMEOUT) {
5262 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5263 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5264 else
5265 mempool_free(mboxq, phba->mbox_mem_pool);
5266 }
5267 return rc;
5268}
5269
e59058c4 5270/**
5271 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5272 * @phba: pointer to lpfc hba data structure.
e59058c4 5273 *
5274 * This routine is called to explicitly arm the SLI4 device's completion and
 5275 * event queues.
5276 **/
5277static void
5278lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5279{
895427bd 5280 int qidx;
5281
5282 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5283 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5284 if (phba->sli4_hba.nvmels_cq)
5285 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
5286 LPFC_QUEUE_REARM);
5287
5288 if (phba->sli4_hba.fcp_cq)
5289 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5290 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
5291 LPFC_QUEUE_REARM);
5292
5293 if (phba->sli4_hba.nvme_cq)
5294 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5295 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
5296 LPFC_QUEUE_REARM);
1ba981fd 5297
f38fa0bb 5298 if (phba->cfg_fof)
5299 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5300
5301 if (phba->sli4_hba.hba_eq)
5302 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5303 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
5304 LPFC_QUEUE_REARM);
1ba981fd 5305
5306 if (phba->nvmet_support) {
5307 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5308 lpfc_sli4_cq_release(
5309 phba->sli4_hba.nvmet_cqset[qidx],
5310 LPFC_QUEUE_REARM);
5311 }
2e90f4b5 5312 }
5313
5314 if (phba->cfg_fof)
5315 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5316}
5317
5318/**
5319 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5320 * @phba: Pointer to HBA context object.
5321 * @type: The resource extent type.
5322 * @extnt_count: buffer to hold port available extent count.
5323 * @extnt_size: buffer to hold element count per extent.
6d368e53 5324 *
 5325 * This function calls the port and retrieves the number of available
5326 * extents and their size for a particular extent type.
5327 *
5328 * Returns: 0 if successful. Nonzero otherwise.
6d368e53 5329 **/
b76f2dc9 5330int
5331lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5332 uint16_t *extnt_count, uint16_t *extnt_size)
5333{
5334 int rc = 0;
5335 uint32_t length;
5336 uint32_t mbox_tmo;
5337 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5338 LPFC_MBOXQ_t *mbox;
5339
5340 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5341 if (!mbox)
5342 return -ENOMEM;
5343
5344 /* Find out how many extents are available for this resource type */
5345 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5346 sizeof(struct lpfc_sli4_cfg_mhdr));
5347 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5348 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5349 length, LPFC_SLI4_MBX_EMBED);
5350
5351 /* Send an extents count of 0 - the GET doesn't use it. */
5352 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5353 LPFC_SLI4_MBX_EMBED);
5354 if (unlikely(rc)) {
5355 rc = -EIO;
5356 goto err_exit;
5357 }
5358
5359 if (!phba->sli4_hba.intr_enable)
5360 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5361 else {
a183a15f 5362 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5363 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5364 }
5365 if (unlikely(rc)) {
5366 rc = -EIO;
5367 goto err_exit;
5368 }
5369
5370 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5371 if (bf_get(lpfc_mbox_hdr_status,
5372 &rsrc_info->header.cfg_shdr.response)) {
5373 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5374 "2930 Failed to get resource extents "
5375 "Status 0x%x Add'l Status 0x%x\n",
5376 bf_get(lpfc_mbox_hdr_status,
5377 &rsrc_info->header.cfg_shdr.response),
5378 bf_get(lpfc_mbox_hdr_add_status,
5379 &rsrc_info->header.cfg_shdr.response));
5380 rc = -EIO;
5381 goto err_exit;
5382 }
5383
5384 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5385 &rsrc_info->u.rsp);
5386 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5387 &rsrc_info->u.rsp);
5388
5389 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5390 "3162 Retrieved extents type-%d from port: count:%d, "
5391 "size:%d\n", type, *extnt_count, *extnt_size);
5392
5393err_exit:
5394 mempool_free(mbox, phba->mbox_mem_pool);
5395 return rc;
5396}
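/*
 * Usage sketch (hypothetical caller, not part of the original source):
 * query the port for available XRI extents before provisioning them.
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size))
 *		pr_info("port offers %u extents of %u XRIs\n",
 *			ext_cnt, ext_size);
 */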
5397
5398/**
5399 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5400 * @phba: Pointer to HBA context object.
5401 * @type: The extent type to check.
5402 *
5403 * This function reads the current available extents from the port and checks
5404 * if the extent count or extent size has changed since the last access.
 5405 * Callers use this routine post port reset to understand if there is an
 5406 * extent reprovisioning requirement.
5407 *
5408 * Returns:
5409 * -Error: error indicates problem.
5410 * 1: Extent count or size has changed.
5411 * 0: No changes.
5412 **/
5413static int
5414lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5415{
5416 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5417 uint16_t size_diff, rsrc_ext_size;
5418 int rc = 0;
5419 struct lpfc_rsrc_blks *rsrc_entry;
5420 struct list_head *rsrc_blk_list = NULL;
5421
5422 size_diff = 0;
5423 curr_ext_cnt = 0;
5424 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5425 &rsrc_ext_cnt,
5426 &rsrc_ext_size);
5427 if (unlikely(rc))
5428 return -EIO;
5429
5430 switch (type) {
5431 case LPFC_RSC_TYPE_FCOE_RPI:
5432 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5433 break;
5434 case LPFC_RSC_TYPE_FCOE_VPI:
5435 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5436 break;
5437 case LPFC_RSC_TYPE_FCOE_XRI:
5438 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5439 break;
5440 case LPFC_RSC_TYPE_FCOE_VFI:
5441 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5442 break;
5443 default:
5444 break;
5445 }
5446
5447 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5448 curr_ext_cnt++;
5449 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5450 size_diff++;
5451 }
5452
5453 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5454 rc = 1;
5455
5456 return rc;
5457}
5458
5459/**
 5460 * lpfc_sli4_cfg_post_extnts - Post the extent allocation request to the port
5461 * @phba: Pointer to HBA context object.
 5462 * @extnt_cnt: number of available extents.
 5463 * @type: the extent type (rpi, xri, vfi, vpi).
 5464 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 5465 * @mbox: pointer to the caller's allocated mailbox structure.
5466 *
5467 * This function executes the extents allocation request. It also
5468 * takes care of the amount of memory needed to allocate or get the
5469 * allocated extents. It is the caller's responsibility to evaluate
5470 * the response.
5471 *
5472 * Returns:
5473 * -Error: Error value describes the condition found.
5474 * 0: if successful
5475 **/
5476static int
8a9d2e80 5477lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5478 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5479{
5480 int rc = 0;
5481 uint32_t req_len;
5482 uint32_t emb_len;
5483 uint32_t alloc_len, mbox_tmo;
5484
5485 /* Calculate the total requested length of the dma memory */
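	/* (the port reports each allocated extent as a 16-bit base id) */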
8a9d2e80 5486 req_len = extnt_cnt * sizeof(uint16_t);
5487
5488 /*
5489 * Calculate the size of an embedded mailbox. The uint32_t
5490 * accounts for extents-specific word.
5491 */
5492 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5493 sizeof(uint32_t);
5494
5495 /*
5496 * Presume the allocation and response will fit into an embedded
5497 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5498 */
5499 *emb = LPFC_SLI4_MBX_EMBED;
5500 if (req_len > emb_len) {
8a9d2e80 5501 req_len = extnt_cnt * sizeof(uint16_t) +
5502 sizeof(union lpfc_sli4_cfg_shdr) +
5503 sizeof(uint32_t);
5504 *emb = LPFC_SLI4_MBX_NEMBED;
5505 }
5506
5507 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5508 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5509 req_len, *emb);
5510 if (alloc_len < req_len) {
5511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
b76f2dc9 5512 "2982 Allocated DMA memory size (x%x) is "
5513 "less than the requested DMA memory "
5514 "size (x%x)\n", alloc_len, req_len);
5515 return -ENOMEM;
5516 }
8a9d2e80 5517 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5518 if (unlikely(rc))
5519 return -EIO;
5520
5521 if (!phba->sli4_hba.intr_enable)
5522 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5523 else {
a183a15f 5524 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5525 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5526 }
5527
5528 if (unlikely(rc))
5529 rc = -EIO;
5530 return rc;
5531}
5532
5533/**
5534 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5535 * @phba: Pointer to HBA context object.
5536 * @type: The resource extent type to allocate.
5537 *
5538 * This function allocates the number of elements for the specified
5539 * resource type.
5540 **/
5541static int
5542lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5543{
5544 bool emb = false;
5545 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5546 uint16_t rsrc_id, rsrc_start, j, k;
5547 uint16_t *ids;
5548 int i, rc;
5549 unsigned long longs;
5550 unsigned long *bmask;
5551 struct lpfc_rsrc_blks *rsrc_blks;
5552 LPFC_MBOXQ_t *mbox;
5553 uint32_t length;
5554 struct lpfc_id_range *id_array = NULL;
5555 void *virtaddr = NULL;
5556 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5557 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5558 struct list_head *ext_blk_list;
5559
5560 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5561 &rsrc_cnt,
5562 &rsrc_size);
5563 if (unlikely(rc))
5564 return -EIO;
5565
5566 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5567 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5568 "3009 No available Resource Extents "
5569 "for resource type 0x%x: Count: 0x%x, "
5570 "Size 0x%x\n", type, rsrc_cnt,
5571 rsrc_size);
5572 return -ENOMEM;
5573 }
5574
5575 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5576 "2903 Post resource extents type-0x%x: "
5577 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5578
5579 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5580 if (!mbox)
5581 return -ENOMEM;
5582
8a9d2e80 5583 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6d368e53
JS
5584 if (unlikely(rc)) {
5585 rc = -EIO;
5586 goto err_exit;
5587 }
5588
5589 /*
5590 * Figure out where the response is located. Then get local pointers
5591 * to the response data. The port does not guarantee to respond to
5592 * all extents counts request so update the local variable with the
5593 * allocated count from the port.
5594 */
5595 if (emb == LPFC_SLI4_MBX_EMBED) {
5596 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5597 id_array = &rsrc_ext->u.rsp.id[0];
5598 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5599 } else {
5600 virtaddr = mbox->sge_array->addr[0];
5601 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5602 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5603 id_array = &n_rsrc->id;
5604 }
5605
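	/*
	 * The port may grant fewer extents than requested, so size the
	 * bookkeeping from the granted count: one id per element
	 * (rsrc_cnt * rsrc_size), with the bitmask rounded up to whole
	 * unsigned longs.
	 */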
5606 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5607 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5608
5609 /*
5610 * Based on the resource size and count, correct the base and max
5611 * resource values.
5612 */
5613 length = sizeof(struct lpfc_rsrc_blks);
5614 switch (type) {
5615 case LPFC_RSC_TYPE_FCOE_RPI:
5616 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5617 sizeof(unsigned long),
5618 GFP_KERNEL);
5619 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5620 rc = -ENOMEM;
5621 goto err_exit;
5622 }
5623 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5624 sizeof(uint16_t),
5625 GFP_KERNEL);
5626 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5627 kfree(phba->sli4_hba.rpi_bmask);
5628 rc = -ENOMEM;
5629 goto err_exit;
5630 }
5631
5632 /*
5633 * The next_rpi was initialized with the maximum available
5634 * count but the port may allocate a smaller number. Catch
5635 * that case and update the next_rpi.
5636 */
5637 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5638
5639 /* Initialize local ptrs for common extent processing later. */
5640 bmask = phba->sli4_hba.rpi_bmask;
5641 ids = phba->sli4_hba.rpi_ids;
5642 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5643 break;
5644 case LPFC_RSC_TYPE_FCOE_VPI:
5645 phba->vpi_bmask = kzalloc(longs *
5646 sizeof(unsigned long),
5647 GFP_KERNEL);
5648 if (unlikely(!phba->vpi_bmask)) {
5649 rc = -ENOMEM;
5650 goto err_exit;
5651 }
5652 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5653 sizeof(uint16_t),
5654 GFP_KERNEL);
5655 if (unlikely(!phba->vpi_ids)) {
5656 kfree(phba->vpi_bmask);
5657 rc = -ENOMEM;
5658 goto err_exit;
5659 }
5660
5661 /* Initialize local ptrs for common extent processing later. */
5662 bmask = phba->vpi_bmask;
5663 ids = phba->vpi_ids;
5664 ext_blk_list = &phba->lpfc_vpi_blk_list;
5665 break;
5666 case LPFC_RSC_TYPE_FCOE_XRI:
5667 phba->sli4_hba.xri_bmask = kzalloc(longs *
5668 sizeof(unsigned long),
5669 GFP_KERNEL);
5670 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5671 rc = -ENOMEM;
5672 goto err_exit;
5673 }
8a9d2e80 5674 phba->sli4_hba.max_cfg_param.xri_used = 0;
5675 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5676 sizeof(uint16_t),
5677 GFP_KERNEL);
5678 if (unlikely(!phba->sli4_hba.xri_ids)) {
5679 kfree(phba->sli4_hba.xri_bmask);
5680 rc = -ENOMEM;
5681 goto err_exit;
5682 }
5683
5684 /* Initialize local ptrs for common extent processing later. */
5685 bmask = phba->sli4_hba.xri_bmask;
5686 ids = phba->sli4_hba.xri_ids;
5687 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5688 break;
5689 case LPFC_RSC_TYPE_FCOE_VFI:
5690 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5691 sizeof(unsigned long),
5692 GFP_KERNEL);
5693 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5694 rc = -ENOMEM;
5695 goto err_exit;
5696 }
5697 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5698 sizeof(uint16_t),
5699 GFP_KERNEL);
5700 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5701 kfree(phba->sli4_hba.vfi_bmask);
5702 rc = -ENOMEM;
5703 goto err_exit;
5704 }
5705
5706 /* Initialize local ptrs for common extent processing later. */
5707 bmask = phba->sli4_hba.vfi_bmask;
5708 ids = phba->sli4_hba.vfi_ids;
5709 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5710 break;
5711 default:
5712 /* Unsupported Opcode. Fail call. */
5713 id_array = NULL;
5714 bmask = NULL;
5715 ids = NULL;
5716 ext_blk_list = NULL;
5717 goto err_exit;
5718 }
5719
5720 /*
5721 * Complete initializing the extent configuration with the
5722 * allocated ids assigned to this function. The bitmask serves
5723 * as an index into the array and manages the available ids. The
5724 * array just stores the ids communicated to the port via the wqes.
5725 */
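	/*
	 * Each 32-bit word of id_array packs two 16-bit extent base ids;
	 * 'k' advances to the next word once both halves are consumed.
	 */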
5726 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5727 if ((i % 2) == 0)
5728 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5729 &id_array[k]);
5730 else
5731 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5732 &id_array[k]);
5733
5734 rsrc_blks = kzalloc(length, GFP_KERNEL);
5735 if (unlikely(!rsrc_blks)) {
5736 rc = -ENOMEM;
5737 kfree(bmask);
5738 kfree(ids);
5739 goto err_exit;
5740 }
5741 rsrc_blks->rsrc_start = rsrc_id;
5742 rsrc_blks->rsrc_size = rsrc_size;
5743 list_add_tail(&rsrc_blks->list, ext_blk_list);
5744 rsrc_start = rsrc_id;
895427bd 5745 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6d368e53 5746 phba->sli4_hba.scsi_xri_start = rsrc_start +
5747 lpfc_sli4_get_iocb_cnt(phba);
5748 phba->sli4_hba.nvme_xri_start =
5749 phba->sli4_hba.scsi_xri_start +
5750 phba->sli4_hba.scsi_xri_max;
5751 }
5752
5753 while (rsrc_id < (rsrc_start + rsrc_size)) {
5754 ids[j] = rsrc_id;
5755 rsrc_id++;
5756 j++;
5757 }
5758 /* Entire word processed. Get next word.*/
5759 if ((i % 2) == 1)
5760 k++;
5761 }
5762 err_exit:
5763 lpfc_sli4_mbox_cmd_free(phba, mbox);
5764 return rc;
5765}
5766
5767
5768
5769/**
5770 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5771 * @phba: Pointer to HBA context object.
5772 * @type: the extent's type.
5773 *
5774 * This function deallocates all extents of a particular resource type.
5775 * SLI4 does not allow for deallocating a particular extent range. It
5776 * is the caller's responsibility to release all kernel memory resources.
5777 **/
5778static int
5779lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5780{
5781 int rc;
5782 uint32_t length, mbox_tmo = 0;
5783 LPFC_MBOXQ_t *mbox;
5784 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5785 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5786
5787 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5788 if (!mbox)
5789 return -ENOMEM;
5790
5791 /*
 5792 * This function sends an embedded mailbox because it only sends the
 5793 * resource type. All extents of this type are released by the
5794 * port.
5795 */
5796 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5797 sizeof(struct lpfc_sli4_cfg_mhdr));
5798 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5799 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5800 length, LPFC_SLI4_MBX_EMBED);
5801
5802 /* Send an extents count of 0 - the dealloc doesn't use it. */
5803 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5804 LPFC_SLI4_MBX_EMBED);
5805 if (unlikely(rc)) {
5806 rc = -EIO;
5807 goto out_free_mbox;
5808 }
5809 if (!phba->sli4_hba.intr_enable)
5810 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5811 else {
a183a15f 5812 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5813 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5814 }
5815 if (unlikely(rc)) {
5816 rc = -EIO;
5817 goto out_free_mbox;
5818 }
5819
5820 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5821 if (bf_get(lpfc_mbox_hdr_status,
5822 &dealloc_rsrc->header.cfg_shdr.response)) {
5823 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5824 "2919 Failed to release resource extents "
5825 "for type %d - Status 0x%x Add'l Status 0x%x. "
5826 "Resource memory not released.\n",
5827 type,
5828 bf_get(lpfc_mbox_hdr_status,
5829 &dealloc_rsrc->header.cfg_shdr.response),
5830 bf_get(lpfc_mbox_hdr_add_status,
5831 &dealloc_rsrc->header.cfg_shdr.response));
5832 rc = -EIO;
5833 goto out_free_mbox;
5834 }
5835
5836 /* Release kernel memory resources for the specific type. */
5837 switch (type) {
5838 case LPFC_RSC_TYPE_FCOE_VPI:
5839 kfree(phba->vpi_bmask);
5840 kfree(phba->vpi_ids);
5841 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5842 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5843 &phba->lpfc_vpi_blk_list, list) {
5844 list_del_init(&rsrc_blk->list);
5845 kfree(rsrc_blk);
5846 }
16a3a208 5847 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5848 break;
5849 case LPFC_RSC_TYPE_FCOE_XRI:
5850 kfree(phba->sli4_hba.xri_bmask);
5851 kfree(phba->sli4_hba.xri_ids);
5852 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5853 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5854 list_del_init(&rsrc_blk->list);
5855 kfree(rsrc_blk);
5856 }
5857 break;
5858 case LPFC_RSC_TYPE_FCOE_VFI:
5859 kfree(phba->sli4_hba.vfi_bmask);
5860 kfree(phba->sli4_hba.vfi_ids);
5861 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5862 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5863 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5864 list_del_init(&rsrc_blk->list);
5865 kfree(rsrc_blk);
5866 }
5867 break;
5868 case LPFC_RSC_TYPE_FCOE_RPI:
5869 /* RPI bitmask and physical id array are cleaned up earlier. */
5870 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5871 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5872 list_del_init(&rsrc_blk->list);
5873 kfree(rsrc_blk);
5874 }
5875 break;
5876 default:
5877 break;
5878 }
5879
5880 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5881
5882 out_free_mbox:
5883 mempool_free(mbox, phba->mbox_mem_pool);
5884 return rc;
5885}
5886
bd4b3e5c 5887static void
5888lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5889 uint32_t feature)
65791f1f 5890{
65791f1f 5891 uint32_t len;
65791f1f 5892
5893 len = sizeof(struct lpfc_mbx_set_feature) -
5894 sizeof(struct lpfc_sli4_cfg_mhdr);
5895 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5896 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5897 LPFC_SLI4_MBX_EMBED);
5898
5899 switch (feature) {
5900 case LPFC_SET_UE_RECOVERY:
5901 bf_set(lpfc_mbx_set_feature_UER,
5902 &mbox->u.mqe.un.set_feature, 1);
5903 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5904 mbox->u.mqe.un.set_feature.param_len = 8;
5905 break;
5906 case LPFC_SET_MDS_DIAGS:
5907 bf_set(lpfc_mbx_set_feature_mds,
5908 &mbox->u.mqe.un.set_feature, 1);
5909 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5910 &mbox->u.mqe.un.set_feature, 0);
5911 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5912 mbox->u.mqe.un.set_feature.param_len = 8;
5913 break;
65791f1f 5914 }
5915
5916 return;
5917}
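/*
 * Note: this helper only formats the SET_FEATURES command. A caller is
 * expected to allocate the LPFC_MBOXQ_t and issue the mailbox itself,
 * e.g. (sketch of the assumed calling pattern, not taken from this file):
 *
 *	lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 */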
5918
5919/**
5920 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5921 * @phba: Pointer to HBA context object.
5922 *
5923 * This function allocates all SLI4 resource identifiers.
5924 **/
5925int
5926lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5927{
5928 int i, rc, error = 0;
5929 uint16_t count, base;
5930 unsigned long longs;
5931
5932 if (!phba->sli4_hba.rpi_hdrs_in_use)
5933 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5934 if (phba->sli4_hba.extents_in_use) {
5935 /*
5936 * The port supports resource extents. The XRI, VPI, VFI, RPI
5937 * resource extent count must be read and allocated before
5938 * provisioning the resource id arrays.
5939 */
5940 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5941 LPFC_IDX_RSRC_RDY) {
5942 /*
5943 * Extent-based resources are set - the driver could
5944 * be in a port reset. Figure out if any corrective
5945 * actions need to be taken.
5946 */
5947 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5948 LPFC_RSC_TYPE_FCOE_VFI);
5949 if (rc != 0)
5950 error++;
5951 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5952 LPFC_RSC_TYPE_FCOE_VPI);
5953 if (rc != 0)
5954 error++;
5955 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5956 LPFC_RSC_TYPE_FCOE_XRI);
5957 if (rc != 0)
5958 error++;
5959 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5960 LPFC_RSC_TYPE_FCOE_RPI);
5961 if (rc != 0)
5962 error++;
5963
5964 /*
5965 * It's possible that the number of resources
5966 * provided to this port instance changed between
5967 * resets. Detect this condition and reallocate
5968 * resources. Otherwise, there is no action.
5969 */
5970 if (error) {
5971 lpfc_printf_log(phba, KERN_INFO,
5972 LOG_MBOX | LOG_INIT,
5973 "2931 Detected extent resource "
5974 "change. Reallocating all "
5975 "extents.\n");
5976 rc = lpfc_sli4_dealloc_extent(phba,
5977 LPFC_RSC_TYPE_FCOE_VFI);
5978 rc = lpfc_sli4_dealloc_extent(phba,
5979 LPFC_RSC_TYPE_FCOE_VPI);
5980 rc = lpfc_sli4_dealloc_extent(phba,
5981 LPFC_RSC_TYPE_FCOE_XRI);
5982 rc = lpfc_sli4_dealloc_extent(phba,
5983 LPFC_RSC_TYPE_FCOE_RPI);
5984 } else
5985 return 0;
5986 }
5987
5988 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5989 if (unlikely(rc))
5990 goto err_exit;
5991
5992 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5993 if (unlikely(rc))
5994 goto err_exit;
5995
5996 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5997 if (unlikely(rc))
5998 goto err_exit;
5999
6000 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6001 if (unlikely(rc))
6002 goto err_exit;
6003 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6004 LPFC_IDX_RSRC_RDY);
6005 return rc;
6006 } else {
6007 /*
6008 * The port does not support resource extents. The XRI, VPI,
6009 * VFI, RPI resource ids were determined from READ_CONFIG.
6010 * Just allocate the bitmasks and provision the resource id
6011 * arrays. If a port reset is active, the resources don't
6012 * need any action - just exit.
6013 */
6014 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6015 LPFC_IDX_RSRC_RDY) {
6016 lpfc_sli4_dealloc_resource_identifiers(phba);
6017 lpfc_sli4_remove_rpis(phba);
6018 }
6019 /* RPIs. */
6020 count = phba->sli4_hba.max_cfg_param.max_rpi;
6021 if (count <= 0) {
6022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6023 "3279 Invalid provisioning of "
6024 "rpi:%d\n", count);
6025 rc = -EINVAL;
6026 goto err_exit;
6027 }
6028 base = phba->sli4_hba.max_cfg_param.rpi_base;
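		/* one bit per rpi, rounded up to whole unsigned longs */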
6029 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6030 phba->sli4_hba.rpi_bmask = kzalloc(longs *
6031 sizeof(unsigned long),
6032 GFP_KERNEL);
6033 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6034 rc = -ENOMEM;
6035 goto err_exit;
6036 }
6037 phba->sli4_hba.rpi_ids = kzalloc(count *
6038 sizeof(uint16_t),
6039 GFP_KERNEL);
6040 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6041 rc = -ENOMEM;
6042 goto free_rpi_bmask;
6043 }
6044
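		/* identity-offset map: logical index i -> physical id base + i */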
6045 for (i = 0; i < count; i++)
6046 phba->sli4_hba.rpi_ids[i] = base + i;
6047
6048 /* VPIs. */
6049 count = phba->sli4_hba.max_cfg_param.max_vpi;
6050 if (count <= 0) {
6051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6052 "3280 Invalid provisioning of "
6053 "vpi:%d\n", count);
6054 rc = -EINVAL;
6055 goto free_rpi_ids;
6056 }
6057 base = phba->sli4_hba.max_cfg_param.vpi_base;
6058 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6059 phba->vpi_bmask = kzalloc(longs *
6060 sizeof(unsigned long),
6061 GFP_KERNEL);
6062 if (unlikely(!phba->vpi_bmask)) {
6063 rc = -ENOMEM;
6064 goto free_rpi_ids;
6065 }
6066 phba->vpi_ids = kzalloc(count *
6067 sizeof(uint16_t),
6068 GFP_KERNEL);
6069 if (unlikely(!phba->vpi_ids)) {
6070 rc = -ENOMEM;
6071 goto free_vpi_bmask;
6072 }
6073
6074 for (i = 0; i < count; i++)
6075 phba->vpi_ids[i] = base + i;
6076
6077 /* XRIs. */
6078 count = phba->sli4_hba.max_cfg_param.max_xri;
6079 if (count <= 0) {
6080 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6081 "3281 Invalid provisioning of "
6082 "xri:%d\n", count);
6083 rc = -EINVAL;
6084 goto free_vpi_ids;
6085 }
6086 base = phba->sli4_hba.max_cfg_param.xri_base;
6087 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6088 phba->sli4_hba.xri_bmask = kzalloc(longs *
6089 sizeof(unsigned long),
6090 GFP_KERNEL);
6091 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6092 rc = -ENOMEM;
6093 goto free_vpi_ids;
6094 }
41899be7 6095 phba->sli4_hba.max_cfg_param.xri_used = 0;
6096 phba->sli4_hba.xri_ids = kzalloc(count *
6097 sizeof(uint16_t),
6098 GFP_KERNEL);
6099 if (unlikely(!phba->sli4_hba.xri_ids)) {
6100 rc = -ENOMEM;
6101 goto free_xri_bmask;
6102 }
6103
6104 for (i = 0; i < count; i++)
6105 phba->sli4_hba.xri_ids[i] = base + i;
6106
6107 /* VFIs. */
6108 count = phba->sli4_hba.max_cfg_param.max_vfi;
6109 if (count <= 0) {
6110 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6111 "3282 Invalid provisioning of "
6112 "vfi:%d\n", count);
6113 rc = -EINVAL;
6114 goto free_xri_ids;
6115 }
6116 base = phba->sli4_hba.max_cfg_param.vfi_base;
6117 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6118 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6119 sizeof(unsigned long),
6120 GFP_KERNEL);
6121 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6122 rc = -ENOMEM;
6123 goto free_xri_ids;
6124 }
6125 phba->sli4_hba.vfi_ids = kzalloc(count *
6126 sizeof(uint16_t),
6127 GFP_KERNEL);
6128 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6129 rc = -ENOMEM;
6130 goto free_vfi_bmask;
6131 }
6132
6133 for (i = 0; i < count; i++)
6134 phba->sli4_hba.vfi_ids[i] = base + i;
6135
6136 /*
6137 * Mark all resources ready. An HBA reset doesn't need
6138 * to reset the initialization.
6139 */
6140 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6141 LPFC_IDX_RSRC_RDY);
6142 return 0;
6143 }
6144
6145 free_vfi_bmask:
6146 kfree(phba->sli4_hba.vfi_bmask);
cd60be49 6147 phba->sli4_hba.vfi_bmask = NULL;
6148 free_xri_ids:
6149 kfree(phba->sli4_hba.xri_ids);
cd60be49 6150 phba->sli4_hba.xri_ids = NULL;
6151 free_xri_bmask:
6152 kfree(phba->sli4_hba.xri_bmask);
cd60be49 6153 phba->sli4_hba.xri_bmask = NULL;
6154 free_vpi_ids:
6155 kfree(phba->vpi_ids);
cd60be49 6156 phba->vpi_ids = NULL;
6157 free_vpi_bmask:
6158 kfree(phba->vpi_bmask);
cd60be49 6159 phba->vpi_bmask = NULL;
6160 free_rpi_ids:
6161 kfree(phba->sli4_hba.rpi_ids);
cd60be49 6162 phba->sli4_hba.rpi_ids = NULL;
6163 free_rpi_bmask:
6164 kfree(phba->sli4_hba.rpi_bmask);
cd60be49 6165 phba->sli4_hba.rpi_bmask = NULL;
6166 err_exit:
6167 return rc;
6168}
6169
6170/**
6171 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6172 * @phba: Pointer to HBA context object.
6173 *
 6174 * This function releases all SLI4 resource identifiers and the
 6175 * memory used to track them.
6176 **/
6177int
6178lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6179{
6180 if (phba->sli4_hba.extents_in_use) {
6181 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6182 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6183 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6184 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6185 } else {
6186 kfree(phba->vpi_bmask);
16a3a208 6187 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6188 kfree(phba->vpi_ids);
6189 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6190 kfree(phba->sli4_hba.xri_bmask);
6191 kfree(phba->sli4_hba.xri_ids);
6192 kfree(phba->sli4_hba.vfi_bmask);
6193 kfree(phba->sli4_hba.vfi_ids);
6194 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6195 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6196 }
6197
6198 return 0;
6199}
6200
6201/**
6202 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6203 * @phba: Pointer to HBA context object.
6204 * @type: The resource extent type.
6205 * @extnt_count: buffer to hold port extent count response
6206 * @extnt_size: buffer to hold port extent size response.
6207 *
6208 * This function calls the port to read the host allocated extents
6209 * for a particular type.
6210 **/
6211int
6212lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6213 uint16_t *extnt_cnt, uint16_t *extnt_size)
6214{
6215 bool emb;
6216 int rc = 0;
6217 uint16_t curr_blks = 0;
6218 uint32_t req_len, emb_len;
6219 uint32_t alloc_len, mbox_tmo;
6220 struct list_head *blk_list_head;
6221 struct lpfc_rsrc_blks *rsrc_blk;
6222 LPFC_MBOXQ_t *mbox;
6223 void *virtaddr = NULL;
6224 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6225 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6226 union lpfc_sli4_cfg_shdr *shdr;
6227
6228 switch (type) {
6229 case LPFC_RSC_TYPE_FCOE_VPI:
6230 blk_list_head = &phba->lpfc_vpi_blk_list;
6231 break;
6232 case LPFC_RSC_TYPE_FCOE_XRI:
6233 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6234 break;
6235 case LPFC_RSC_TYPE_FCOE_VFI:
6236 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6237 break;
6238 case LPFC_RSC_TYPE_FCOE_RPI:
6239 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6240 break;
6241 default:
6242 return -EIO;
6243 }
6244
 6245 /* Count the number of extents currently allocated for this type. */
6246 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6247 if (curr_blks == 0) {
6248 /*
6249 * The GET_ALLOCATED mailbox does not return the size,
6250 * just the count. The size should be just the size
6251 * stored in the current allocated block and all sizes
6252 * for an extent type are the same so set the return
6253 * value now.
6254 */
6255 *extnt_size = rsrc_blk->rsrc_size;
6256 }
6257 curr_blks++;
6258 }
6259
6260 /*
6261 * Calculate the size of an embedded mailbox. The uint32_t
6262 * accounts for extents-specific word.
6263 */
6264 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6265 sizeof(uint32_t);
6266
6267 /*
6268 * Presume the allocation and response will fit into an embedded
6269 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6270 */
6271 emb = LPFC_SLI4_MBX_EMBED;
6272 req_len = emb_len;
6273 if (req_len > emb_len) {
6274 req_len = curr_blks * sizeof(uint16_t) +
6275 sizeof(union lpfc_sli4_cfg_shdr) +
6276 sizeof(uint32_t);
6277 emb = LPFC_SLI4_MBX_NEMBED;
6278 }
6279
6280 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6281 if (!mbox)
6282 return -ENOMEM;
6283 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6284
6285 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6286 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6287 req_len, emb);
6288 if (alloc_len < req_len) {
6289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6290 "2983 Allocated DMA memory size (x%x) is "
6291 "less than the requested DMA memory "
6292 "size (x%x)\n", alloc_len, req_len);
6293 rc = -ENOMEM;
6294 goto err_exit;
6295 }
6296 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6297 if (unlikely(rc)) {
6298 rc = -EIO;
6299 goto err_exit;
6300 }
6301
6302 if (!phba->sli4_hba.intr_enable)
6303 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6304 else {
a183a15f 6305 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6306 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6307 }
6308
6309 if (unlikely(rc)) {
6310 rc = -EIO;
6311 goto err_exit;
6312 }
6313
6314 /*
6315 * Figure out where the response is located. Then get local pointers
 6316 * to the response data. The port is not guaranteed to honor the full
 6317 * extent count requested, so update the local variable with the count
 6318 * actually allocated by the port.
6319 */
6320 if (emb == LPFC_SLI4_MBX_EMBED) {
6321 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6322 shdr = &rsrc_ext->header.cfg_shdr;
6323 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6324 } else {
6325 virtaddr = mbox->sge_array->addr[0];
6326 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6327 shdr = &n_rsrc->cfg_shdr;
6328 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6329 }
6330
6331 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6332 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6333 "2984 Failed to read allocated resources "
6334 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6335 type,
6336 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6337 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6338 rc = -EIO;
6339 goto err_exit;
6340 }
6341 err_exit:
6342 lpfc_sli4_mbox_cmd_free(phba, mbox);
6343 return rc;
6344}
6345
8a9d2e80 6346/**
0ef69968 6347 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
8a9d2e80 6348 * @phba: pointer to lpfc hba data structure.
6349 * @pring: Pointer to driver SLI ring object.
 6350 * @sgl_list: linked list of sgl buffers to post
6351 * @cnt: number of linked list buffers
8a9d2e80 6352 *
895427bd 6353 * This routine walks the list of buffers that have been allocated and
 6354 * reposts them to the port by using SGL block post. This is needed after a
6355 * pci_function_reset/warm_start or start. It attempts to construct blocks
6356 * of buffer sgls which contains contiguous xris and uses the non-embedded
6357 * SGL block post mailbox commands to post them to the port. For single
6358 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6359 * mailbox command for posting.
6360 *
6361 * Returns: 0 = success, non-zero failure.
6362 **/
6363static int
6364lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6365 struct list_head *sgl_list, int cnt)
6366{
6367 struct lpfc_sglq *sglq_entry = NULL;
6368 struct lpfc_sglq *sglq_entry_next = NULL;
6369 struct lpfc_sglq *sglq_entry_first = NULL;
6370 int status, total_cnt;
6371 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6372 int last_xritag = NO_XRI;
6373 LIST_HEAD(prep_sgl_list);
6374 LIST_HEAD(blck_sgl_list);
6375 LIST_HEAD(allc_sgl_list);
6376 LIST_HEAD(post_sgl_list);
6377 LIST_HEAD(free_sgl_list);
6378
38c20673 6379 spin_lock_irq(&phba->hbalock);
6380 spin_lock(&phba->sli4_hba.sgl_list_lock);
6381 list_splice_init(sgl_list, &allc_sgl_list);
6382 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 6383 spin_unlock_irq(&phba->hbalock);
8a9d2e80 6384
895427bd 6385 total_cnt = cnt;
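	/*
	 * Batch runs of contiguous xritags into blocks of at most
	 * LPFC_NEMBED_MBOX_SGL_CNT sgls for the non-embedded block post;
	 * a gap in the xri sequence closes the current block early.
	 */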
6386 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6387 &allc_sgl_list, list) {
6388 list_del_init(&sglq_entry->list);
6389 block_cnt++;
6390 if ((last_xritag != NO_XRI) &&
6391 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6392 /* a hole in xri block, form a sgl posting block */
6393 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6394 post_cnt = block_cnt - 1;
6395 /* prepare list for next posting block */
6396 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6397 block_cnt = 1;
6398 } else {
6399 /* prepare list for next posting block */
6400 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6401 /* enough sgls for non-embed sgl mbox command */
6402 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6403 list_splice_init(&prep_sgl_list,
6404 &blck_sgl_list);
6405 post_cnt = block_cnt;
6406 block_cnt = 0;
6407 }
6408 }
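		/*
		 * Example: for xritags 10, 11, 12, 20 the jump from 12 to 20
		 * closes a three-entry posting block and starts a new block
		 * at xritag 20.
		 */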
6409 num_posted++;
6410
6411 /* keep track of last sgl's xritag */
6412 last_xritag = sglq_entry->sli4_xritag;
6413
6414 /* end of repost sgl list condition for buffers */
6415 if (num_posted == total_cnt) {
6416 if (post_cnt == 0) {
6417 list_splice_init(&prep_sgl_list,
6418 &blck_sgl_list);
6419 post_cnt = block_cnt;
6420 } else if (block_cnt == 1) {
6421 status = lpfc_sli4_post_sgl(phba,
6422 sglq_entry->phys, 0,
6423 sglq_entry->sli4_xritag);
6424 if (!status) {
6425 /* successful, put sgl to posted list */
6426 list_add_tail(&sglq_entry->list,
6427 &post_sgl_list);
6428 } else {
6429 /* Failure, put sgl to free list */
6430 lpfc_printf_log(phba, KERN_WARNING,
6431 LOG_SLI,
895427bd 6432 "3159 Failed to post "
6433 "sgl, xritag:x%x\n",
6434 sglq_entry->sli4_xritag);
6435 list_add_tail(&sglq_entry->list,
6436 &free_sgl_list);
711ea882 6437 total_cnt--;
6438 }
6439 }
6440 }
6441
6442 /* continue until a nembed page worth of sgls */
6443 if (post_cnt == 0)
6444 continue;
6445
6446 /* post the buffer list sgls as a block */
6447 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6448 post_cnt);
6449
6450 if (!status) {
6451 /* success, put sgl list to posted sgl list */
6452 list_splice_init(&blck_sgl_list, &post_sgl_list);
6453 } else {
6454 /* Failure, put sgl list to free sgl list */
6455 sglq_entry_first = list_first_entry(&blck_sgl_list,
6456 struct lpfc_sglq,
6457 list);
6458 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
895427bd 6459 "3160 Failed to post sgl-list, "
6460 "xritag:x%x-x%x\n",
6461 sglq_entry_first->sli4_xritag,
6462 (sglq_entry_first->sli4_xritag +
6463 post_cnt - 1));
6464 list_splice_init(&blck_sgl_list, &free_sgl_list);
711ea882 6465 total_cnt -= post_cnt;
6466 }
6467
6468 /* don't reset xritag due to hole in xri block */
6469 if (block_cnt == 0)
6470 last_xritag = NO_XRI;
6471
895427bd 6472 /* reset sgl post count for next round of posting */
6473 post_cnt = 0;
6474 }
6475
895427bd 6476 /* free the sgls failed to post */
6477 lpfc_free_sgl_list(phba, &free_sgl_list);
6478
895427bd 6479 /* push sgls posted to the available list */
8a9d2e80 6480 if (!list_empty(&post_sgl_list)) {
38c20673 6481 spin_lock_irq(&phba->hbalock);
6482 spin_lock(&phba->sli4_hba.sgl_list_lock);
6483 list_splice_init(&post_sgl_list, sgl_list);
6484 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 6485 spin_unlock_irq(&phba->hbalock);
6486 } else {
6487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 6488 "3161 Failure to post sgl to port.\n");
8a9d2e80
JS
6489 return -EIO;
6490 }
6491
6492 /* return the number of XRIs actually posted */
6493 return total_cnt;
6494}
6495
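/*
 * Illustrative usage sketch (mirroring the ELS sgl caller later in this
 * file); a positive return value is the count of XRIs actually posted:
 *
 *	rc = lpfc_sli4_repost_sgl_list(phba,
 *				       &phba->sli4_hba.lpfc_els_sgl_list,
 *				       phba->sli4_hba.els_xri_cnt);
 *	if (rc < 0)
 *		return -ENODEV;
 *	phba->sli4_hba.els_xri_cnt = rc;
 */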
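/**
 * lpfc_set_host_data - Set up mailbox to report host OS driver version
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to mailbox object to be initialized.
 *
 * This routine builds an embedded SET_HOST_DATA mailbox command that
 * carries the host OS driver version string ("Linux FC v..." or
 * "Linux FCoE v...") so the port can record which driver is running.
 * The caller is expected to issue the prepared mailbox command.
 **/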
6496void
6497lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6498{
6499 uint32_t len;
6500
6501 len = sizeof(struct lpfc_mbx_set_host_data) -
6502 sizeof(struct lpfc_sli4_cfg_mhdr);
6503 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6504 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6505 LPFC_SLI4_MBX_EMBED);
6506
6507 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6508 mbox->u.mqe.un.set_host_data.param_len =
6509 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6510 snprintf(mbox->u.mqe.un.set_host_data.data,
6511 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6512 "Linux %s v"LPFC_DRIVER_VERSION,
6513 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6514}
6515
da0436e9 6516/**
183b8021 6517 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6518 * @phba: Pointer to HBA context object.
6519 *
6520 * This function is the main SLI4 device initialization PCI function. This
6521 * function is called by the HBA initialization code, HBA reset code and
6522 * HBA error attention handler code. Caller is not required to hold any
6523 * locks.
6524 **/
6525int
6526lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6527{
2d7dbc4c 6528 int rc, i;
6529 LPFC_MBOXQ_t *mboxq;
6530 struct lpfc_mqe *mqe;
6531 uint8_t *vpd;
6532 uint32_t vpd_size;
6533 uint32_t ftr_rsp = 0;
6534 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6535 struct lpfc_vport *vport = phba->pport;
6536 struct lpfc_dmabuf *mp;
2d7dbc4c 6537 struct lpfc_rqb *rqbp;
6538
6539 /* Perform a PCI function reset to start from clean */
6540 rc = lpfc_pci_function_reset(phba);
6541 if (unlikely(rc))
6542 return -ENODEV;
6543
6544 /* Check the HBA Host Status Register for readiness */
6545 rc = lpfc_sli4_post_status_check(phba);
6546 if (unlikely(rc))
6547 return -ENODEV;
6548 else {
6549 spin_lock_irq(&phba->hbalock);
6550 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6551 spin_unlock_irq(&phba->hbalock);
6552 }
6553
6554 /*
6555 * Allocate a single mailbox container for initializing the
6556 * port.
6557 */
6558 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6559 if (!mboxq)
6560 return -ENOMEM;
6561
da0436e9 6562 /* Issue READ_REV to collect vpd and FW information. */
49198b37 6563 vpd_size = SLI4_PAGE_SIZE;
6564 vpd = kzalloc(vpd_size, GFP_KERNEL);
6565 if (!vpd) {
6566 rc = -ENOMEM;
6567 goto out_free_mbox;
6568 }
6569
6570 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6571 if (unlikely(rc)) {
6572 kfree(vpd);
6573 goto out_free_mbox;
6574 }
572709e2 6575
da0436e9 6576 mqe = &mboxq->u.mqe;
f1126688 6577 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
b5c53958 6578 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
76a95d75 6579 phba->hba_flag |= HBA_FCOE_MODE;
6580 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6581 } else {
76a95d75 6582 phba->hba_flag &= ~HBA_FCOE_MODE;
b5c53958 6583 }
6584
6585 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6586 LPFC_DCBX_CEE_MODE)
6587 phba->hba_flag |= HBA_FIP_SUPPORT;
6588 else
6589 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6590
6591 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6592
c31098ce 6593 if (phba->sli_rev != LPFC_SLI_REV4) {
6594 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6595 "0376 READ_REV Error. SLI Level %d "
6596 "FCoE enabled %d\n",
76a95d75 6597 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 6598 rc = -EIO;
6599 kfree(vpd);
6600 goto out_free_mbox;
da0436e9 6601 }
cd1c8301 6602
6603 /*
6604 * Continue initialization with default values even if driver failed
6605 * to read FCoE param config regions; only read the parameters if the
6606 * board is FCoE.
6607 */
6608 if (phba->hba_flag & HBA_FCOE_MODE &&
6609 lpfc_sli4_read_fcoe_params(phba))
6610 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6611 "2570 Failed to read FCoE parameters\n");
6612
6613 /*
6614 * Retrieve the sli4 device physical port name; a failure here
6615 * is considered non-fatal.
6616 */
6617 rc = lpfc_sli4_retrieve_pport_name(phba);
6618 if (!rc)
6619 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6620 "3080 Successful retrieving SLI4 device "
6621 "physical port name: %s.\n", phba->Port);
6622
6623 /*
6624 * Evaluate the read rev and vpd data. Populate the driver
6625 * state with the results. If this routine fails, the failure
6626 * is not fatal as the driver will use generic values.
6627 */
6628 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6629 if (unlikely(!rc)) {
6630 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6631 "0377 Error %d parsing vpd. "
6632 "Using defaults.\n", rc);
6633 rc = 0;
6634 }
76a95d75 6635 kfree(vpd);
da0436e9 6636
6637 /* Save information as VPD data */
6638 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6639 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6640 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6641 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6642 &mqe->un.read_rev);
6643 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6644 &mqe->un.read_rev);
6645 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6646 &mqe->un.read_rev);
6647 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6648 &mqe->un.read_rev);
6649 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6650 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6651 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6652 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6653 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6654 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6655 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6656 "(%d):0380 READ_REV Status x%x "
6657 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6658 mboxq->vport ? mboxq->vport->vpi : 0,
6659 bf_get(lpfc_mqe_status, mqe),
6660 phba->vpd.rev.opFwName,
6661 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6662 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9 6663
6664 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6665 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6666 if (phba->pport->cfg_lun_queue_depth > rc) {
6667 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6668 "3362 LUN queue depth changed from %d to %d\n",
6669 phba->pport->cfg_lun_queue_depth, rc);
6670 phba->pport->cfg_lun_queue_depth = rc;
6671 }
6672
65791f1f 6673 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6674 LPFC_SLI_INTF_IF_TYPE_0) {
6675 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6676 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6677 if (rc == MBX_SUCCESS) {
6678 phba->hba_flag |= HBA_RECOVERABLE_UE;
6679 /* Set 1Sec interval to detect UE */
6680 phba->eratt_poll_interval = 1;
6681 phba->sli4_hba.ue_to_sr = bf_get(
6682 lpfc_mbx_set_feature_UESR,
6683 &mboxq->u.mqe.un.set_feature);
6684 phba->sli4_hba.ue_to_rp = bf_get(
6685 lpfc_mbx_set_feature_UERP,
6686 &mboxq->u.mqe.un.set_feature);
6687 }
6688 }
6689
6690 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6691 /* Enable MDS Diagnostics only if the SLI Port supports it */
6692 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6693 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6694 if (rc != MBX_SUCCESS)
6695 phba->mds_diags_support = 0;
6696 }
572709e2 6697
6698 /*
6699 * Discover the port's supported feature set and match it against the
6700 * host's requests.
6701 */
6702 lpfc_request_features(phba, mboxq);
6703 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6704 if (unlikely(rc)) {
6705 rc = -EIO;
76a95d75 6706 goto out_free_mbox;
6707 }
6708
6709 /*
6710 * The port must support FCP initiator mode as this is the
6711 * only mode running in the host.
6712 */
6713 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6714 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6715 "0378 No support for fcpi mode.\n");
6716 ftr_rsp++;
6717 }
6718 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6719 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6720 else
6721 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6722 /*
6723 * If the port cannot support the host's requested features
6724 * then turn off the global config parameters to disable the
6725 * feature in the driver. This is not a fatal error.
6726 */
6727 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6728 if (phba->cfg_enable_bg) {
6729 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6730 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6731 else
6732 ftr_rsp++;
6733 }
6734
6735 if (phba->max_vpi && phba->cfg_enable_npiv &&
6736 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6737 ftr_rsp++;
6738
6739 if (ftr_rsp) {
6740 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6741 "0379 Feature Mismatch Data: x%08x %08x "
6742 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6743 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6744 phba->cfg_enable_npiv, phba->max_vpi);
6745 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6746 phba->cfg_enable_bg = 0;
6747 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6748 phba->cfg_enable_npiv = 0;
6749 }
6750
6751 /* These SLI3 features are assumed in SLI4 */
6752 spin_lock_irq(&phba->hbalock);
6753 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6754 spin_unlock_irq(&phba->hbalock);
6755
6756 /*
6757 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6758 * calls depend on these resources to complete port setup.
6759 */
6760 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6761 if (rc) {
6762 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6763 "2920 Failed to alloc Resource IDs "
6764 "rc = x%x\n", rc);
6765 goto out_free_mbox;
6766 }
6767
6768 lpfc_set_host_data(phba, mboxq);
6769
6770 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6771 if (rc) {
6772 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6773 "2134 Failed to set host os driver version %x",
6774 rc);
6775 }
6776
da0436e9 6777 /* Read the port's service parameters. */
6778 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6779 if (rc) {
6780 phba->link_state = LPFC_HBA_ERROR;
6781 rc = -ENOMEM;
76a95d75 6782 goto out_free_mbox;
6783 }
6784
6785 mboxq->vport = vport;
6786 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6787 mp = (struct lpfc_dmabuf *) mboxq->context1;
6788 if (rc == MBX_SUCCESS) {
6789 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6790 rc = 0;
6791 }
6792
6793 /*
6794 * This memory was allocated by the lpfc_read_sparam routine. Release
6795 * it to the mbuf pool.
6796 */
6797 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6798 kfree(mp);
6799 mboxq->context1 = NULL;
6800 if (unlikely(rc)) {
6801 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6802 "0382 READ_SPARAM command failed "
6803 "status %d, mbxStatus x%x\n",
6804 rc, bf_get(lpfc_mqe_status, mqe));
6805 phba->link_state = LPFC_HBA_ERROR;
6806 rc = -EIO;
76a95d75 6807 goto out_free_mbox;
6808 }
6809
0558056c 6810 lpfc_update_vport_wwn(vport);
6811
6812 /* Update the fc_host data structures with new wwn. */
6813 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6814 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6815
6816 /* Create all the SLI4 queues */
6817 rc = lpfc_sli4_queue_create(phba);
6818 if (rc) {
6819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6820 "3089 Failed to allocate queues\n");
6821 rc = -ENODEV;
6822 goto out_free_mbox;
6823 }
6824 /* Set up all the queues to the device */
6825 rc = lpfc_sli4_queue_setup(phba);
6826 if (unlikely(rc)) {
6827 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6828 "0381 Error %d during queue setup.\n ", rc);
6829 goto out_stop_timers;
6830 }
6831 /* Initialize the driver internal SLI layer lists. */
6832 lpfc_sli4_setup(phba);
6833 lpfc_sli4_queue_init(phba);
6834
6835 /* update host els xri-sgl sizes and mappings */
6836 rc = lpfc_sli4_els_sgl_update(phba);
6837 if (unlikely(rc)) {
6838 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6839 "1400 Failed to update xri-sgl size and "
6840 "mapping: %d\n", rc);
895427bd 6841 goto out_destroy_queue;
6842 }
6843
8a9d2e80 6844 /* register the els sgl pool to the port */
6845 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
6846 phba->sli4_hba.els_xri_cnt);
6847 if (unlikely(rc < 0)) {
6848 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6849 "0582 Error %d during els sgl post "
6850 "operation\n", rc);
6851 rc = -ENODEV;
895427bd 6852 goto out_destroy_queue;
8a9d2e80 6853 }
895427bd 6854 phba->sli4_hba.els_xri_cnt = rc;
8a9d2e80 6855
6856 if (phba->nvmet_support) {
6857 /* update host nvmet xri-sgl sizes and mappings */
6858 rc = lpfc_sli4_nvmet_sgl_update(phba);
6859 if (unlikely(rc)) {
6860 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6861 "6308 Failed to update nvmet-sgl size "
6862 "and mapping: %d\n", rc);
6863 goto out_destroy_queue;
6864 }
6865
6866 /* register the nvmet sgl pool to the port */
6867 rc = lpfc_sli4_repost_sgl_list(
6868 phba,
6869 &phba->sli4_hba.lpfc_nvmet_sgl_list,
6870 phba->sli4_hba.nvmet_xri_cnt);
6871 if (unlikely(rc < 0)) {
6872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6873 "3117 Error %d during nvmet "
6874 "sgl post\n", rc);
6875 rc = -ENODEV;
6876 goto out_destroy_queue;
6877 }
6878 phba->sli4_hba.nvmet_xri_cnt = rc;
d613b6a7 6879 lpfc_nvmet_create_targetport(phba);
f358dd0c 6880 } else {
6881 /* update host scsi xri-sgl sizes and mappings */
6882 rc = lpfc_sli4_scsi_sgl_update(phba);
6883 if (unlikely(rc)) {
6884 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6885 "6309 Failed to update scsi-sgl size "
6886 "and mapping: %d\n", rc);
6887 goto out_destroy_queue;
6888 }
6889
6890 /* update host nvme xri-sgl sizes and mappings */
6891 rc = lpfc_sli4_nvme_sgl_update(phba);
6892 if (unlikely(rc)) {
6893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6894 "6082 Failed to update nvme-sgl size "
6895 "and mapping: %d\n", rc);
6896 goto out_destroy_queue;
6897 }
6898 }
6899
6900 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6901
6902 /* Post initial buffers to all RQs created */
6903 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6904 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
6905 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
6906 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
6907 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
61f3d4bf 6908 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
6909 rqbp->buffer_count = 0;
6910
6911 lpfc_post_rq_buffer(
6912 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
6913 phba->sli4_hba.nvmet_mrq_data[i],
6914 phba->cfg_nvmet_mrq_post);
6915 }
6916 }
6917
6918 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6919 /* register the allocated scsi sgl pool to the port */
6920 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6921 if (unlikely(rc)) {
6922 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6923 "0383 Error %d during scsi sgl post "
6924 "operation\n", rc);
6925 /* Some Scsi buffers were moved to abort scsi list */
6926 /* A pci function reset will repost them */
6927 rc = -ENODEV;
6928 goto out_destroy_queue;
6929 }
6930 }
6931
6932 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
6933 (phba->nvmet_support == 0)) {
6934
6935 /* register the allocated nvme sgl pool to the port */
6936 rc = lpfc_repost_nvme_sgl_list(phba);
6937 if (unlikely(rc)) {
6938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6939 "6116 Error %d during nvme sgl post "
6940 "operation\n", rc);
6941 /* Some NVME buffers were moved to abort nvme list */
6942 /* A pci function reset will repost them */
6943 rc = -ENODEV;
6944 goto out_destroy_queue;
6945 }
6946 }
6947
6948 /* Post the rpi header region to the device. */
6949 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6950 if (unlikely(rc)) {
6951 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6952 "0393 Error %d during rpi post operation\n",
6953 rc);
6954 rc = -ENODEV;
895427bd 6955 goto out_destroy_queue;
da0436e9 6956 }
97f2ecf1 6957 lpfc_sli4_node_prep(phba);
da0436e9 6958
895427bd 6959 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2d7dbc4c 6960 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
6961 /*
6962 * The FC Port needs to register FCFI (index 0)
6963 */
6964 lpfc_reg_fcfi(phba, mboxq);
6965 mboxq->vport = phba->pport;
6966 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6967 if (rc != MBX_SUCCESS)
6968 goto out_unset_queue;
6969 rc = 0;
6970 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6971 &mboxq->u.mqe.un.reg_fcfi);
6972 } else {
6973 /* We are a NVME Target mode with MRQ > 1 */
6974
6975 /* First register the FCFI */
6976 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
6977 mboxq->vport = phba->pport;
6978 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6979 if (rc != MBX_SUCCESS)
6980 goto out_unset_queue;
6981 rc = 0;
6982 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
6983 &mboxq->u.mqe.un.reg_fcfi_mrq);
6984
6985 /* Next register the MRQs */
6986 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
6987 mboxq->vport = phba->pport;
6988 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6989 if (rc != MBX_SUCCESS)
6990 goto out_unset_queue;
6991 rc = 0;
6992 }
6993 /* Check if the port is configured to be disabled */
6994 lpfc_sli_read_link_ste(phba);
6995 }
6996
6997 /* Arm the CQs and then EQs on device */
6998 lpfc_sli4_arm_cqeq_intr(phba);
6999
7000 /* Indicate device interrupt mode */
7001 phba->sli4_hba.intr_enable = 1;
7002
7003 /* Allow asynchronous mailbox command to go through */
7004 spin_lock_irq(&phba->hbalock);
7005 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7006 spin_unlock_irq(&phba->hbalock);
7007
7008 /* Post receive buffers to the device */
7009 lpfc_sli4_rb_setup(phba);
7010
7011 /* Reset HBA FCF states after HBA reset */
7012 phba->fcf.fcf_flag = 0;
7013 phba->fcf.current_rec.flag = 0;
7014
da0436e9 7015 /* Start the ELS watchdog timer */
8fa38513 7016 mod_timer(&vport->els_tmofunc,
256ec0d0 7017 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7018
7019 /* Start heart beat timer */
7020 mod_timer(&phba->hb_tmofunc,
256ec0d0 7021 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7022 phba->hb_outstanding = 0;
7023 phba->last_completion_time = jiffies;
7024
7025 /* Start error attention (ERATT) polling timer */
256ec0d0 7026 mod_timer(&phba->eratt_poll,
65791f1f 7027 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
da0436e9 7028
7029 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7030 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7031 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7032 if (!rc) {
7033 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7034 "2829 This device supports "
7035 "Advanced Error Reporting (AER)\n");
7036 spin_lock_irq(&phba->hbalock);
7037 phba->hba_flag |= HBA_AER_ENABLED;
7038 spin_unlock_irq(&phba->hbalock);
7039 } else {
7040 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7041 "2830 This device does not support "
7042 "Advanced Error Reporting (AER)\n");
7043 phba->cfg_aer_support = 0;
7044 }
0a96e975 7045 rc = 0;
7046 }
7047
7048 /*
7049 * The port is ready, set the host's link state to LINK_DOWN
7050 * in preparation for link interrupts.
7051 */
7052 spin_lock_irq(&phba->hbalock);
7053 phba->link_state = LPFC_LINK_DOWN;
7054 spin_unlock_irq(&phba->hbalock);
7055 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7056 (phba->hba_flag & LINK_DISABLED)) {
7057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7058 "3103 Adapter Link is disabled.\n");
7059 lpfc_down_link(phba, mboxq);
7060 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7061 if (rc != MBX_SUCCESS) {
7062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7063 "3104 Adapter failed to issue "
7064 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7065 goto out_unset_queue;
7066 }
7067 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7068 /* don't perform init_link on SLI4 FC port loopback test */
7069 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7070 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7071 if (rc)
7072 goto out_unset_queue;
7073 }
7074 }
7075 mempool_free(mboxq, phba->mbox_mem_pool);
7076 return rc;
76a95d75 7077out_unset_queue:
da0436e9 7078 /* Unset all the queues set up in this routine when error out */
7079 lpfc_sli4_queue_unset(phba);
7080out_destroy_queue:
7081 lpfc_sli4_queue_destroy(phba);
da0436e9 7082out_stop_timers:
5350d872 7083 lpfc_stop_hba_timers(phba);
7084out_free_mbox:
7085 mempool_free(mboxq, phba->mbox_mem_pool);
7086 return rc;
7087}
7088
7089/**
7090 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7091 * @ptr: context object - pointer to hba structure.
7092 *
7093 * This is the callback function for mailbox timer. The mailbox
7094 * timer is armed when a new mailbox command is issued and the timer
7095 * is deleted when the mailbox complete. The function is called by
7096 * the kernel timer code when a mailbox does not complete within
7097 * expected time. This function wakes up the worker thread to
7098 * process the mailbox timeout and returns. All the processing is
7099 * done by the worker thread function lpfc_mbox_timeout_handler.
7100 **/
7101void
7102lpfc_mbox_timeout(unsigned long ptr)
7103{
7104 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
7105 unsigned long iflag;
7106 uint32_t tmo_posted;
7107
7108 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7109 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7110 if (!tmo_posted)
7111 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7112 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7113
7114 if (!tmo_posted)
7115 lpfc_worker_wake_up(phba);
7116 return;
7117}
7118
7119/**
7120 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7121 * are pending
7122 * @phba: Pointer to HBA context object.
7123 *
7124 * This function checks if any mailbox completions are present on the mailbox
7125 * completion queue.
7126 **/
3bb11fc5 7127static bool
7128lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7129{
7130
7131 uint32_t idx;
7132 struct lpfc_queue *mcq;
7133 struct lpfc_mcqe *mcqe;
7134 bool pending_completions = false;
7135
7136 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7137 return false;
7138
7139 /* Check for completions on mailbox completion queue */
7140
7141 mcq = phba->sli4_hba.mbx_cq;
7142 idx = mcq->hba_index;
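	/*
	 * Walk the CQ from the host's current index until the valid bit
	 * clears or the walk wraps back to the starting index; a completed,
	 * non-async MCQE means a mailbox completion is waiting.
	 */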
7143 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
7144 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7145 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7146 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7147 pending_completions = true;
7148 break;
7149 }
7150 idx = (idx + 1) % mcq->entry_count;
7151 if (mcq->hba_index == idx)
7152 break;
7153 }
7154 return pending_completions;
7155
7156}
7157
7158/**
7159 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7160 * that were missed.
7161 * @phba: Pointer to HBA context object.
7162 *
7163 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7164 * may be missed, causing erroneous mailbox timeouts to occur. This function
7165 * checks to see if mbox completions are on the mailbox completion queue
7166 * and will process all the completions associated with the eq for the
7167 * mailbox completion queue.
7168 **/
7169bool
7170lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7171{
7172
7173 uint32_t eqidx;
7174 struct lpfc_queue *fpeq = NULL;
7175 struct lpfc_eqe *eqe;
7176 bool mbox_pending;
7177
7178 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7179 return false;
7180
7181 /* Find the eq associated with the mcq */
7182
7183 if (phba->sli4_hba.hba_eq)
895427bd 7184 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7185 if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
7186 phba->sli4_hba.mbx_cq->assoc_qid) {
7187 fpeq = phba->sli4_hba.hba_eq[eqidx];
7188 break;
7189 }
7190 if (!fpeq)
7191 return false;
7192
7193 /* Turn off interrupts from this EQ */
7194
7195 lpfc_sli4_eq_clr_intr(fpeq);
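	/*
	 * With this EQ silenced, the queue can be checked and drained here
	 * without racing the interrupt handler for the same events.
	 */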
7196
7197 /* Check to see if a mbox completion is pending */
7198
7199 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7200
7201 /*
7202 * If a mbox completion is pending, process all the events on EQ
7203 * associated with the mbox completion queue (this could include
7204 * mailbox commands, async events, els commands, receive queue data
7205 * and fcp commands)
7206 */
7207
7208 if (mbox_pending)
7209 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7210 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7211 fpeq->EQ_processed++;
7212 }
7213
7214 /* Always clear and re-arm the EQ */
7215
7216 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7217
7218 return mbox_pending;
7219
7220}
7221
7222/**
7223 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7224 * @phba: Pointer to HBA context object.
7225 *
7226 * This function is called from worker thread when a mailbox command times out.
7227 * The caller is not required to hold any locks. This function will reset the
7228 * HBA and recover all the pending commands.
7229 **/
7230void
7231lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7232{
7233 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7234 MAILBOX_t *mb = NULL;
7235
da0436e9 7236 struct lpfc_sli *psli = &phba->sli;
da0436e9 7237
7238 /* If the mailbox completed, process the completion and return */
7239 if (lpfc_sli4_process_missed_mbox_completions(phba))
7240 return;
7241
7242 if (pmbox != NULL)
7243 mb = &pmbox->u.mb;
7244 /* Check the pmbox pointer first. There is a race condition
7245 * between the mbox timeout handler getting executed in the
7246 * worklist and the mailbox actually completing. When this
7247 * race condition occurs, the mbox_active will be NULL.
7248 */
7249 spin_lock_irq(&phba->hbalock);
7250 if (pmbox == NULL) {
7251 lpfc_printf_log(phba, KERN_WARNING,
7252 LOG_MBOX | LOG_SLI,
7253 "0353 Active Mailbox cleared - mailbox timeout "
7254 "exiting\n");
7255 spin_unlock_irq(&phba->hbalock);
7256 return;
7257 }
7258
7259 /* Mbox cmd <mbxCommand> timeout */
7260 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7261 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7262 mb->mbxCommand,
7263 phba->pport->port_state,
7264 phba->sli.sli_flag,
7265 phba->sli.mbox_active);
7266 spin_unlock_irq(&phba->hbalock);
7267
7268 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7269 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 7270 * it to fail all outstanding SCSI IO.
7271 */
7272 spin_lock_irq(&phba->pport->work_port_lock);
7273 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7274 spin_unlock_irq(&phba->pport->work_port_lock);
7275 spin_lock_irq(&phba->hbalock);
7276 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 7277 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7278 spin_unlock_irq(&phba->hbalock);
7279
db55fba8 7280 lpfc_sli_abort_fcp_rings(phba);
7281
7282 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7283 "0345 Resetting board due to mailbox timeout\n");
7284
7285 /* Reset the HBA device */
7286 lpfc_reset_hba(phba);
7287}
7288
7289/**
7290 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7291 * @phba: Pointer to HBA context object.
7292 * @pmbox: Pointer to mailbox object.
7293 * @flag: Flag indicating how the mailbox need to be processed.
7294 *
7295 * This function is called by discovery code and HBA management code
7296 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7297 * function gets the hbalock to protect the data structures.
7298 * The mailbox command can be submitted in polling mode, in which case
7299 * this function will wait in a polling loop for the completion of the
7300 * mailbox.
7301 * If the mailbox is submitted in no_wait mode (not polling) the
7302 * function will submit the command and return immediately without waiting
7303 * for the mailbox completion. The no_wait is supported only when HBA
7304 * is in SLI2/SLI3 mode - interrupts are enabled.
7305 * The SLI interface allows only one mailbox pending at a time. If the
7306 * mailbox is issued in polling mode and there is already a mailbox
7307 * pending, then the function will return an error. If the mailbox is issued
7308 * in NO_WAIT mode and there is a mailbox pending already, the function
7309 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7310 * The sli layer owns the mailbox object until the completion of mailbox
7311 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7312 * return codes the caller owns the mailbox command after the return of
7313 * the function.
e59058c4 7314 **/
7315static int
7316lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7317 uint32_t flag)
dea3101e 7318{
bf07bdea 7319 MAILBOX_t *mbx;
2e0fef85 7320 struct lpfc_sli *psli = &phba->sli;
dea3101e 7321 uint32_t status, evtctr;
9940b97b 7322 uint32_t ha_copy, hc_copy;
dea3101e 7323 int i;
09372820 7324 unsigned long timeout;
dea3101e 7325 unsigned long drvr_flag = 0;
34b02dcd 7326 uint32_t word0, ldata;
dea3101e 7327 void __iomem *to_slim;
7328 int processing_queue = 0;
7329
7330 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7331 if (!pmbox) {
8568a4d2 7332 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 7333 /* processing mbox queue from intr_handler */
7334 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7335 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7336 return MBX_SUCCESS;
7337 }
58da1ffb 7338 processing_queue = 1;
7339 pmbox = lpfc_mbox_get(phba);
7340 if (!pmbox) {
7341 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7342 return MBX_SUCCESS;
7343 }
7344 }
dea3101e 7345
ed957684 7346 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 7347 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 7348 if(!pmbox->vport) {
58da1ffb 7349 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 7350 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 7351 LOG_MBOX | LOG_VPORT,
e8b62011 7352 "1806 Mbox x%x failed. No vport\n",
3772a991 7353 pmbox->u.mb.mbxCommand);
ed957684 7354 dump_stack();
58da1ffb 7355 goto out_not_finished;
ed957684
JS
7356 }
7357 }
7358
8d63f375 7359 /* If the PCI channel is in offline state, do not post mbox. */
7360 if (unlikely(pci_channel_offline(phba->pcidev))) {
7361 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7362 goto out_not_finished;
7363 }
8d63f375 7364
7365 /* If HBA has a deferred error attention, fail the iocb. */
7366 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7367 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7368 goto out_not_finished;
7369 }
7370
dea3101e 7371 psli = &phba->sli;
92d7f7b0 7372
bf07bdea 7373 mbx = &pmbox->u.mb;
dea3101e 7374 status = MBX_SUCCESS;
7375
7376 if (phba->link_state == LPFC_HBA_ERROR) {
7377 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7378
7379 /* Mbox command <mbxCommand> cannot issue */
7380 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7381 "(%d):0311 Mailbox command x%x cannot "
7382 "issue Data: x%x x%x\n",
7383 pmbox->vport ? pmbox->vport->vpi : 0,
7384 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 7385 goto out_not_finished;
7386 }
7387
bf07bdea 7388 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7389 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7390 !(hc_copy & HC_MBINT_ENA)) {
7391 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7392 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7393 "(%d):2528 Mailbox command x%x cannot "
7394 "issue Data: x%x x%x\n",
7395 pmbox->vport ? pmbox->vport->vpi : 0,
7396 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7397 goto out_not_finished;
7398 }
7399 }
7400
dea3101e 7401 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7402 /* Polling for a mbox command when another one is already active
7403 * is not allowed in SLI. Also, the driver must have established
7404 * SLI2 mode to queue and process multiple mbox commands.
7405 */
7406
7407 if (flag & MBX_POLL) {
2e0fef85 7408 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 7409
7410 /* Mbox command <mbxCommand> cannot issue */
7411 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7412 "(%d):2529 Mailbox command x%x "
7413 "cannot issue Data: x%x x%x\n",
7414 pmbox->vport ? pmbox->vport->vpi : 0,
7415 pmbox->u.mb.mbxCommand,
7416 psli->sli_flag, flag);
58da1ffb 7417 goto out_not_finished;
dea3101e 7418 }
7419
3772a991 7420 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 7421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 7422 /* Mbox command <mbxCommand> cannot issue */
7423 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7424 "(%d):2530 Mailbox command x%x "
7425 "cannot issue Data: x%x x%x\n",
7426 pmbox->vport ? pmbox->vport->vpi : 0,
7427 pmbox->u.mb.mbxCommand,
7428 psli->sli_flag, flag);
58da1ffb 7429 goto out_not_finished;
dea3101e 7430 }
7431
dea3101e 7432 /* Another mailbox command is still being processed, queue this
7433 * command to be processed later.
7434 */
7435 lpfc_mbox_put(phba, pmbox);
7436
7437 /* Mbox cmd issue - BUSY */
ed957684 7438 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 7439 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 7440 "x%x x%x x%x x%x\n",
92d7f7b0 7441 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
bf07bdea 7442 mbx->mbxCommand, phba->pport->port_state,
92d7f7b0 7443 psli->sli_flag, flag);
dea3101e 7444
7445 psli->slistat.mbox_busy++;
2e0fef85 7446 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 7447
7448 if (pmbox->vport) {
7449 lpfc_debugfs_disc_trc(pmbox->vport,
7450 LPFC_DISC_TRC_MBOX_VPORT,
7451 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7452 (uint32_t)mbx->mbxCommand,
7453 mbx->un.varWords[0], mbx->un.varWords[1]);
7454 }
7455 else {
7456 lpfc_debugfs_disc_trc(phba->pport,
7457 LPFC_DISC_TRC_MBOX,
7458 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7459 (uint32_t)mbx->mbxCommand,
7460 mbx->un.varWords[0], mbx->un.varWords[1]);
7461 }
7462
2e0fef85 7463 return MBX_BUSY;
dea3101e 7464 }
7465
dea3101e 7466 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7467
7468 /* If we are not polling, we MUST be in SLI2 mode */
7469 if (flag != MBX_POLL) {
3772a991 7470 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 7471 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 7472 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 7473 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 7474 /* Mbox command <mbxCommand> cannot issue */
7475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7476 "(%d):2531 Mailbox command x%x "
7477 "cannot issue Data: x%x x%x\n",
7478 pmbox->vport ? pmbox->vport->vpi : 0,
7479 pmbox->u.mb.mbxCommand,
7480 psli->sli_flag, flag);
58da1ffb 7481 goto out_not_finished;
dea3101e 7482 }
7483 /* timeout active mbox command */
7484 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7485 1000);
7486 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e 7487 }
7488
7489 /* Mailbox cmd <cmd> issue */
ed957684 7490 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 7491 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 7492 "x%x\n",
e8b62011 7493 pmbox->vport ? pmbox->vport->vpi : 0,
bf07bdea 7494 mbx->mbxCommand, phba->pport->port_state,
92d7f7b0 7495 psli->sli_flag, flag);
dea3101e 7496
bf07bdea 7497 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7498 if (pmbox->vport) {
7499 lpfc_debugfs_disc_trc(pmbox->vport,
7500 LPFC_DISC_TRC_MBOX_VPORT,
7501 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7502 (uint32_t)mbx->mbxCommand,
7503 mbx->un.varWords[0], mbx->un.varWords[1]);
7504 }
7505 else {
7506 lpfc_debugfs_disc_trc(phba->pport,
7507 LPFC_DISC_TRC_MBOX,
7508 "MBOX Send: cmd:x%x mb:x%x x%x",
7509 (uint32_t)mbx->mbxCommand,
7510 mbx->un.varWords[0], mbx->un.varWords[1]);
7511 }
7512 }
7513
dea3101e 7514 psli->slistat.mbox_cmd++;
7515 evtctr = psli->slistat.mbox_event;
7516
7517 /* next set own bit for the adapter and copy over command word */
bf07bdea 7518 mbx->mbxOwner = OWN_CHIP;
dea3101e 7519
3772a991 7520 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7521 /* Populate mbox extension offset word. */
7522 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 7523 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7524 = (uint8_t *)phba->mbox_ext
7525 - (uint8_t *)phba->mbox;
7526 }
7527
7528 /* Copy the mailbox extension data */
7529 if (pmbox->in_ext_byte_len && pmbox->context2) {
7530 lpfc_sli_pcimem_bcopy(pmbox->context2,
7531 (uint8_t *)phba->mbox_ext,
7532 pmbox->in_ext_byte_len);
7533 }
7534 /* Copy command data to host SLIM area */
bf07bdea 7535 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 7536 } else {
7537 /* Populate mbox extension offset word. */
7538 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 7539 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7540 = MAILBOX_HBA_EXT_OFFSET;
7541
7542 /* Copy the mailbox extension data */
895427bd 7543 if (pmbox->in_ext_byte_len && pmbox->context2)
7544 lpfc_memcpy_to_slim(phba->MBslimaddr +
7545 MAILBOX_HBA_EXT_OFFSET,
7546 pmbox->context2, pmbox->in_ext_byte_len);
7547
895427bd 7548 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 7549 /* copy command data into host mbox for cmpl */
7550 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7551 MAILBOX_CMD_SIZE);
dea3101e 7552
7553 /* First copy mbox command data to HBA SLIM, skip past first
7554 word */
7555 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 7556 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e 7557 MAILBOX_CMD_SIZE - sizeof (uint32_t));
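		/*
		 * Word 0 holds mbxOwner; it is written last (below, with
		 * OWN_CHIP set) so the adapter sees the command only after
		 * the rest of the mailbox is already in SLIM.
		 */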
7558
7559 /* Next copy over first word, with mbxOwner set */
bf07bdea 7560 ldata = *((uint32_t *)mbx);
dea3101e 7561 to_slim = phba->MBslimaddr;
7562 writel(ldata, to_slim);
7563 readl(to_slim); /* flush */
7564
895427bd 7565 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 7566 /* switch over to host mailbox */
3772a991 7567 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 7568 }
7569
7570 wmb();
dea3101e 7571
7572 switch (flag) {
7573 case MBX_NOWAIT:
09372820 7574 /* Set up reference to mailbox command */
dea3101e 7575 psli->mbox_active = pmbox;
7576 /* Interrupt board to do it */
7577 writel(CA_MBATT, phba->CAregaddr);
7578 readl(phba->CAregaddr); /* flush */
7579 /* Don't wait for it to finish, just return */
dea3101e 7580 break;
7581
7582 case MBX_POLL:
09372820 7583 /* Set up null reference to mailbox command */
dea3101e 7584 psli->mbox_active = NULL;
7585 /* Interrupt board to do it */
7586 writel(CA_MBATT, phba->CAregaddr);
7587 readl(phba->CAregaddr); /* flush */
7588
3772a991 7589 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 7590 /* First read mbox status word */
34b02dcd 7591 word0 = *((uint32_t *)phba->mbox);
dea3101e 7592 word0 = le32_to_cpu(word0);
7593 } else {
7594 /* First read mbox status word */
7595 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7596 spin_unlock_irqrestore(&phba->hbalock,
7597 drvr_flag);
7598 goto out_not_finished;
7599 }
dea3101e 7600 }
7601
7602 /* Read the HBA Host Attention Register */
7603 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7604 spin_unlock_irqrestore(&phba->hbalock,
7605 drvr_flag);
7606 goto out_not_finished;
7607 }
7608 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7609 1000) + jiffies;
09372820 7610 i = 0;
dea3101e 7611 /* Wait for command to complete */
7612 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7613 (!(ha_copy & HA_MBATT) &&
2e0fef85 7614 (phba->link_state > LPFC_WARM_START))) {
09372820 7615 if (time_after(jiffies, timeout)) {
dea3101e 7616 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 7617 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 7618 drvr_flag);
58da1ffb 7619 goto out_not_finished;
dea3101e 7620 }
7621
7622 /* Check if we took a mbox interrupt while we were
7623 polling */
7624 if (((word0 & OWN_CHIP) != OWN_CHIP)
7625 && (evtctr != psli->slistat.mbox_event))
7626 break;
7627
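			/*
			 * Busy-wait for the first several iterations, then
			 * back off: drop hbalock and sleep 1ms per pass so
			 * a slow mailbox does not spin the CPU with the
			 * lock held.
			 */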
7628 if (i++ > 10) {
7629 spin_unlock_irqrestore(&phba->hbalock,
7630 drvr_flag);
7631 msleep(1);
7632 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7633 }
dea3101e 7634
3772a991 7635 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 7636 /* First copy command data */
34b02dcd 7637 word0 = *((uint32_t *)phba->mbox);
dea3101e 7638 word0 = le32_to_cpu(word0);
bf07bdea 7639 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 7640 MAILBOX_t *slimmb;
34b02dcd 7641 uint32_t slimword0;
dea3101e 7642 /* Check real SLIM for any errors */
7643 slimword0 = readl(phba->MBslimaddr);
7644 slimmb = (MAILBOX_t *) & slimword0;
7645 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7646 && slimmb->mbxStatus) {
7647 psli->sli_flag &=
3772a991 7648 ~LPFC_SLI_ACTIVE;
dea3101e 7649 word0 = slimword0;
7650 }
7651 }
7652 } else {
7653 /* First copy command data */
7654 word0 = readl(phba->MBslimaddr);
7655 }
7656 /* Read the HBA Host Attention Register */
7657 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7658 spin_unlock_irqrestore(&phba->hbalock,
7659 drvr_flag);
7660 goto out_not_finished;
7661 }
dea3101e 7662 }
7663
3772a991 7664 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 7665 /* copy results back to user */
7666 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7667 MAILBOX_CMD_SIZE);
7668 /* Copy the mailbox extension data */
7669 if (pmbox->out_ext_byte_len && pmbox->context2) {
7670 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7671 pmbox->context2,
7672 pmbox->out_ext_byte_len);
7673 }
dea3101e 7674 } else {
7675 /* First copy command data */
bf07bdea 7676 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
2ea259ee 7677 MAILBOX_CMD_SIZE);
7678 /* Copy the mailbox extension data */
7679 if (pmbox->out_ext_byte_len && pmbox->context2) {
7680 lpfc_memcpy_from_slim(pmbox->context2,
7681 phba->MBslimaddr +
7682 MAILBOX_HBA_EXT_OFFSET,
7683 pmbox->out_ext_byte_len);
dea3101e 7684 }
7685 }
7686
7687 writel(HA_MBATT, phba->HAregaddr);
7688 readl(phba->HAregaddr); /* flush */
7689
7690 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 7691 status = mbx->mbxStatus;
dea3101e 7692 }
7693
7694 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7695 return status;
7696
7697out_not_finished:
7698 if (processing_queue) {
da0436e9 7699 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7700 lpfc_mbox_cmpl_put(phba, pmbox);
7701 }
7702 return MBX_NOT_FINISHED;
dea3101e 7703}
7704
7705/**
7706 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7707 * @phba: Pointer to HBA context object.
7708 *
7709 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7710 * the driver internal pending mailbox queue. It will then try to wait out the
7711 * possible outstanding mailbox command before returning.
7712 *
7713 * Returns:
7714 * 0 - the outstanding mailbox command completed; otherwise, the wait for
7715 * the outstanding mailbox command timed out.
7716 **/
7717static int
7718lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7719{
7720 struct lpfc_sli *psli = &phba->sli;
f1126688 7721 int rc = 0;
a183a15f 7722 unsigned long timeout = 0;
7723
7724 /* Mark the asynchronous mailbox command posting as blocked */
7725 spin_lock_irq(&phba->hbalock);
7726 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7727 /* Determine how long we might wait for the active mailbox
7728 * command to be gracefully completed by firmware.
7729 */
7730 if (phba->sli.mbox_active)
7731 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7732 phba->sli.mbox_active) *
7733 1000) + jiffies;
7734 spin_unlock_irq(&phba->hbalock);
7735
7736 /* Make sure the mailbox is really active */
7737 if (timeout)
7738 lpfc_sli4_process_missed_mbox_completions(phba);
7739
7740 /* Wait for the outstanding mailbox command to complete */
7741 while (phba->sli.mbox_active) {
7742 /* Check active mailbox complete status every 2ms */
7743 msleep(2);
7744 if (time_after(jiffies, timeout)) {
7745 /* Timeout, marked the outstanding cmd not complete */
7746 rc = 1;
7747 break;
7748 }
7749 }
7750
7751 /* Can not cleanly block async mailbox command, fails it */
7752 if (rc) {
7753 spin_lock_irq(&phba->hbalock);
7754 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7755 spin_unlock_irq(&phba->hbalock);
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7762 * @phba: Pointer to HBA context object.
7763 *
7764 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7765 * commands from the driver internal pending mailbox queue. It makes sure
7766 * that there is no outstanding mailbox command before resuming posting
7767 * asynchronous mailbox commands. If, for any reason, there is outstanding
7768 * mailbox command, it will try to wait it out before resuming asynchronous
7769 * mailbox command posting.
7770 **/
7771static void
7772lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7773{
7774 struct lpfc_sli *psli = &phba->sli;
7775
7776 spin_lock_irq(&phba->hbalock);
7777 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7778 /* Asynchronous mailbox posting is not blocked, do nothing */
7779 spin_unlock_irq(&phba->hbalock);
7780 return;
7781 }
7782
7783 /* An outstanding synchronous mailbox command is guaranteed to complete,
7784 * either successfully or by timeout; after a timeout the outstanding
7785 * command is always removed. So just unblock the posting of async
7786 * mailbox commands and resume.
7787 */
7788 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7789 spin_unlock_irq(&phba->hbalock);
7790
7791 /* wake up worker thread to post asynchronous mailbox command */
7792 lpfc_worker_wake_up(phba);
7793}
7794
7795/**
7796 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7797 * @phba: Pointer to HBA context object.
7798 * @mboxq: Pointer to mailbox object.
7799 *
7800 * The function waits for the bootstrap mailbox register ready bit from
7801 * the port for twice the regular mailbox command timeout value.
7802 *
7803 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7804 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7805 **/
7806static int
7807lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7808{
7809 uint32_t db_ready;
7810 unsigned long timeout;
7811 struct lpfc_register bmbx_reg;
7812
7813 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7814 * 1000) + jiffies;
7815
7816 do {
7817 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7818 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7819 if (!db_ready)
7820 msleep(2);
7821
7822 if (time_after(jiffies, timeout))
7823 return MBXERR_ERROR;
7824 } while (!db_ready);
7825
7826 return 0;
7827}
7828
7829/**
7830 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7831 * @phba: Pointer to HBA context object.
7832 * @mboxq: Pointer to mailbox object.
7833 *
7834 * The function posts a mailbox to the port. The mailbox is expected
7835 * to be completely filled in and ready for the port to operate on it.
7836 * This routine executes a synchronous completion operation on the
7837 * mailbox by polling for its completion.
7838 *
7839 * The caller must not be holding any locks when calling this routine.
7840 *
7841 * Returns:
7842 * MBX_SUCCESS - mailbox posted successfully
7843 * Any of the MBX error values.
7844 **/
7845static int
7846lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7847{
7848 int rc = MBX_SUCCESS;
7849 unsigned long iflag;
7850 uint32_t mcqe_status;
7851 uint32_t mbx_cmnd;
7852 struct lpfc_sli *psli = &phba->sli;
7853 struct lpfc_mqe *mb = &mboxq->u.mqe;
7854 struct lpfc_bmbx_create *mbox_rgn;
7855 struct dma_address *dma_address;
7856
7857 /*
7858 * Only one mailbox can be active to the bootstrap mailbox region
7859 * at a time and there is no queueing provided.
7860 */
7861 spin_lock_irqsave(&phba->hbalock, iflag);
7862 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7863 spin_unlock_irqrestore(&phba->hbalock, iflag);
7864 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7865 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7866 "cannot issue Data: x%x x%x\n",
7867 mboxq->vport ? mboxq->vport->vpi : 0,
7868 mboxq->u.mb.mbxCommand,
7869 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7870 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7871 psli->sli_flag, MBX_POLL);
7872 return MBXERR_ERROR;
7873 }
7874 /* The server grabs the token and owns it until release */
7875 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7876 phba->sli.mbox_active = mboxq;
7877 spin_unlock_irqrestore(&phba->hbalock, iflag);
7878
7879 /* wait for bootstrap mbox register readiness */
7880 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7881 if (rc)
7882 goto exit;
7883
7884 /*
7885 * Initialize the bootstrap memory region to avoid stale data areas
7886 * in the mailbox post. Then copy the caller's mailbox contents to
7887 * the bmbx mailbox region.
7888 */
7889 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7890 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7891 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7892 sizeof(struct lpfc_mqe));
7893
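	/*
	 * The bootstrap mailbox doorbell takes the 64-bit DMA address of
	 * the bmbx region in two writes, high half then low half, each
	 * gated on the ready bit; the low-half write triggers the port to
	 * process the command.
	 */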
7894 /* Post the high mailbox dma address to the port and wait for ready. */
7895 dma_address = &phba->sli4_hba.bmbx.dma_address;
7896 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7897
7898 /* wait for bootstrap mbox register for hi-address write done */
7899 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7900 if (rc)
7901 goto exit;
7902
7903 /* Post the low mailbox dma address to the port. */
7904 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 7905
7906 /* wait for bootstrap mbox register for low address write done */
7907 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7908 if (rc)
7909 goto exit;
7910
7911 /*
7912 * Read the CQ to ensure the mailbox has completed.
7913 * If so, update the mailbox status so that the upper layers
7914 * can complete the request normally.
7915 */
7916 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7917 sizeof(struct lpfc_mqe));
7918 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7919 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7920 sizeof(struct lpfc_mcqe));
7921 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7922 /*
7923 * When the CQE status indicates a failure and the mailbox status
7924 * indicates success then copy the CQE status into the mailbox status
7925 * (and prefix it with x4000).
7926 */
da0436e9 7927 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7928 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7929 bf_set(lpfc_mqe_status, mb,
7930 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 7931 rc = MBXERR_ERROR;
7932 } else
7933 lpfc_sli4_swap_str(phba, mboxq);
7934
7935 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7936 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7937 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7938 " x%x x%x CQ: x%x x%x x%x x%x\n",
7939 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7940 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7941 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7942 bf_get(lpfc_mqe_status, mb),
7943 mb->un.mb_words[0], mb->un.mb_words[1],
7944 mb->un.mb_words[2], mb->un.mb_words[3],
7945 mb->un.mb_words[4], mb->un.mb_words[5],
7946 mb->un.mb_words[6], mb->un.mb_words[7],
7947 mb->un.mb_words[8], mb->un.mb_words[9],
7948 mb->un.mb_words[10], mb->un.mb_words[11],
7949 mb->un.mb_words[12], mboxq->mcqe.word0,
7950 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7951 mboxq->mcqe.trailer);
7952exit:
7953 /* We are holding the token, no need for lock when releasing */
7954 spin_lock_irqsave(&phba->hbalock, iflag);
7955 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7956 phba->sli.mbox_active = NULL;
7957 spin_unlock_irqrestore(&phba->hbalock, iflag);
7958 return rc;
7959}
7960
7961/**
7962 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7963 * @phba: Pointer to HBA context object.
7964 * @pmbox: Pointer to mailbox object.
7965 * @flag: Flag indicating how the mailbox need to be processed.
7966 *
7967 * This function is called by discovery code and HBA management code to submit
7968 * a mailbox command to firmware with SLI-4 interface spec.
7969 *
7970 * Return codes the caller owns the mailbox command after the return of the
7971 * function.
7972 **/
7973static int
7974lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7975 uint32_t flag)
7976{
7977 struct lpfc_sli *psli = &phba->sli;
7978 unsigned long iflags;
7979 int rc;
7980
b76f2dc9
JS
7981	/* dump the issued mailbox command if idiag dump is set up */
7982 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7983
8fa38513
JS
7984 rc = lpfc_mbox_dev_check(phba);
7985 if (unlikely(rc)) {
7986 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7987 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8fa38513
JS
7988 "cannot issue Data: x%x x%x\n",
7989 mboxq->vport ? mboxq->vport->vpi : 0,
7990 mboxq->u.mb.mbxCommand,
a183a15f
JS
7991 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7992 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8fa38513
JS
7993 psli->sli_flag, flag);
7994 goto out_not_finished;
7995 }
7996
da0436e9
JS
7997 /* Detect polling mode and jump to a handler */
7998 if (!phba->sli4_hba.intr_enable) {
7999 if (flag == MBX_POLL)
8000 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8001 else
8002 rc = -EIO;
8003 if (rc != MBX_SUCCESS)
0558056c 8004 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 8005 "(%d):2541 Mailbox command x%x "
cc459f19
JS
8006 "(x%x/x%x) failure: "
8007 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8008 "Data: x%x x%x\n,",
da0436e9
JS
8009 mboxq->vport ? mboxq->vport->vpi : 0,
8010 mboxq->u.mb.mbxCommand,
a183a15f
JS
8011 lpfc_sli_config_mbox_subsys_get(phba,
8012 mboxq),
8013 lpfc_sli_config_mbox_opcode_get(phba,
8014 mboxq),
cc459f19
JS
8015 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8016 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8017 bf_get(lpfc_mcqe_ext_status,
8018 &mboxq->mcqe),
da0436e9
JS
8019 psli->sli_flag, flag);
8020 return rc;
8021 } else if (flag == MBX_POLL) {
f1126688
JS
8022 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8023 "(%d):2542 Try to issue mailbox command "
a183a15f 8024 "x%x (x%x/x%x) synchronously ahead of async"
f1126688 8025 "mailbox command queue: x%x x%x\n",
da0436e9
JS
8026 mboxq->vport ? mboxq->vport->vpi : 0,
8027 mboxq->u.mb.mbxCommand,
a183a15f
JS
8028 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8029 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 8030 psli->sli_flag, flag);
f1126688
JS
8031 /* Try to block the asynchronous mailbox posting */
8032 rc = lpfc_sli4_async_mbox_block(phba);
8033 if (!rc) {
8034 /* Successfully blocked, now issue sync mbox cmd */
8035 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8036 if (rc != MBX_SUCCESS)
cc459f19 8037 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 8038 LOG_MBOX | LOG_SLI,
cc459f19
JS
8039 "(%d):2597 Sync Mailbox command "
8040 "x%x (x%x/x%x) failure: "
8041 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8042 "Data: x%x x%x\n,",
8043 mboxq->vport ? mboxq->vport->vpi : 0,
a183a15f
JS
8044 mboxq->u.mb.mbxCommand,
8045 lpfc_sli_config_mbox_subsys_get(phba,
8046 mboxq),
8047 lpfc_sli_config_mbox_opcode_get(phba,
8048 mboxq),
cc459f19
JS
8049 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8050 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8051 bf_get(lpfc_mcqe_ext_status,
8052 &mboxq->mcqe),
a183a15f 8053 psli->sli_flag, flag);
f1126688
JS
8054 /* Unblock the async mailbox posting afterward */
8055 lpfc_sli4_async_mbox_unblock(phba);
8056 }
8057 return rc;
da0436e9
JS
8058 }
8059
8060	/* Now, interrupt mode asynchronous mailbox command */
8061 rc = lpfc_mbox_cmd_check(phba, mboxq);
8062 if (rc) {
8063 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8064 "(%d):2543 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8065 "cannot issue Data: x%x x%x\n",
8066 mboxq->vport ? mboxq->vport->vpi : 0,
8067 mboxq->u.mb.mbxCommand,
a183a15f
JS
8068 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8069 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8070 psli->sli_flag, flag);
8071 goto out_not_finished;
8072 }
da0436e9
JS
8073
8074 /* Put the mailbox command to the driver internal FIFO */
8075 psli->slistat.mbox_busy++;
8076 spin_lock_irqsave(&phba->hbalock, iflags);
8077 lpfc_mbox_put(phba, mboxq);
8078 spin_unlock_irqrestore(&phba->hbalock, iflags);
8079 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8080 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 8081 "x%x (x%x/x%x) x%x x%x x%x\n",
da0436e9
JS
8082 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8083 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
a183a15f
JS
8084 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8085 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8086 phba->pport->port_state,
8087 psli->sli_flag, MBX_NOWAIT);
8088 /* Wake up worker thread to transport mailbox command from head */
8089 lpfc_worker_wake_up(phba);
8090
8091 return MBX_BUSY;
8092
8093out_not_finished:
8094 return MBX_NOT_FINISHED;
8095}
8096
8097/**
8098 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8099 * @phba: Pointer to HBA context object.
8100 *
8101 * This function is called by the worker thread to send a mailbox command to
8102 * the SLI4 HBA firmware.
8103 *
8104 **/
8105int
8106lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8107{
8108 struct lpfc_sli *psli = &phba->sli;
8109 LPFC_MBOXQ_t *mboxq;
8110 int rc = MBX_SUCCESS;
8111 unsigned long iflags;
8112 struct lpfc_mqe *mqe;
8113 uint32_t mbx_cmnd;
8114
8115	/* Check interrupt mode before posting the async mailbox command */
8116 if (unlikely(!phba->sli4_hba.intr_enable))
8117 return MBX_NOT_FINISHED;
8118
8119 /* Check for mailbox command service token */
8120 spin_lock_irqsave(&phba->hbalock, iflags);
8121 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8122 spin_unlock_irqrestore(&phba->hbalock, iflags);
8123 return MBX_NOT_FINISHED;
8124 }
8125 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8126 spin_unlock_irqrestore(&phba->hbalock, iflags);
8127 return MBX_NOT_FINISHED;
8128 }
8129 if (unlikely(phba->sli.mbox_active)) {
8130 spin_unlock_irqrestore(&phba->hbalock, iflags);
8131 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8132 "0384 There is pending active mailbox cmd\n");
8133 return MBX_NOT_FINISHED;
8134 }
8135 /* Take the mailbox command service token */
8136 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8137
8138 /* Get the next mailbox command from head of queue */
8139 mboxq = lpfc_mbox_get(phba);
8140
8141	/* If no more mailbox commands are waiting to be posted, we're done */
8142 if (!mboxq) {
8143 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8144 spin_unlock_irqrestore(&phba->hbalock, iflags);
8145 return MBX_SUCCESS;
8146 }
8147 phba->sli.mbox_active = mboxq;
8148 spin_unlock_irqrestore(&phba->hbalock, iflags);
8149
8150 /* Check device readiness for posting mailbox command */
8151 rc = lpfc_mbox_dev_check(phba);
8152 if (unlikely(rc))
8153		/* Driver cleanup routine will clean up the pending mailbox */
8154 goto out_not_finished;
8155
8156 /* Prepare the mbox command to be posted */
8157 mqe = &mboxq->u.mqe;
8158 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8159
8160 /* Start timer for the mbox_tmo and log some mailbox post messages */
8161 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 8162 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
da0436e9
JS
8163
8164 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8165 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
da0436e9
JS
8166 "x%x x%x\n",
8167 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
a183a15f
JS
8168 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8169 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8170 phba->pport->port_state, psli->sli_flag);
8171
8172 if (mbx_cmnd != MBX_HEARTBEAT) {
8173 if (mboxq->vport) {
8174 lpfc_debugfs_disc_trc(mboxq->vport,
8175 LPFC_DISC_TRC_MBOX_VPORT,
8176 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8177 mbx_cmnd, mqe->un.mb_words[0],
8178 mqe->un.mb_words[1]);
8179 } else {
8180 lpfc_debugfs_disc_trc(phba->pport,
8181 LPFC_DISC_TRC_MBOX,
8182 "MBOX Send: cmd:x%x mb:x%x x%x",
8183 mbx_cmnd, mqe->un.mb_words[0],
8184 mqe->un.mb_words[1]);
8185 }
8186 }
8187 psli->slistat.mbox_cmd++;
8188
8189 /* Post the mailbox command to the port */
8190 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8191 if (rc != MBX_SUCCESS) {
8192 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8193 "(%d):2533 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8194 "cannot issue Data: x%x x%x\n",
8195 mboxq->vport ? mboxq->vport->vpi : 0,
8196 mboxq->u.mb.mbxCommand,
a183a15f
JS
8197 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8198 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8199 psli->sli_flag, MBX_NOWAIT);
8200 goto out_not_finished;
8201 }
8202
8203 return rc;
8204
8205out_not_finished:
8206 spin_lock_irqsave(&phba->hbalock, iflags);
d7069f09
JS
8207 if (phba->sli.mbox_active) {
8208 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8209 __lpfc_mbox_cmpl_put(phba, mboxq);
8210 /* Release the token */
8211 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8212 phba->sli.mbox_active = NULL;
8213 }
da0436e9
JS
8214 spin_unlock_irqrestore(&phba->hbalock, iflags);
8215
8216 return MBX_NOT_FINISHED;
8217}
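
/*
 * Illustrative sketch only, not driver code: the worker thread is the
 * intended caller of lpfc_sli4_post_async_mbox(). The helper name below
 * is hypothetical and exists purely to show the calling convention.
 */
static void lpfc_example_worker_post_mbox(struct lpfc_hba *phba)
{
	/* MBX_NOT_FINISHED typically means another command already holds
	 * the token or the port is not ready; see the checks above. */
	(void)lpfc_sli4_post_async_mbox(phba);
}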
8218
8219/**
8220 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8221 * @phba: Pointer to HBA context object.
8222 * @pmbox: Pointer to mailbox object.
8223 * @flag: Flag indicating how the mailbox needs to be processed.
8224 *
8225 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8226 * the API jump table function pointer from the lpfc_hba struct.
8227 *
8228 * Return codes: the caller owns the mailbox command after the function
8229 * returns.
8230 **/
8231int
8232lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8233{
8234 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8235}
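
/*
 * Illustrative sketch only, not driver code: a minimal caller of the
 * wrapper above, issuing a HEARTBEAT command in polling mode. The
 * function name is hypothetical; the mempool usage and the MBX_TIMEOUT
 * ownership rule follow the driver's usual conventions.
 */
static int lpfc_example_issue_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_heart_beat(phba, mboxq);	/* build the HEARTBEAT MQE */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* Caller owns the mailbox after return; only a timeout leaves
	 * ownership with the port. */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}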
8236
8237/**
25985edc 8238 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
da0436e9
JS
8239 * @phba: The hba struct for which this call is being executed.
8240 * @dev_grp: The HBA PCI-Device group number.
8241 *
8242 * This routine sets up the mbox interface API function jump table in @phba
8243 * struct.
8244 * Returns: 0 - success, -ENODEV - failure.
8245 **/
8246int
8247lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8248{
8249
8250 switch (dev_grp) {
8251 case LPFC_PCI_DEV_LP:
8252 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8253 phba->lpfc_sli_handle_slow_ring_event =
8254 lpfc_sli_handle_slow_ring_event_s3;
8255 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8256 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8257 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8258 break;
8259 case LPFC_PCI_DEV_OC:
8260 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8261 phba->lpfc_sli_handle_slow_ring_event =
8262 lpfc_sli_handle_slow_ring_event_s4;
8263 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8264 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8265 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8266 break;
8267 default:
8268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8269 "1420 Invalid HBA PCI-device group: 0x%x\n",
8270 dev_grp);
8271 return -ENODEV;
8272 break;
8273 }
8274 return 0;
8275}
8276
e59058c4 8277/**
3621a710 8278 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
8279 * @phba: Pointer to HBA context object.
8280 * @pring: Pointer to driver SLI ring object.
8281 * @piocb: Pointer to the command iocb to be queued.
8282 *
8283 * This function is called with hbalock held to add a command
8284 * iocb to the txq when the SLI layer cannot submit the command iocb
8285 * to the ring.
8286 **/
2a9bf3d0 8287void
92d7f7b0 8288__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 8289 struct lpfc_iocbq *piocb)
dea3101e 8290{
1c2ba475 8291 lockdep_assert_held(&phba->hbalock);
dea3101e 8292 /* Insert the caller's iocb in the txq tail for later processing. */
8293 list_add_tail(&piocb->list, &pring->txq);
dea3101e 8294}
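
/*
 * Illustrative sketch only, not driver code: __lpfc_sli_ringtx_put()
 * requires hbalock, so a caller that does not already hold the lock
 * would wrap it as below (hypothetical helper).
 */
static void lpfc_example_ringtx_put(struct lpfc_hba *phba,
				    struct lpfc_sli_ring *pring,
				    struct lpfc_iocbq *piocb)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_ringtx_put(phba, pring, piocb);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}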
8295
e59058c4 8296/**
3621a710 8297 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
8298 * @phba: Pointer to HBA context object.
8299 * @pring: Pointer to driver SLI ring object.
8300 * @piocb: Pointer to address of newly added command iocb.
8301 *
8302 * This function is called with hbalock held before a new
8303 * iocb is submitted to the firmware. It checks the txq so that
8304 * iocbs already queued there are flushed to the firmware before
8305 * any new iocb is submitted.
8306 * If there are iocbs in the txq which need to be submitted
8307 * to firmware, lpfc_sli_next_iocb returns the first element
8308 * of the txq after dequeuing it from txq.
8309 * If there is no iocb in the txq then the function will return
8310 * *piocb and *piocb is set to NULL. Caller needs to check
8311 * *piocb to find if there are more commands in the txq.
8312 **/
dea3101e 8313static struct lpfc_iocbq *
8314lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 8315 struct lpfc_iocbq **piocb)
dea3101e 8316{
8317 struct lpfc_iocbq * nextiocb;
8318
1c2ba475
JT
8319 lockdep_assert_held(&phba->hbalock);
8320
dea3101e 8321 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8322 if (!nextiocb) {
8323 nextiocb = *piocb;
8324 *piocb = NULL;
8325 }
8326
8327 return nextiocb;
8328}
8329
e59058c4 8330/**
3772a991 8331 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 8332 * @phba: Pointer to HBA context object.
3772a991 8333 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
8334 * @piocb: Pointer to command iocb.
8335 * @flag: Flag indicating if this command can be put into txq.
8336 *
3772a991
JS
8337 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8338 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8339 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8340 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8341 * this function allows only iocbs for posting buffers. This function finds
8342 * the next available slot in the command ring and posts the command to the
8343 * available slot and writes the port attention register to request HBA start
8344 * processing new iocb. If there is no slot available in the ring and
8345 * flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added to the txq;
8346 * otherwise the function returns IOCB_BUSY.
e59058c4 8347 *
3772a991
JS
8348 * This function is called with hbalock held. The function will return success
8349 * after it successfully submits the iocb to the firmware or after adding it
8350 * to the txq.
e59058c4 8351 **/
98c9ea5c 8352static int
3772a991 8353__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 8354 struct lpfc_iocbq *piocb, uint32_t flag)
8355{
8356 struct lpfc_iocbq *nextiocb;
8357 IOCB_t *iocb;
895427bd 8358 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea3101e 8359
1c2ba475
JT
8360 lockdep_assert_held(&phba->hbalock);
8361
92d7f7b0
JS
8362 if (piocb->iocb_cmpl && (!piocb->vport) &&
8363 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8364 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8365 lpfc_printf_log(phba, KERN_ERR,
8366 LOG_SLI | LOG_VPORT,
e8b62011 8367 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
8368 piocb->iocb.ulpCommand);
8369 dump_stack();
8370 return IOCB_ERROR;
8371 }
8372
8373
8d63f375
LV
8374 /* If the PCI channel is in offline state, do not post iocbs. */
8375 if (unlikely(pci_channel_offline(phba->pcidev)))
8376 return IOCB_ERROR;
8377
a257bf90
JS
8378 /* If HBA has a deferred error attention, fail the iocb. */
8379 if (unlikely(phba->hba_flag & DEFER_ERATT))
8380 return IOCB_ERROR;
8381
dea3101e 8382 /*
8383 * We should never get an IOCB if we are in a < LINK_DOWN state
8384 */
2e0fef85 8385 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 8386 return IOCB_ERROR;
8387
8388 /*
8389	 * Check to see if we are blocking IOCB processing because of an
0b727fea 8390	 * outstanding event.
dea3101e 8391 */
0b727fea 8392 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 8393 goto iocb_busy;
8394
2e0fef85 8395 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 8396 /*
2680eeaa 8397 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 8398 * can be issued if the link is not up.
8399 */
8400 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
8401 case CMD_GEN_REQUEST64_CR:
8402 case CMD_GEN_REQUEST64_CX:
8403 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8404 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 8405 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
8406 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8407 MENLO_TRANSPORT_TYPE))
8408
8409 goto iocb_busy;
8410 break;
dea3101e 8411 case CMD_QUE_RING_BUF_CN:
8412 case CMD_QUE_RING_BUF64_CN:
dea3101e 8413 /*
8414 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8415 * completion, iocb_cmpl MUST be 0.
8416 */
8417 if (piocb->iocb_cmpl)
8418 piocb->iocb_cmpl = NULL;
8419 /*FALLTHROUGH*/
8420 case CMD_CREATE_XRI_CR:
2680eeaa
JS
8421 case CMD_CLOSE_XRI_CN:
8422 case CMD_CLOSE_XRI_CX:
dea3101e 8423 break;
8424 default:
8425 goto iocb_busy;
8426 }
8427
8428 /*
8429 * For FCP commands, we must be in a state where we can process link
8430 * attention events.
8431 */
895427bd 8432 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
92d7f7b0 8433 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 8434 goto iocb_busy;
92d7f7b0 8435 }
dea3101e 8436
dea3101e 8437 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8438 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8439 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8440
8441 if (iocb)
8442 lpfc_sli_update_ring(phba, pring);
8443 else
8444 lpfc_sli_update_full_ring(phba, pring);
8445
8446 if (!piocb)
8447 return IOCB_SUCCESS;
8448
8449 goto out_busy;
8450
8451 iocb_busy:
8452 pring->stats.iocb_cmd_delay++;
8453
8454 out_busy:
8455
8456 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 8457 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 8458 return IOCB_SUCCESS;
8459 }
8460
8461 return IOCB_BUSY;
8462}
8463
3772a991 8464/**
4f774513
JS
8465 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8466 * @phba: Pointer to HBA context object.
8467 * @piocb: Pointer to command iocb.
8468 * @sglq: Pointer to the scatter gather queue object.
8469 *
8470 * This routine converts the bpl or bde that is in the IOCB
8471 * to a sgl list for the sli4 hardware. The physical address
8472 * of the bpl/bde is converted back to a virtual address.
8473 * If the IOCB contains a BPL then the list of BDE's is
8474 * converted to sli4_sge's. If the IOCB contains a single
8475 * BDE then it is converted to a single sli_sge.
8476 * The IOCB is still in CPU endianness so the contents of
8477 * the bpl can be used without byte swapping.
8478 *
8479 * Returns valid XRI = Success, NO_XRI = Failure.
8480**/
8481static uint16_t
8482lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8483 struct lpfc_sglq *sglq)
3772a991 8484{
4f774513
JS
8485 uint16_t xritag = NO_XRI;
8486 struct ulp_bde64 *bpl = NULL;
8487 struct ulp_bde64 bde;
8488 struct sli4_sge *sgl = NULL;
1b51197d 8489 struct lpfc_dmabuf *dmabuf;
4f774513
JS
8490 IOCB_t *icmd;
8491 int numBdes = 0;
8492 int i = 0;
63e801ce
JS
8493 uint32_t offset = 0; /* accumulated offset in the sg request list */
8494 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 8495
4f774513
JS
8496 if (!piocbq || !sglq)
8497 return xritag;
8498
8499 sgl = (struct sli4_sge *)sglq->sgl;
8500 icmd = &piocbq->iocb;
6b5151fd
JS
8501 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8502 return sglq->sli4_xritag;
4f774513
JS
8503 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8504 numBdes = icmd->un.genreq64.bdl.bdeSize /
8505 sizeof(struct ulp_bde64);
8506 /* The addrHigh and addrLow fields within the IOCB
8507 * have not been byteswapped yet so there is no
8508 * need to swap them back.
8509 */
1b51197d
JS
8510 if (piocbq->context3)
8511 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8512 else
8513 return xritag;
4f774513 8514
1b51197d 8515 bpl = (struct ulp_bde64 *)dmabuf->virt;
4f774513
JS
8516 if (!bpl)
8517 return xritag;
8518
8519 for (i = 0; i < numBdes; i++) {
8520 /* Should already be byte swapped. */
28baac74
JS
8521 sgl->addr_hi = bpl->addrHigh;
8522 sgl->addr_lo = bpl->addrLow;
8523
0558056c 8524 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
8525 if ((i+1) == numBdes)
8526 bf_set(lpfc_sli4_sge_last, sgl, 1);
8527 else
8528 bf_set(lpfc_sli4_sge_last, sgl, 0);
28baac74
JS
8529 /* swap the size field back to the cpu so we
8530 * can assign it to the sgl.
8531 */
8532 bde.tus.w = le32_to_cpu(bpl->tus.w);
8533 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
63e801ce
JS
8534 /* The offsets in the sgl need to be accumulated
8535 * separately for the request and reply lists.
8536 * The request is always first, the reply follows.
8537 */
8538 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8539 /* add up the reply sg entries */
8540 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8541 inbound++;
8542 /* first inbound? reset the offset */
8543 if (inbound == 1)
8544 offset = 0;
8545 bf_set(lpfc_sli4_sge_offset, sgl, offset);
f9bb2da1
JS
8546 bf_set(lpfc_sli4_sge_type, sgl,
8547 LPFC_SGE_TYPE_DATA);
63e801ce
JS
8548 offset += bde.tus.f.bdeSize;
8549 }
546fc854 8550 sgl->word2 = cpu_to_le32(sgl->word2);
4f774513
JS
8551 bpl++;
8552 sgl++;
8553 }
8554 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8555 /* The addrHigh and addrLow fields of the BDE have not
8556 * been byteswapped yet so they need to be swapped
8557 * before putting them in the sgl.
8558 */
8559 sgl->addr_hi =
8560 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8561 sgl->addr_lo =
8562 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 8563 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
8564 bf_set(lpfc_sli4_sge_last, sgl, 1);
8565 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
8566 sgl->sge_len =
8567 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
8568 }
8569 return sglq->sli4_xritag;
3772a991 8570}
92d7f7b0 8571
e59058c4 8572/**
4f774513 8573 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 8574 * @phba: Pointer to HBA context object.
4f774513
JS
8575 * @iocbq: Pointer to command iocb.
8576 * @wqe: Pointer to the work queue entry.
e59058c4 8577 *
4f774513
JS
8578 * This routine converts the iocb command to its Work Queue Entry
8579 * equivalent. The wqe pointer should not have any fields set when
8580 * this routine is called because it will memcpy over them.
8581 * This routine does not set the CQ_ID or the WQEC bits in the
8582 * wqe.
e59058c4 8583 *
4f774513 8584 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 8585 **/
cf5bf97e 8586static int
4f774513
JS
8587lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8588 union lpfc_wqe *wqe)
cf5bf97e 8589{
5ffc266e 8590 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
8591 uint8_t ct = 0;
8592 uint32_t fip;
8593 uint32_t abort_tag;
8594 uint8_t command_type = ELS_COMMAND_NON_FIP;
8595 uint8_t cmnd;
8596 uint16_t xritag;
dcf2a4e0
JS
8597 uint16_t abrt_iotag;
8598 struct lpfc_iocbq *abrtiocbq;
4f774513 8599 struct ulp_bde64 *bpl = NULL;
f0d9bccc 8600 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
8601 int numBdes, i;
8602 struct ulp_bde64 bde;
c31098ce 8603 struct lpfc_nodelist *ndlp;
ff78d8f9 8604 uint32_t *pcmd;
1b51197d 8605 uint32_t if_type;
4f774513 8606
45ed1190 8607 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 8608 /* The fcp commands will set command type */
0c287589 8609 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 8610 command_type = FCP_COMMAND;
c868595d 8611 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
8612 command_type = ELS_COMMAND_FIP;
8613 else
8614 command_type = ELS_COMMAND_NON_FIP;
8615
b5c53958
JS
8616 if (phba->fcp_embed_io)
8617 memset(wqe, 0, sizeof(union lpfc_wqe128));
4f774513
JS
8618 /* Some of the fields are in the right position already */
8619 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
f0d9bccc 8620 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
28d7f3df 8621 wqe->generic.wqe_com.word10 = 0;
b5c53958
JS
8622
8623 abort_tag = (uint32_t) iocbq->iotag;
8624 xritag = iocbq->sli4_xritag;
4f774513
JS
8625 /* words0-2 bpl convert bde */
8626 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
8627 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8628 sizeof(struct ulp_bde64);
4f774513
JS
8629 bpl = (struct ulp_bde64 *)
8630 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8631 if (!bpl)
8632 return IOCB_ERROR;
cf5bf97e 8633
4f774513
JS
8634 /* Should already be byte swapped. */
8635 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8636 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8637 /* swap the size field back to the cpu so we
8638 * can assign it to the sgl.
8639 */
8640 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
8641 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8642 total_len = 0;
8643 for (i = 0; i < numBdes; i++) {
8644 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8645 total_len += bde.tus.f.bdeSize;
8646 }
4f774513 8647 } else
5ffc266e 8648 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 8649
4f774513
JS
8650 iocbq->iocb.ulpIoTag = iocbq->iotag;
8651 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 8652
4f774513
JS
8653 switch (iocbq->iocb.ulpCommand) {
8654 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
8655 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8656 ndlp = iocbq->context_un.ndlp;
8657 else
8658 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
8659 if (!iocbq->iocb.ulpLe) {
8660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8661 "2007 Only Limited Edition cmd Format"
8662 " supported 0x%x\n",
8663 iocbq->iocb.ulpCommand);
8664 return IOCB_ERROR;
8665 }
ff78d8f9 8666
5ffc266e 8667 wqe->els_req.payload_len = xmit_len;
4f774513
JS
8668		/* Els_request64 has a TMO */
8669 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8670 iocbq->iocb.ulpTimeout);
8672		/* Need a VF for word 4; set the vf bit */
8672 bf_set(els_req64_vf, &wqe->els_req, 0);
8673 /* And a VFID for word 12 */
8674 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 8675 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
8676 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8677 iocbq->iocb.ulpContext);
8678 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8679 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 8680 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 8681 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
8682 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8683 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
8684 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8685 iocbq->context2)->virt);
1b51197d
JS
8686 if_type = bf_get(lpfc_sli_intf_if_type,
8687 &phba->sli4_hba.sli_intf);
8688 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 8689 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 8690 *pcmd == ELS_CMD_SCR ||
6b5151fd 8691 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 8692 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
8693 *pcmd == ELS_CMD_PLOGI)) {
8694 bf_set(els_req64_sp, &wqe->els_req, 1);
8695 bf_set(els_req64_sid, &wqe->els_req,
8696 iocbq->vport->fc_myDID);
939723a4
JS
8697 if ((*pcmd == ELS_CMD_FLOGI) &&
8698 !(phba->fc_topology ==
8699 LPFC_TOPOLOGY_LOOP))
8700 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
8701 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8702 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 8703 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 8704 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
8705 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8706 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8707 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8708 }
c868595d 8709 }
6d368e53
JS
8710 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8711 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
8712 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8713 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8714 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8715 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8716 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8717 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
af22741c 8718 wqe->els_req.max_response_payload_len = total_len - xmit_len;
7851fe2c 8719 break;
5ffc266e 8720 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
8721 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8722 iocbq->iocb.un.ulpWord[3]);
8723 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 8724 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
8725 /* The entire sequence is transmitted for this IOCB */
8726 xmit_len = total_len;
8727 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
8728 if (phba->link_flag & LS_LOOPBACK_MODE)
8729 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
4f774513 8730 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
8731 /* word3 iocb=io_tag32 wqe=reserved */
8732 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
8733 /* word4 relative_offset memcpy */
8734 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
8735 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8736 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8737 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8738 LPFC_WQE_IOD_WRITE);
8739 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8740 LPFC_WQE_LENLOC_WORD12);
8741 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
8742 wqe->xmit_sequence.xmit_len = xmit_len;
8743 command_type = OTHER_COMMAND;
7851fe2c 8744 break;
4f774513 8745 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
8746 /* word3 iocb=iotag32 wqe=seq_payload_len */
8747 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
8748 /* word4 iocb=rsvd wqe=rsvd */
8749 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8750 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 8751 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 8752 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
8753 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8754 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8755 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8756 LPFC_WQE_LENLOC_WORD3);
8757 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 8758 break;
4f774513
JS
8759 case CMD_FCP_IWRITE64_CR:
8760 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
8761 /* word3 iocb=iotag wqe=payload_offset_len */
8762 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
8763 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8764 xmit_len + sizeof(struct fcp_rsp));
8765 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8766 0);
f0d9bccc
JS
8767 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8768 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8769 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8770 iocbq->iocb.ulpFCP2Rcvy);
8771 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8772 /* Always open the exchange */
f0d9bccc
JS
8773 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8774 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8775 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 8776 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 8777 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
1ba981fd
JS
8778 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8779 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
c92c841c
JS
8780 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8781 if (iocbq->priority) {
8782 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8783 (iocbq->priority << 1));
8784 } else {
1ba981fd
JS
8785 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8786 (phba->cfg_XLanePriority << 1));
8787 }
8788 }
b5c53958
JS
8789 /* Note, word 10 is already initialized to 0 */
8790
8791 if (phba->fcp_embed_io) {
8792 struct lpfc_scsi_buf *lpfc_cmd;
8793 struct sli4_sge *sgl;
8794 union lpfc_wqe128 *wqe128;
8795 struct fcp_cmnd *fcp_cmnd;
8796 uint32_t *ptr;
8797
8798 /* 128 byte wqe support here */
8799 wqe128 = (union lpfc_wqe128 *)wqe;
8800
8801 lpfc_cmd = iocbq->context1;
8802 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8803 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8804
8805 /* Word 0-2 - FCP_CMND */
8806 wqe128->generic.bde.tus.f.bdeFlags =
8807 BUFF_TYPE_BDE_IMMED;
8808 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8809 wqe128->generic.bde.addrHigh = 0;
8810 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8811
8812 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8813
8814 /* Word 22-29 FCP CMND Payload */
8815 ptr = &wqe128->words[22];
8816 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8817 }
7851fe2c 8818 break;
4f774513 8819 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
8820 /* word3 iocb=iotag wqe=payload_offset_len */
8821 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
8822 bf_set(payload_offset_len, &wqe->fcp_iread,
8823 xmit_len + sizeof(struct fcp_rsp));
8824 bf_set(cmd_buff_len, &wqe->fcp_iread,
8825 0);
f0d9bccc
JS
8826 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8827 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8828 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8829 iocbq->iocb.ulpFCP2Rcvy);
8830 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688 8831 /* Always open the exchange */
f0d9bccc
JS
8832 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8833 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8834 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 8835 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 8836 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
1ba981fd
JS
8837 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8838 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
c92c841c
JS
8839 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8840 if (iocbq->priority) {
8841 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8842 (iocbq->priority << 1));
8843 } else {
1ba981fd
JS
8844 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8845 (phba->cfg_XLanePriority << 1));
8846 }
8847 }
b5c53958
JS
8848 /* Note, word 10 is already initialized to 0 */
8849
8850 if (phba->fcp_embed_io) {
8851 struct lpfc_scsi_buf *lpfc_cmd;
8852 struct sli4_sge *sgl;
8853 union lpfc_wqe128 *wqe128;
8854 struct fcp_cmnd *fcp_cmnd;
8855 uint32_t *ptr;
8856
8857 /* 128 byte wqe support here */
8858 wqe128 = (union lpfc_wqe128 *)wqe;
8859
8860 lpfc_cmd = iocbq->context1;
8861 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8862 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8863
8864 /* Word 0-2 - FCP_CMND */
8865 wqe128->generic.bde.tus.f.bdeFlags =
8866 BUFF_TYPE_BDE_IMMED;
8867 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8868 wqe128->generic.bde.addrHigh = 0;
8869 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8870
8871 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8872
8873 /* Word 22-29 FCP CMND Payload */
8874 ptr = &wqe128->words[22];
8875 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8876 }
7851fe2c 8877 break;
4f774513 8878 case CMD_FCP_ICMND64_CR:
0ba4b219
JS
8879 /* word3 iocb=iotag wqe=payload_offset_len */
8880 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8881 bf_set(payload_offset_len, &wqe->fcp_icmd,
8882 xmit_len + sizeof(struct fcp_rsp));
8883 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8884 0);
f0d9bccc 8885 /* word3 iocb=IO_TAG wqe=reserved */
f0d9bccc 8886 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 8887 /* Always open the exchange */
f0d9bccc
JS
8888 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8889 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8890 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8891 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8892 LPFC_WQE_LENLOC_NONE);
2a94aea4
JS
8893 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8894 iocbq->iocb.ulpFCP2Rcvy);
1ba981fd
JS
8895 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8896 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
c92c841c
JS
8897 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8898 if (iocbq->priority) {
8899 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8900 (iocbq->priority << 1));
8901 } else {
1ba981fd
JS
8902 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8903 (phba->cfg_XLanePriority << 1));
8904 }
8905 }
b5c53958
JS
8906 /* Note, word 10 is already initialized to 0 */
8907
8908 if (phba->fcp_embed_io) {
8909 struct lpfc_scsi_buf *lpfc_cmd;
8910 struct sli4_sge *sgl;
8911 union lpfc_wqe128 *wqe128;
8912 struct fcp_cmnd *fcp_cmnd;
8913 uint32_t *ptr;
8914
8915 /* 128 byte wqe support here */
8916 wqe128 = (union lpfc_wqe128 *)wqe;
8917
8918 lpfc_cmd = iocbq->context1;
8919 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8920 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8921
8922 /* Word 0-2 - FCP_CMND */
8923 wqe128->generic.bde.tus.f.bdeFlags =
8924 BUFF_TYPE_BDE_IMMED;
8925 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8926 wqe128->generic.bde.addrHigh = 0;
8927 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8928
8929 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
8930
8931 /* Word 22-29 FCP CMND Payload */
8932 ptr = &wqe128->words[22];
8933 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8934 }
7851fe2c 8935 break;
4f774513 8936 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
8937 /* For this command calculate the xmit length of the
8938 * request bde.
8939 */
8940 xmit_len = 0;
8941 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8942 sizeof(struct ulp_bde64);
8943 for (i = 0; i < numBdes; i++) {
63e801ce 8944 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
8945 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8946 break;
63e801ce
JS
8947 xmit_len += bde.tus.f.bdeSize;
8948 }
f0d9bccc
JS
8949 /* word3 iocb=IO_TAG wqe=request_payload_len */
8950 wqe->gen_req.request_payload_len = xmit_len;
8951 /* word4 iocb=parameter wqe=relative_offset memcpy */
8952 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
8953 /* word6 context tag copied in memcpy */
8954 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8955 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8957 "2015 Invalid CT %x command 0x%x\n",
8958 ct, iocbq->iocb.ulpCommand);
8959 return IOCB_ERROR;
8960 }
f0d9bccc
JS
8961 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8962 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8963 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8964 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8965 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8966 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8967 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8968 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
af22741c 8969 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
4f774513 8970 command_type = OTHER_COMMAND;
7851fe2c 8971 break;
4f774513 8972 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 8973 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 8974 /* words0-2 BDE memcpy */
f0d9bccc
JS
8975 /* word3 iocb=iotag32 wqe=response_payload_len */
8976 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
8977 /* word4 */
8978 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
8979		/* word5 iocb=rsvd wqe=did */
8980 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
8981 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8982
8983 if_type = bf_get(lpfc_sli_intf_if_type,
8984 &phba->sli4_hba.sli_intf);
8985 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8986 if (iocbq->vport->fc_flag & FC_PT2PT) {
8987 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8988 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8989 iocbq->vport->fc_myDID);
8990 if (iocbq->vport->fc_myDID == Fabric_DID) {
8991 bf_set(wqe_els_did,
8992 &wqe->xmit_els_rsp.wqe_dest, 0);
8993 }
8994 }
8995 }
f0d9bccc
JS
8996 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8997 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8998 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8999 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 9000 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 9001 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 9002 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 9003 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
9004 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9005 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9006 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9007 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9008 LPFC_WQE_LENLOC_WORD3);
9009 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
9010 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9011 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
9012 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9013 iocbq->context2)->virt);
9014 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
9015 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9016 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 9017 iocbq->vport->fc_myDID);
939723a4
JS
9018 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9019 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
9020 phba->vpi_ids[phba->pport->vpi]);
9021 }
4f774513 9022 command_type = OTHER_COMMAND;
7851fe2c 9023 break;
4f774513
JS
9024 case CMD_CLOSE_XRI_CN:
9025 case CMD_ABORT_XRI_CN:
9026 case CMD_ABORT_XRI_CX:
9027		/* words 0-2 memcpy should be 0 (reserved) */
9028 /* port will send abts */
dcf2a4e0
JS
9029 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9030 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9031 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9032 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9033 } else
9034 fip = 0;
9035
9036 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 9037 /*
dcf2a4e0
JS
9038 * The link is down, or the command was ELS_FIP
9039 * so the fw does not need to send abts
4f774513
JS
9040 * on the wire.
9041 */
9042 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9043 else
9044 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9045 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
9046 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9047 wqe->abort_cmd.rsrvd5 = 0;
9048 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
9049 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9050 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
9051 /*
9052 * The abort handler will send us CMD_ABORT_XRI_CN or
9053 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9054 */
f0d9bccc
JS
9055 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9056 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9057 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9058 LPFC_WQE_LENLOC_NONE);
4f774513
JS
9059 cmnd = CMD_ABORT_XRI_CX;
9060 command_type = OTHER_COMMAND;
9061 xritag = 0;
7851fe2c 9062 break;
6669f9bb 9063 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 9064 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 9065 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
9066 * we re-construct this WQE here based on information in
9067 * iocbq from scratch.
9068 */
9069 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 9070 /* OX_ID is invariable to who sent ABTS to CT exchange */
6669f9bb 9071 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
9072 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9073 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
9074 LPFC_ABTS_UNSOL_INT) {
9075 /* ABTS sent by initiator to CT exchange, the
9076 * RX_ID field will be filled with the newly
9077 * allocated responder XRI.
9078 */
9079 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9080 iocbq->sli4_xritag);
9081 } else {
9082 /* ABTS sent by responder to CT exchange, the
9083 * RX_ID field will be filled with the responder
9084 * RX_ID from ABTS.
9085 */
9086 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 9087 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 9088 }
6669f9bb
JS
9089 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9090 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
9091
9092 /* Use CT=VPI */
9093 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9094 ndlp->nlp_DID);
9095 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9096 iocbq->iocb.ulpContext);
9097 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 9098 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 9099 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
9100 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9101 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9102 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
9103		/* Overwrite the pre-set command type with OTHER_COMMAND */
9104 command_type = OTHER_COMMAND;
546fc854
JS
9105 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9106 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9107 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9108 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9109 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9110 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9111 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9112 }
9113
7851fe2c 9114 break;
4f774513
JS
9115 case CMD_XRI_ABORTED_CX:
9116 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
9117 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9118 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9119 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9120 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9121 default:
9122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9123 "2014 Invalid command 0x%x\n",
9124 iocbq->iocb.ulpCommand);
9125 return IOCB_ERROR;
7851fe2c 9126 break;
4f774513 9127 }
6d368e53 9128
8012cc38
JS
9129 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9130 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9131 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9132 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9133 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9134 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9135 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9136 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
9137 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9138 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9139 wqe->generic.wqe_com.abort_tag = abort_tag;
9140 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9141 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9142 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9143 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
9144 return 0;
9145}
9146
9147/**
9148 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9149 * @phba: Pointer to HBA context object.
9150 * @ring_number: SLI ring number to issue iocb on.
9151 * @piocb: Pointer to command iocb.
9152 * @flag: Flag indicating if this command can be put into txq.
9153 *
9154 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9155 * an iocb command to an HBA with SLI-4 interface spec.
9156 *
9157 * This function is called with hbalock held. The function will return success
9158 * after it successfully submits the iocb to the firmware or after adding it
9159 * to the txq.
9160 **/
9161static int
9162__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9163 struct lpfc_iocbq *piocb, uint32_t flag)
9164{
9165 struct lpfc_sglq *sglq;
b5c53958
JS
9166 union lpfc_wqe *wqe;
9167 union lpfc_wqe128 wqe128;
1ba981fd 9168 struct lpfc_queue *wq;
895427bd 9169 struct lpfc_sli_ring *pring;
4f774513 9170
895427bd
JS
9171 /* Get the WQ */
9172 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9173 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9174 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9175 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9176 else
9177 wq = phba->sli4_hba.oas_wq;
9178 } else {
9179 wq = phba->sli4_hba.els_wq;
9180 }
9181
9182 /* Get corresponding ring */
9183 pring = wq->pring;
1c2ba475 9184
b5c53958
JS
9185 /*
9186 * The WQE can be either 64 or 128 bytes,
9187 * so allocate space on the stack assuming the largest.
9188 */
9189 wqe = (union lpfc_wqe *)&wqe128;
9190
895427bd
JS
9191 lockdep_assert_held(&phba->hbalock);
9192
4f774513
JS
9193 if (piocb->sli4_xritag == NO_XRI) {
9194 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 9195 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
9196 sglq = NULL;
9197 else {
0e9bb8d7 9198 if (!list_empty(&pring->txq)) {
2a9bf3d0
JS
9199 if (!(flag & SLI_IOCB_RET_IOCB)) {
9200 __lpfc_sli_ringtx_put(phba,
9201 pring, piocb);
9202 return IOCB_SUCCESS;
9203 } else {
9204 return IOCB_BUSY;
9205 }
9206 } else {
895427bd 9207 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
2a9bf3d0
JS
9208 if (!sglq) {
9209 if (!(flag & SLI_IOCB_RET_IOCB)) {
9210 __lpfc_sli_ringtx_put(phba,
9211 pring,
9212 piocb);
9213 return IOCB_SUCCESS;
9214 } else
9215 return IOCB_BUSY;
9216 }
9217 }
4f774513 9218 }
2ea259ee 9219 } else if (piocb->iocb_flag & LPFC_IO_FCP)
6d368e53
JS
9220 /* These IO's already have an XRI and a mapped sgl. */
9221 sglq = NULL;
2ea259ee 9222 else {
6d368e53
JS
9223 /*
9224		 * This is a continuation of a command (CX), so this
4f774513
JS
9225 * sglq is on the active list
9226 */
edccdc17 9227 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
9228 if (!sglq)
9229 return IOCB_ERROR;
9230 }
9231
9232 if (sglq) {
6d368e53 9233 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 9234 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 9235 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
9236 return IOCB_ERROR;
9237 }
9238
b5c53958 9239 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
4f774513
JS
9240 return IOCB_ERROR;
9241
895427bd
JS
9242 if (lpfc_sli4_wq_put(wq, wqe))
9243 return IOCB_ERROR;
4f774513
JS
9244 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9245
9246 return 0;
9247}
9248
9249/**
9250 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9251 *
9252 * This routine wraps the actual lockless IOCB issue routine through the
9253 * function pointer in the lpfc_hba struct.
9254 *
9255 * Return codes:
b5c53958
JS
9256 * IOCB_ERROR - Error
9257 * IOCB_SUCCESS - Success
9258 * IOCB_BUSY - Busy
4f774513 9259 **/
2a9bf3d0 9260int
4f774513
JS
9261__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9262 struct lpfc_iocbq *piocb, uint32_t flag)
9263{
9264 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9265}
9266
9267/**
25985edc 9268 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
9269 * @phba: The hba struct for which this call is being executed.
9270 * @dev_grp: The HBA PCI-Device group number.
9271 *
9272 * This routine sets up the SLI interface API function jump table in @phba
9273 * struct.
9274 * Returns: 0 - success, -ENODEV - failure.
9275 **/
9276int
9277lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9278{
9279
9280 switch (dev_grp) {
9281 case LPFC_PCI_DEV_LP:
9282 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9283 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9284 break;
9285 case LPFC_PCI_DEV_OC:
9286 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9287 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9288 break;
9289 default:
9290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9291 "1419 Invalid HBA PCI-device group: 0x%x\n",
9292 dev_grp);
9293 return -ENODEV;
9294 break;
9295 }
9296 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9297 return 0;
9298}
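
/*
 * Illustrative sketch only, not driver code: the mailbox and SLI jump
 * tables above are both keyed off the PCI device group during attach.
 * The wrapper below is hypothetical and just shows a sane call order.
 */
static int lpfc_example_setup_api_tables(struct lpfc_hba *phba,
					 uint8_t dev_grp)
{
	int rc;

	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return rc;
	return lpfc_sli_api_table_setup(phba, dev_grp);
}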
9299
a1efe163 9300/**
895427bd 9301 * lpfc_sli4_calc_ring - Calculates which ring to use
a1efe163 9302 * @phba: Pointer to HBA context object.
a1efe163
JS
9303 * @piocb: Pointer to command iocb.
9304 *
895427bd
JS
9305 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9306 * hba_wqidx, thus we need to calculate the corresponding ring.
a1efe163 9307 * Since ABORTS must go on the same WQ as the command they are
895427bd 9308 * aborting, we use the command's hba_wqidx.
a1efe163 9309 */
895427bd
JS
9310struct lpfc_sli_ring *
9311lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9bd2bff5 9312{
895427bd 9313 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
8b0dff14 9314 if (!(phba->cfg_fof) ||
895427bd 9315 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
8b0dff14 9316 if (unlikely(!phba->sli4_hba.fcp_wq))
895427bd 9317 return NULL;
8b0dff14 9318 /*
895427bd 9319 * for abort iocb hba_wqidx should already
8b0dff14
JS
9320 * be setup based on what work queue we used.
9321 */
9322 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
895427bd 9323 piocb->hba_wqidx =
8b0dff14
JS
9324 lpfc_sli4_scmd_to_wqidx_distr(phba,
9325 piocb->context1);
895427bd 9326 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
8b0dff14
JS
9327 } else {
9328 if (unlikely(!phba->sli4_hba.oas_wq))
895427bd
JS
9329 return NULL;
9330 piocb->hba_wqidx = 0;
9331 return phba->sli4_hba.oas_wq->pring;
9bd2bff5 9332 }
895427bd
JS
9333 } else {
9334 if (unlikely(!phba->sli4_hba.els_wq))
9335 return NULL;
9336 piocb->hba_wqidx = 0;
9337 return phba->sli4_hba.els_wq->pring;
9bd2bff5 9338 }
9bd2bff5
JS
9339}
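
/*
 * Illustrative sketch only, not driver code: an abort must ride the
 * same WQ as the command it cancels, so the originator's hba_wqidx is
 * carried over before the ring lookup. Names below are hypothetical.
 */
static struct lpfc_sli_ring *
lpfc_example_abort_ring(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *abtsiocb)
{
	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	return lpfc_sli4_calc_ring(phba, abtsiocb);
}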
9340
4f774513
JS
9341/**
9342 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9343 * @phba: Pointer to HBA context object.
9344 * @ring_number: SLI ring number to issue the iocb on.
9345 * @piocb: Pointer to command iocb.
9346 * @flag: Flag indicating if this command can be put into txq.
9347 *
9348 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9349 * function. This function gets the hbalock and calls
9350 * __lpfc_sli_issue_iocb function and will return the error returned
9351 * by __lpfc_sli_issue_iocb function. This wrapper is used by
9352 * functions which do not hold hbalock.
9353 **/
9354int
9355lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9356 struct lpfc_iocbq *piocb, uint32_t flag)
9357{
895427bd 9358 struct lpfc_hba_eq_hdl *hba_eq_hdl;
2a76a283 9359 struct lpfc_sli_ring *pring;
ba20c853
JS
9360 struct lpfc_queue *fpeq;
9361 struct lpfc_eqe *eqe;
4f774513 9362 unsigned long iflags;
2a76a283 9363 int rc, idx;
4f774513 9364
7e56aa25 9365 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
9366 pring = lpfc_sli4_calc_ring(phba, piocb);
9367 if (unlikely(pring == NULL))
9bd2bff5 9368 return IOCB_ERROR;
ba20c853 9369
9bd2bff5
JS
9370 spin_lock_irqsave(&pring->ring_lock, iflags);
9371 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9372 spin_unlock_irqrestore(&pring->ring_lock, iflags);
ba20c853 9373
9bd2bff5 9374 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
895427bd
JS
9375 idx = piocb->hba_wqidx;
9376 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
4f774513 9377
895427bd 9378 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
ba20c853 9379
9bd2bff5
JS
9380 /* Get associated EQ with this index */
9381 fpeq = phba->sli4_hba.hba_eq[idx];
ba20c853 9382
9bd2bff5
JS
9383 /* Turn off interrupts from this EQ */
9384 lpfc_sli4_eq_clr_intr(fpeq);
ba20c853 9385
9bd2bff5
JS
9386 /*
9387 * Process all the events on FCP EQ
9388 */
9389 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9390 lpfc_sli4_hba_handle_eqe(phba,
9391 eqe, idx);
9392 fpeq->EQ_processed++;
ba20c853 9393 }
ba20c853 9394
9bd2bff5
JS
9395 /* Always clear and re-arm the EQ */
9396 lpfc_sli4_eq_release(fpeq,
9397 LPFC_QUEUE_REARM);
9398 }
895427bd 9399 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
2a76a283 9400 }
7e56aa25
JS
9401 } else {
9402 /* For now, SLI2/3 will still use hbalock */
9403 spin_lock_irqsave(&phba->hbalock, iflags);
9404 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9405 spin_unlock_irqrestore(&phba->hbalock, iflags);
9406 }
4f774513
JS
9407 return rc;
9408}
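
/*
 * Illustrative sketch only, not driver code: a typical caller of
 * lpfc_sli_issue_iocb(). With SLI_IOCB_RET_IOCB a full ring returns
 * IOCB_BUSY instead of silently parking the iocb on the txq. Helper
 * name and the retry policy are hypothetical.
 */
static int lpfc_example_issue_els(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		/* Resubmit without the flag; the txq will hold it. */
		rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
	return (rc == IOCB_ERROR) ? -EIO : 0;
}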
9409
9410/**
9411 * lpfc_extra_ring_setup - Extra ring setup function
9412 * @phba: Pointer to HBA context object.
9413 *
9414 * This function is called while driver attaches with the
9415 * HBA to set up the extra ring. The extra ring is used
9416 * only when the driver needs to support target mode
9417 * or IP over FC functionality.
9418 *
895427bd 9419 * This function is called with no lock held. SLI3 only.
4f774513
JS
9420 **/
9421static int
9422lpfc_extra_ring_setup( struct lpfc_hba *phba)
9423{
9424 struct lpfc_sli *psli;
9425 struct lpfc_sli_ring *pring;
9426
9427 psli = &phba->sli;
9428
9429 /* Adjust cmd/rsp ring iocb entries more evenly */
9430
9431 /* Take some away from the FCP ring */
895427bd 9432 pring = &psli->sli3_ring[LPFC_FCP_RING];
7e56aa25
JS
9433 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9434 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9435 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9436 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 9437
a4bc3379 9438 /* and give them to the extra ring */
895427bd 9439 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
a4bc3379 9440
7e56aa25
JS
9441 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9442 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9443 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9444 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
9445
9446 /* Setup default profile for this ring */
9447 pring->iotag_max = 4096;
9448 pring->num_mask = 1;
9449 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
9450 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9451 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
9452 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9453 return 0;
9454}
9455
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	/* Arguments reordered to match the format string's
	 * vpi/rpi/status/reason labels.
	 */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus,
			iocbq->iocb.ulpContext);
}

/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}

/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}


/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli4_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;

	pring = phba->sli4_hba.els_wq->pring;
	pring->num_mask = LPFC_MAX_RING_MASK;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[1].profile = 0;	/* Mask 1 */
	pring->prt[1].rctl = FC_RCTL_ELS_REP;
	pring->prt[1].type = FC_TYPE_ELS;
	pring->prt[1].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[2].profile = 0;	/* Mask 2 */
	/* NameServer Inquiry */
	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
	/* NameServer */
	pring->prt[2].type = FC_TYPE_CT;
	pring->prt[2].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	pring->prt[3].profile = 0;	/* Mask 3 */
	/* NameServer response */
	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
	/* NameServer */
	pring->prt[3].type = FC_TYPE_CT;
	pring->prt[3].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	return 0;
}

/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0. SLI3 only.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
			pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
		pring = phba->sli4_hba.fcp_wq[i]->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
		pring = phba->sli4_hba.nvme_wq[i]->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_nvme_io_channel) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	if (phba->cfg_fof) {
		pring = phba->sli4_hba.oas_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue; the outstanding
 * mailbox command; and the completed mailbox command queue. It is the caller's
 * responsibility to make sure that the driver is in the proper state to flush
 * the mailbox command sub-system. Namely, the posting of mailbox commands into
 * the pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}

/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 * Also issue ABTS for everything on the txcmplq
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			spin_lock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}

/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocb, buffers, mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to cleanup driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock_irq(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock_irq(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

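	/*
	 * Free the ELS buffers the driver has been holding back for
	 * deferred reposting; with the HBA going down they will not
	 * be reused.
	 */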
	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to be copied (a whole number of 32-bit words).
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

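	/* cnt is a byte count: copy one 32-bit word per iteration,
	 * converting from the SLI's little-endian layout to host
	 * byte order.
	 */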
	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to be copied (a whole number of 32-bit words).
 *
 * This function is used for copying data from a data structure
 * with big-endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bit-wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
	 * from a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just Log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (abort_iotag != 0 &&
			    abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort request,
 * this function issues the abort unconditionally. This function is called
 * with hbalock held. The function returns 0 when it fails due to a memory
 * allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval;
	unsigned long iflags;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	}
	else
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
		abtsiocbp->iocb_flag |= LPFC_IO_FOF;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
	abtsiocbp->vport = vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortIoTag,
			 iabt->un.acxri.abortContextTag,
			 abtsiocbp->iotag);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
		if (unlikely(pring == NULL))
			return 0;
		/* Note: both hbalock and ring_lock need to be set here */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
			abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
			abtsiocbp, 0);
	}

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function is changed for those commands so that
 * nothing happens when they finish. This function is called with hbalock
 * held. The function returns 0 when the command iocb is an abort request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort request,
 * this function issues the abort unconditionally. This function is called
 * with hbalock held. The function returns 0 when it fails due to a memory
 * allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	union lpfc_wqe *abts_wqe;
	int retval;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
	    cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this io based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

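	/*
	 * Unlike the SLI3 abort path, the NVME abort is built directly
	 * as an SLI4 ABORT WQE, word by word below, rather than as an
	 * IOCB that is translated later.
	 */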
	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abtsiocbp->wqe;
	bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* Explicitly set reserved fields to zero.*/
	abts_wqe->abort_cmd.rsrvd4 = 0;
	abts_wqe->abort_cmd.rsrvd5 = 0;

	/* WQE Common - word 6. Context is XRI tag. Set 0. */
	bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       cmdiocb->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abtsiocbp->iotag);

	/* word 10 */
	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->iocb_flag |= LPFC_IO_NVME;
	abtsiocbp->vport = vport;
	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
	if (retval == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6147 Failed abts issue_wqe with status x%x "
				 "for oxid x%x\n",
				 retval, cmdiocb->sli4_xritag);
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return retval;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6148 Drv Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 cmdiocb->sli4_xritag,
			 abtsiocbp->iotag);

	return retval;
}

/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	spin_lock_irq(&phba->hbalock);
	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);

	return sum;
}

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

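	/*
	 * Walk the active iotag lookup table; every iocb that passes
	 * the vport/target/LUN filter gets its own ABTS.
	 */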
0bd4ca25
JSEC
10877 for (i = 1; i <= phba->sli.last_iotag; i++) {
10878 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 10879
51ef4c26 10880 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 10881 abort_cmd) != 0)
dea3101e 10882 continue;
10883
afbd8d88
JS
10884 /*
10885 * If the iocbq is already being aborted, don't take a second
10886 * action, but do count it.
10887 */
10888 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10889 continue;
10890
dea3101e 10891 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 10892 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 10893 if (abtsiocb == NULL) {
10894 errcnt++;
10895 continue;
10896 }
dea3101e 10897
afbd8d88
JS
10898 /* indicate the IO is being aborted by the driver. */
10899 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10900
0bd4ca25 10901 cmd = &iocbq->iocb;
dea3101e 10902 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10903 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
10904 if (phba->sli_rev == LPFC_SLI_REV4)
10905 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10906 else
10907 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 10908 abtsiocb->iocb.ulpLe = 1;
10909 abtsiocb->iocb.ulpClass = cmd->ulpClass;
afbd8d88 10910 abtsiocb->vport = vport;
dea3101e 10911
5ffc266e 10912 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 10913 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
341af102
JS
10914 if (iocbq->iocb_flag & LPFC_IO_FCP)
10915 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
10916 if (iocbq->iocb_flag & LPFC_IO_FOF)
10917 abtsiocb->iocb_flag |= LPFC_IO_FOF;
5ffc266e 10918
2e0fef85 10919 if (lpfc_is_link_up(phba))
dea3101e 10920 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10921 else
10922 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10923
5eb95af0
JSEC
10924 /* Setup callback routine and issue the command. */
10925 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
da0436e9
JS
10926 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10927 abtsiocb, 0);
dea3101e 10928 if (ret_val == IOCB_ERROR) {
604a3e30 10929 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 10930 errcnt++;
10931 continue;
10932 }
10933 }
10934
10935 return errcnt;
10936}
10937
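/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * LUN-level recovery path could use the LPFC_CTX_* filters to ABTS every
 * FCP iocb still outstanding on one LUN. The vport, tgt_id and lun_id
 * values, and the SLI3 FCP ring choice, are assumed:
 *
 *	struct lpfc_hba *phba = vport->phba;
 *	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
 *	int not_aborted;
 *
 *	not_aborted = lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id,
 *					  LPFC_CTX_LUN);
 *	if (not_aborted)
 *		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 *				"could not issue abort for %d iocbs\n",
 *				not_aborted);
 *
 * LPFC_CTX_TGT widens the filter to every FCP iocb for tgt_id, and
 * LPFC_CTX_HOST to every FCP iocb on the virtual port.
 */
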
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends an abort only to the
 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 * parameters.
 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends an abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends an abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4;

	spin_lock_irq(&phba->hbalock);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (abtsiocbq == NULL)
			continue;

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
			if (pring_s4 == NULL)
				continue;
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);
	return sum;
}

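/*
 * Usage sketch (illustrative only): unlike lpfc_sli_abort_iocb(), this
 * routine is meant to run right after a task management frame has been
 * sent, and it returns the count of iocbs successfully aborted rather
 * than the failures. A hypothetical caller, with all surrounding names
 * assumed, might look like:
 *
 *	int aborted;
 *
 *	aborted = lpfc_sli_abort_taskmgmt(vport,
 *					  &phba->sli.sli3_ring[LPFC_FCP_RING],
 *					  tgt_id, lun_id, LPFC_CTX_TGT);
 *	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 *			"aborted %d outstanding iocbs after TMF\n", aborted);
 */
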
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

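/*
 * The locked read matters because this routine is used as the condition of
 * wait_event_timeout() in lpfc_sli_issue_iocb_wait() below; taking the
 * hbalock orders the flag test against the completion handler, which sets
 * LPFC_IO_WAKE under the same lock. A sketch of that pairing:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);
 */
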
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which to issue the iocb.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it is an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a
			 * success status or not - that's for the caller to
			 * check. IOCB_SUCCESS means just that the command
			 * was sent and completed. Not that it completed
			 * successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}

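/*
 * Usage sketch (illustrative only): a hypothetical caller sending an ELS
 * iocb synchronously with a 30 second timeout. The request setup is
 * assumed; the key point is the IOCB_TIMEDOUT contract - on timeout the
 * command iocb now belongs to the timeout path and must not be freed here:
 *
 *	struct lpfc_iocbq *cmdiocb = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rspiocb = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	(error handling for failed allocations omitted)
 *	... fill in cmdiocb->iocb with the request ...
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
 *				      rspiocb, 30);
 *	if (rc == IOCB_SUCCESS)
 *		... command completed; inspect rspiocb->iocb.ulpStatus ...
 *	if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmdiocb);
 *	lpfc_sli_release_iocbq(phba, rspiocb);
 */
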
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. The caller
 * should not free the mailbox resources if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completions.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	MAILBOX_t *mb = NULL;
	int retval;
	unsigned long flag;

	/* The caller might set context1 for extended buffer */
	if (pmboxq->context1)
		mb = (MAILBOX_t *)pmboxq->context1;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	} else {
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
	}

	return retval;
}

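/*
 * Usage sketch (illustrative only): a hypothetical management path issuing
 * a mailbox command synchronously. The command setup itself is assumed;
 * the key point is the MBX_TIMEOUT contract - on timeout the mailbox
 * memory is still owned by the deferred completion path and must not be
 * freed by the caller:
 *
 *	LPFC_MBOXQ_t *pmb;
 *	int rc;
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	... set up the mailbox command in pmb->u.mb ...
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	return (rc == MBX_SUCCESS) ? 0 : -EIO;
 */
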
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox command sub-system shutdown is
 * due to HBA error conditions such as EEH or ERATT, this routine shall
 * invoke the mailbox sub-system flush routine to forcefully bring down
 * the mailbox sub-system. Otherwise, if it is due to a normal condition
 * (such as offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox
 * sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command.
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}

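/*
 * Usage sketch (illustrative only): callers pick mbx_action based on why
 * the mailbox sub-system is coming down. A normal offline can afford to
 * wait for the active command to finish; an error-recovery path cannot:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);	graceful
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);	forced
 */
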
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			       &uerr_sta_lo) ||
		    lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			       &uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &portstat_reg.word0) ||
		    lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}

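/*
 * Usage sketch (illustrative only): the driver's ERATT poll timer follows
 * this pattern - the error details have already been captured in
 * phba->work_ha/work_hs by the read routines above, so the caller only
 * needs to kick the worker thread when something new shows up:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */
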
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * in which the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state where
 * the interrupt should be handled, otherwise -EIO.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-3 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there are slow-path events
 * in the HBA. However, when the device is enabled with either MSI or
 * Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The link attention and ELS ring attention events
 * are handled by the worker thread. The interrupt handler signals the
 * worker thread and returns for these events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI
 * data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-3 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there is a fast-path FCP
 * IOCB ring event in the HBA. However, when the device is enabled with
 * either MSI or Pin-IRQ interrupt mode, this function is called as part
 * of the device-level interrupt handler. When the PCI slot is in error
 * recovery or the HBA is undergoing initialization, the interrupt handler
 * will not process the interrupt. The SCSI FCP fast-path ring events are
 * handled in the interrupt context. This function is called without any
 * lock held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler for a device
 * with the SLI-3 interface spec, called from the PCI layer when either
 * MSI or Pin-IRQ interrupt mode is enabled and there is an event in the
 * HBA which requires driver attention. This function invokes the
 * slow-path interrupt attention handling function and the fast-path
 * interrupt attention handling function in turn to process the relevant
 * HBA attention events. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
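
/*
 * Registration sketch (illustrative only): in INTx or single-message MSI
 * mode the PCI layer is pointed at the device-level handler above, while
 * MSI-X mode registers the slow-path and fast-path handlers on their own
 * vectors. Assuming the usual request_irq() error handling:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */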

/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 NVME abort XRI events.
 **/
void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the nvme xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the nvme xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support) {
			lpfc_sli4_nvmet_xri_aborted(phba,
						    &cq_event->cqe.wcqe_axri);
		} else {
			lpfc_sli4_nvme_xri_aborted(phba,
						   &cq_event->cqe.wcqe_axri);
		}
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from the command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq carrying the work-queue
 * completion entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
12408static struct lpfc_iocbq *
12409lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12410 struct lpfc_iocbq *irspiocbq)
12411{
895427bd 12412 struct lpfc_sli_ring *pring;
45ed1190
JS
12413 struct lpfc_iocbq *cmdiocbq;
12414 struct lpfc_wcqe_complete *wcqe;
12415 unsigned long iflags;
12416
895427bd
JS
12417 pring = lpfc_phba_elsring(phba);
12418
45ed1190 12419 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
7e56aa25 12420 spin_lock_irqsave(&pring->ring_lock, iflags);
45ed1190
JS
12421 pring->stats.iocb_event++;
12422 /* Look up the ELS command IOCB and create pseudo response IOCB */
12423 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12424 bf_get(lpfc_wcqe_c_request_tag, wcqe));
89533e9b
JS
12425 /* Put the iocb back on the txcmplq */
12426 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
7e56aa25 12427 spin_unlock_irqrestore(&pring->ring_lock, iflags);
45ed1190
JS
12428
12429 if (unlikely(!cmdiocbq)) {
12430 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12431 "0386 ELS complete with no corresponding "
12432 "cmdiocb: iotag (%d)\n",
12433 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12434 lpfc_sli_release_iocbq(phba, irspiocbq);
12435 return NULL;
12436 }
12437
12438 /* Fake the irspiocbq and copy necessary response information */
341af102 12439 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
45ed1190
JS
12440
12441 return irspiocbq;
12442}
12443
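/*
 * Illustrative sketch (hypothetical names, not driver code) of the
 * iotag lookup used above: outstanding commands live in an array
 * indexed by iotag, so the WCQE's request_tag recovers the originating
 * command in O(1) before a pseudo response is faked for it.
 */
#include <stddef.h>

#define SK_MAX_IOTAG 4096

struct sk_cmd {
	unsigned int iotag;
	void (*done)(struct sk_cmd *cmd);
};

static struct sk_cmd *sk_iotag_table[SK_MAX_IOTAG];

static struct sk_cmd *sk_lookup_by_tag(unsigned int tag)
{
	/* Tag 0 is unused and anything out of range cannot match */
	if (tag == 0 || tag >= SK_MAX_IOTAG)
		return NULL;
	return sk_iotag_table[tag];
}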
04c68496
JS
12444/**
12445 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12446 * @phba: Pointer to HBA context object.
12447 * @cqe: Pointer to mailbox completion queue entry.
12448 *
12449 * This routine processes a mailbox completion queue entry with an asynchronous
12450 * event.
12451 *
12452 * Return: true if work posted to worker thread, otherwise false.
12453 **/
12454static bool
12455lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12456{
12457 struct lpfc_cq_event *cq_event;
12458 unsigned long iflags;
12459
12460 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12461 "0392 Async Event: word0:x%x, word1:x%x, "
12462 "word2:x%x, word3:x%x\n", mcqe->word0,
12463 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12464
12465 /* Allocate a new internal CQ_EVENT entry */
12466 cq_event = lpfc_sli4_cq_event_alloc(phba);
12467 if (!cq_event) {
12468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12469 "0394 Failed to allocate CQ_EVENT entry\n");
12470 return false;
12471 }
12472
12473 /* Move the CQE into an asynchronous event entry */
12474 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12475 spin_lock_irqsave(&phba->hbalock, iflags);
12476 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12477 /* Set the async event flag */
12478 phba->hba_flag |= ASYNC_EVENT;
12479 spin_unlock_irqrestore(&phba->hbalock, iflags);
12480
12481 return true;
12482}
12483
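/*
 * Simplified user-space sketch of the deferral pattern above: snapshot
 * the CQE into a private node, queue it under a lock, raise a pending
 * flag, and let a worker thread drain the list later. All names here
 * are hypothetical; pthreads stands in for the kernel's spinlock.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct sk_event {
	struct sk_event *next;
	unsigned char cqe[32];	/* snapshot of the hardware CQE */
};

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sk_event *sk_pending_head;
static int sk_async_flag;	/* analogous to the ASYNC_EVENT bit */

static int sk_post_async(const void *cqe, size_t len)
{
	struct sk_event *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return 0;	/* allocation failed: no work posted */
	memcpy(ev->cqe, cqe, len < sizeof(ev->cqe) ? len : sizeof(ev->cqe));
	pthread_mutex_lock(&sk_lock);
	ev->next = sk_pending_head;	/* LIFO is enough for a sketch */
	sk_pending_head = ev;
	sk_async_flag = 1;
	pthread_mutex_unlock(&sk_lock);
	return 1;	/* caller should now wake the worker thread */
}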
12484/**
12485 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12486 * @phba: Pointer to HBA context object.
12487 * @cqe: Pointer to mailbox completion queue entry.
12488 *
12489 * This routine processes a mailbox completion queue entry with a mailbox
12490 * completion event.
12491 *
12492 * Return: true if work posted to worker thread, otherwise false.
12493 **/
12494static bool
12495lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12496{
12497 uint32_t mcqe_status;
12498 MAILBOX_t *mbox, *pmbox;
12499 struct lpfc_mqe *mqe;
12500 struct lpfc_vport *vport;
12501 struct lpfc_nodelist *ndlp;
12502 struct lpfc_dmabuf *mp;
12503 unsigned long iflags;
12504 LPFC_MBOXQ_t *pmb;
12505 bool workposted = false;
12506 int rc;
12507
12508 /* If not a mailbox-complete MCQE, bail out via the consumed-flag check */
12509 if (!bf_get(lpfc_trailer_completed, mcqe))
12510 goto out_no_mqe_complete;
12511
12512 /* Get the reference to the active mbox command */
12513 spin_lock_irqsave(&phba->hbalock, iflags);
12514 pmb = phba->sli.mbox_active;
12515 if (unlikely(!pmb)) {
12516 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12517 "1832 No pending MBOX command to handle\n");
12518 spin_unlock_irqrestore(&phba->hbalock, iflags);
12519 goto out_no_mqe_complete;
12520 }
12521 spin_unlock_irqrestore(&phba->hbalock, iflags);
12522 mqe = &pmb->u.mqe;
12523 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12524 mbox = phba->mbox;
12525 vport = pmb->vport;
12526
12527 /* Reset heartbeat timer */
12528 phba->last_completion_time = jiffies;
12529 del_timer(&phba->sli.mbox_tmo);
12530
12531 /* Move mbox data to caller's mailbox region, do endian swapping */
12532 if (pmb->mbox_cmpl && mbox)
12533 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
04c68496 12534
73d91e50
JS
12535 /*
12536 * For mcqe errors, conditionally move a modified error code to
12537 * the mbox so that the error will not be missed.
12538 */
12539 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12540 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12541 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12542 bf_set(lpfc_mqe_status, mqe,
12543 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12544 }
04c68496
JS
12545 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12546 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12547 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12548 "MBOX dflt rpi: status:x%x rpi:x%x",
12549 mcqe_status,
12550 pmbox->un.varWords[0], 0);
12551 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12552 mp = (struct lpfc_dmabuf *)(pmb->context1);
12553 ndlp = (struct lpfc_nodelist *)pmb->context2;
12554 /* Reg_LOGIN of dflt RPI was successful. Now let's get
12555 * rid of the RPI using the same mbox buffer.
12556 */
12557 lpfc_unreg_login(phba, vport->vpi,
12558 pmbox->un.varWords[0], pmb);
12559 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12560 pmb->context1 = mp;
12561 pmb->context2 = ndlp;
12562 pmb->vport = vport;
12563 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12564 if (rc != MBX_BUSY)
12565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12566 LOG_SLI, "0385 rc should "
12567 "have been MBX_BUSY\n");
12568 if (rc != MBX_NOT_FINISHED)
12569 goto send_current_mbox;
12570 }
12571 }
12572 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12573 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12574 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12575
12576 /* There is mailbox completion work to do */
12577 spin_lock_irqsave(&phba->hbalock, iflags);
12578 __lpfc_mbox_cmpl_put(phba, pmb);
12579 phba->work_ha |= HA_MBATT;
12580 spin_unlock_irqrestore(&phba->hbalock, iflags);
12581 workposted = true;
12582
12583send_current_mbox:
12584 spin_lock_irqsave(&phba->hbalock, iflags);
12585 /* Release the mailbox command posting token */
12586 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12587 /* Setting active mailbox pointer need to be in sync to flag clear */
12588 phba->sli.mbox_active = NULL;
12589 spin_unlock_irqrestore(&phba->hbalock, iflags);
12590 /* Wake up worker thread to post the next pending mailbox command */
12591 lpfc_worker_wake_up(phba);
12592out_no_mqe_complete:
12593 if (bf_get(lpfc_trailer_consumed, mcqe))
12594 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12595 return workposted;
12596}
12597
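/*
 * Sketch of the status reconciliation above: when the MCQE reports a
 * failure but the mailbox payload still claims success, fold the MCQE
 * status into the payload status so the completion handler cannot miss
 * the error. The error-range base value below is an assumption for
 * illustration, not the driver's LPFC_MBX_ERROR_RANGE.
 */
#define SK_MBX_SUCCESS     0u
#define SK_MBX_ERROR_RANGE 0x4000u

static unsigned int sk_merge_status(unsigned int mqe_status,
				    unsigned int mcqe_status)
{
	if (mcqe_status != SK_MBX_SUCCESS && mqe_status == SK_MBX_SUCCESS)
		return SK_MBX_ERROR_RANGE | mcqe_status;
	return mqe_status;	/* payload already carries an error */
}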
12598/**
12599 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12600 * @phba: Pointer to HBA context object.
12601 * @cqe: Pointer to mailbox completion queue entry.
12602 *
12603 * This routine processes a mailbox completion queue entry. It invokes the
12604 * proper mailbox completion handling or asynchronous event handling routine
12605 * according to the MCQE's async bit.
12606 *
12607 * Return: true if work posted to worker thread, otherwise false.
12608 **/
12609static bool
12610lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12611{
12612 struct lpfc_mcqe mcqe;
12613 bool workposted;
12614
12615 /* Copy the mailbox MCQE and convert endian order as needed */
12616 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12617
12618 /* Invoke the proper event handling routine */
12619 if (!bf_get(lpfc_trailer_async, &mcqe))
12620 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12621 else
12622 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12623 return workposted;
12624}
12625
4f774513
JS
12626/**
12627 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12628 * @phba: Pointer to HBA context object.
2a76a283 12629 * @cq: Pointer to associated CQ
4f774513
JS
12630 * @wcqe: Pointer to work-queue completion queue entry.
12631 *
12632 * This routine handles an ELS work-queue completion event.
12633 *
12634 * Return: true if work posted to worker thread, otherwise false.
12635 **/
12636static bool
2a76a283 12637lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
12638 struct lpfc_wcqe_complete *wcqe)
12639{
4f774513
JS
12640 struct lpfc_iocbq *irspiocbq;
12641 unsigned long iflags;
2a76a283 12642 struct lpfc_sli_ring *pring = cq->pring;
0e9bb8d7
JS
12643 int txq_cnt = 0;
12644 int txcmplq_cnt = 0;
12645 int fcp_txcmplq_cnt = 0;
4f774513 12646
45ed1190 12647 /* Get an irspiocbq for later ELS response processing use */
4f774513
JS
12648 irspiocbq = lpfc_sli_get_iocbq(phba);
12649 if (!irspiocbq) {
0e9bb8d7
JS
12650 if (!list_empty(&pring->txq))
12651 txq_cnt++;
12652 if (!list_empty(&pring->txcmplq))
12653 txcmplq_cnt++;
4f774513 12654 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2a9bf3d0
JS
12655 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12656 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
0e9bb8d7
JS
12657 txq_cnt, phba->iocb_cnt,
12658 fcp_txcmplq_cnt,
12659 txcmplq_cnt);
45ed1190 12660 return false;
4f774513 12661 }
4f774513 12662
45ed1190
JS
12663 /* Save off the slow-path queue event for work thread to process */
12664 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
4f774513 12665 spin_lock_irqsave(&phba->hbalock, iflags);
4d9ab994 12666 list_add_tail(&irspiocbq->cq_event.list,
45ed1190
JS
12667 &phba->sli4_hba.sp_queue_event);
12668 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513 12669 spin_unlock_irqrestore(&phba->hbalock, iflags);
4f774513 12670
45ed1190 12671 return true;
4f774513
JS
12672}
12673
12674/**
12675 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12676 * @phba: Pointer to HBA context object.
12677 * @wcqe: Pointer to work-queue completion queue entry.
12678 *
3f8b6fb7 12679 * This routine handles a slow-path WQ entry consumed event by invoking the
4f774513
JS
12680 * proper WQ release routine to the slow-path WQ.
12681 **/
12682static void
12683lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12684 struct lpfc_wcqe_release *wcqe)
12685{
2e90f4b5
JS
12686 /* sanity check on queue memory */
12687 if (unlikely(!phba->sli4_hba.els_wq))
12688 return;
4f774513
JS
12689 /* Check for the slow-path ELS work queue */
12690 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12691 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12692 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12693 else
12694 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12695 "2579 Slow-path wqe consume event carries "
12696 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12697 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12698 phba->sli4_hba.els_wq->queue_id);
12699}
12700
12701/**
12702 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
12703 * @phba: Pointer to HBA context object.
12704 * @cq: Pointer to a WQ completion queue.
12705 * @wcqe: Pointer to work-queue completion queue entry.
12706 *
12707 * This routine handles an XRI abort event.
12708 *
12709 * Return: true if work posted to worker thread, otherwise false.
12710 **/
12711static bool
12712lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12713 struct lpfc_queue *cq,
12714 struct sli4_wcqe_xri_aborted *wcqe)
12715{
12716 bool workposted = false;
12717 struct lpfc_cq_event *cq_event;
12718 unsigned long iflags;
12719
12720 /* Allocate a new internal CQ_EVENT entry */
12721 cq_event = lpfc_sli4_cq_event_alloc(phba);
12722 if (!cq_event) {
12723 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12724 "0602 Failed to allocate CQ_EVENT entry\n");
12725 return false;
12726 }
12727
12728 /* Move the CQE into the proper xri abort event list */
12729 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12730 switch (cq->subtype) {
12731 case LPFC_FCP:
12732 spin_lock_irqsave(&phba->hbalock, iflags);
12733 list_add_tail(&cq_event->list,
12734 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12735 /* Set the fcp xri abort event flag */
12736 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12737 spin_unlock_irqrestore(&phba->hbalock, iflags);
12738 workposted = true;
12739 break;
12740 case LPFC_ELS:
12741 spin_lock_irqsave(&phba->hbalock, iflags);
12742 list_add_tail(&cq_event->list,
12743 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12744 /* Set the els xri abort event flag */
12745 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12746 spin_unlock_irqrestore(&phba->hbalock, iflags);
12747 workposted = true;
12748 break;
318083ad
JS
12749 case LPFC_NVME:
12750 spin_lock_irqsave(&phba->hbalock, iflags);
12751 list_add_tail(&cq_event->list,
12752 &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
12753 /* Set the nvme xri abort event flag */
12754 phba->hba_flag |= NVME_XRI_ABORT_EVENT;
12755 spin_unlock_irqrestore(&phba->hbalock, iflags);
12756 workposted = true;
12757 break;
4f774513
JS
12758 default:
12759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
318083ad
JS
12760 "0603 Invalid CQ subtype %d: "
12761 "%08x %08x %08x %08x\n",
12762 cq->subtype, wcqe->word0, wcqe->parameter,
12763 wcqe->word2, wcqe->word3);
12764 lpfc_sli4_cq_event_release(phba, cq_event);
4f774513
JS
12765 workposted = false;
12766 break;
12767 }
12768 return workposted;
12769}
12770
4f774513
JS
12771/**
12772 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
12773 * @phba: Pointer to HBA context object.
12774 * @rcqe: Pointer to receive-queue completion queue entry.
12775 *
12776 * This routine processes a receive-queue completion queue entry.
12777 *
12778 * Return: true if work posted to worker thread, otherwise false.
12779 **/
12780static bool
4d9ab994 12781lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 12782{
4f774513 12783 bool workposted = false;
895427bd 12784 struct fc_frame_header *fc_hdr;
4f774513
JS
12785 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12786 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
547077a4 12787 struct lpfc_nvmet_tgtport *tgtp;
4f774513 12788 struct hbq_dmabuf *dma_buf;
7851fe2c 12789 uint32_t status, rq_id;
4f774513
JS
12790 unsigned long iflags;
12791
2e90f4b5
JS
12792 /* sanity check on queue memory */
12793 if (unlikely(!hrq) || unlikely(!drq))
12794 return workposted;
12795
7851fe2c
JS
12796 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12797 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12798 else
12799 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12800 if (rq_id != hrq->queue_id)
4f774513
JS
12801 goto out;
12802
4d9ab994 12803 status = bf_get(lpfc_rcqe_status, rcqe);
4f774513
JS
12804 switch (status) {
12805 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12806 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12807 "2537 Receive Frame Truncated!!\n");
12808 case FC_STATUS_RQ_SUCCESS:
5ffc266e 12809 lpfc_sli4_rq_release(hrq, drq);
4f774513
JS
12810 spin_lock_irqsave(&phba->hbalock, iflags);
12811 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12812 if (!dma_buf) {
b84daac9 12813 hrq->RQ_no_buf_found++;
4f774513
JS
12814 spin_unlock_irqrestore(&phba->hbalock, iflags);
12815 goto out;
12816 }
b84daac9 12817 hrq->RQ_rcv_buf++;
547077a4 12818 hrq->RQ_buf_posted--;
4d9ab994 12819 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
895427bd
JS
12820
12821 /* If a NVME LS event (type 0x28), treat it as Fast path */
12822 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
12823
4f774513 12824 /* save off the frame for the worker thread to process */
4d9ab994 12825 list_add_tail(&dma_buf->cq_event.list,
45ed1190 12826 &phba->sli4_hba.sp_queue_event);
4f774513 12827 /* Frame received */
45ed1190 12828 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513
JS
12829 spin_unlock_irqrestore(&phba->hbalock, iflags);
12830 workposted = true;
12831 break;
4f774513 12832 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
12833 if (phba->nvmet_support) {
12834 tgtp = phba->targetport->private;
12835 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
12836 "6402 RQE Error x%x, posted %d err_cnt "
12837 "%d: %x %x %x\n",
12838 status, hrq->RQ_buf_posted,
12839 hrq->RQ_no_posted_buf,
12840 atomic_read(&tgtp->rcv_fcp_cmd_in),
12841 atomic_read(&tgtp->rcv_fcp_cmd_out),
12842 atomic_read(&tgtp->xmt_fcp_release));
12843 }
12844 /* fallthrough */
12845
12846 case FC_STATUS_INSUFF_BUF_NEED_BUF:
b84daac9 12847 hrq->RQ_no_posted_buf++;
4f774513
JS
12848 /* Post more buffers if possible */
12849 spin_lock_irqsave(&phba->hbalock, iflags);
12850 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12851 spin_unlock_irqrestore(&phba->hbalock, iflags);
12852 workposted = true;
12853 break;
12854 }
12855out:
12856 return workposted;
4f774513
JS
12857}
12858
4d9ab994
JS
12859/**
12860 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12861 * @phba: Pointer to HBA context object.
12862 * @cq: Pointer to the completion queue.
12863 * @wcqe: Pointer to a completion queue entry.
12864 *
25985edc 12865 * This routine processes a slow-path work-queue or receive-queue completion queue
4d9ab994
JS
12866 * entry.
12867 *
12868 * Return: true if work posted to worker thread, otherwise false.
12869 **/
12870static bool
12871lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12872 struct lpfc_cqe *cqe)
12873{
45ed1190 12874 struct lpfc_cqe cqevt;
4d9ab994
JS
12875 bool workposted = false;
12876
12877 /* Copy the work queue CQE and convert endian order if needed */
45ed1190 12878 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
4d9ab994
JS
12879
12880 /* Check and process for different type of WCQE and dispatch */
45ed1190 12881 switch (bf_get(lpfc_cqe_code, &cqevt)) {
4d9ab994 12882 case CQE_CODE_COMPL_WQE:
45ed1190 12883 /* Process the WQ/RQ complete event */
bc73905a 12884 phba->last_completion_time = jiffies;
2a76a283 12885 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
45ed1190 12886 (struct lpfc_wcqe_complete *)&cqevt);
4d9ab994
JS
12887 break;
12888 case CQE_CODE_RELEASE_WQE:
12889 /* Process the WQ release event */
12890 lpfc_sli4_sp_handle_rel_wcqe(phba,
45ed1190 12891 (struct lpfc_wcqe_release *)&cqevt);
4d9ab994
JS
12892 break;
12893 case CQE_CODE_XRI_ABORTED:
12894 /* Process the WQ XRI abort event */
bc73905a 12895 phba->last_completion_time = jiffies;
4d9ab994 12896 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
45ed1190 12897 (struct sli4_wcqe_xri_aborted *)&cqevt);
4d9ab994
JS
12898 break;
12899 case CQE_CODE_RECEIVE:
7851fe2c 12900 case CQE_CODE_RECEIVE_V1:
4d9ab994 12901 /* Process the RQ event */
bc73905a 12902 phba->last_completion_time = jiffies;
4d9ab994 12903 workposted = lpfc_sli4_sp_handle_rcqe(phba,
45ed1190 12904 (struct lpfc_rcqe *)&cqevt);
4d9ab994
JS
12905 break;
12906 default:
12907 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12908 "0388 Not a valid WCQE code: x%x\n",
45ed1190 12909 bf_get(lpfc_cqe_code, &cqevt));
4d9ab994
JS
12910 break;
12911 }
12912 return workposted;
12913}
12914
4f774513
JS
12915/**
12916 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
12917 * @phba: Pointer to HBA context object.
12918 * @eqe: Pointer to slow-path event queue entry.
12919 *
12920 * This routine processes an event queue entry from the slow-path event queue.
12921 * It will check the MajorCode and MinorCode to determine whether this is for a
12922 * completion event on a completion queue; if not, an error shall be logged
12923 * and the routine will just return. Otherwise, it will get to the corresponding completion
12924 * queue and process all the entries on that completion queue, rearm the
12925 * completion queue, and then return.
12926 *
12927 **/
12928static void
67d12733
JS
12929lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12930 struct lpfc_queue *speq)
4f774513 12931{
67d12733 12932 struct lpfc_queue *cq = NULL, *childq;
4f774513
JS
12933 struct lpfc_cqe *cqe;
12934 bool workposted = false;
12935 int ecount = 0;
12936 uint16_t cqid;
12937
4f774513 12938 /* Get the reference to the corresponding CQ */
cb5172ea 12939 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
4f774513 12940
4f774513
JS
12941 list_for_each_entry(childq, &speq->child_list, list) {
12942 if (childq->queue_id == cqid) {
12943 cq = childq;
12944 break;
12945 }
12946 }
12947 if (unlikely(!cq)) {
75baf696
JS
12948 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12949 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12950 "0365 Slow-path CQ identifier "
12951 "(%d) does not exist\n", cqid);
4f774513
JS
12952 return;
12953 }
12954
895427bd
JS
12955 /* Save EQ associated with this CQ */
12956 cq->assoc_qp = speq;
12957
4f774513
JS
12958 /* Process all the entries to the CQ */
12959 switch (cq->type) {
12960 case LPFC_MCQ:
12961 while ((cqe = lpfc_sli4_cq_get(cq))) {
12962 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
73d91e50 12963 if (!(++ecount % cq->entry_repost))
4f774513 12964 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
b84daac9 12965 cq->CQ_mbox++;
4f774513
JS
12966 }
12967 break;
12968 case LPFC_WCQ:
12969 while ((cqe = lpfc_sli4_cq_get(cq))) {
895427bd
JS
12970 if ((cq->subtype == LPFC_FCP) ||
12971 (cq->subtype == LPFC_NVME))
12972 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
0558056c
JS
12973 cqe);
12974 else
12975 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12976 cqe);
73d91e50 12977 if (!(++ecount % cq->entry_repost))
4f774513
JS
12978 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12979 }
b84daac9
JS
12980
12981 /* Track the max number of CQEs processed in 1 EQ */
12982 if (ecount > cq->CQ_max_cqe)
12983 cq->CQ_max_cqe = ecount;
4f774513
JS
12984 break;
12985 default:
12986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12987 "0370 Invalid completion queue type (%d)\n",
12988 cq->type);
12989 return;
12990 }
12991
12992 /* Catch the no cq entry condition, log an error */
12993 if (unlikely(ecount == 0))
12994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12995 "0371 No entry from the CQ: identifier "
12996 "(x%x), type (%d)\n", cq->queue_id, cq->type);
12997
12998 /* In any case, flush and re-arm the RCQ */
12999 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13000
13001 /* wake up worker thread if there are works to be done */
13002 if (workposted)
13003 lpfc_worker_wake_up(phba);
13004}
13005
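/*
 * Sketch of the periodic-release pattern used by the CQ loops above:
 * every entry_repost entries the consumed count is handed back to the
 * hardware without re-arming the interrupt, and only the final release
 * re-arms. Helper names are hypothetical; this is not driver code.
 */
enum sk_arm { SK_NOARM, SK_REARM };

static void sk_cq_release(int consumed, enum sk_arm arm)
{
	(void)consumed;
	(void)arm;	/* a real driver would ring the CQ doorbell here */
}

static void sk_drain_cq(int navail, int entry_repost)
{
	int ecount = 0;

	while (ecount < navail) {
		/* ... process one CQE ... */
		if (!(++ecount % entry_repost))
			sk_cq_release(ecount, SK_NOARM); /* free slots early */
	}
	sk_cq_release(ecount, SK_REARM);	/* final: re-arm the CQ */
}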
13006/**
13007 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
2a76a283
JS
13008 * @phba: Pointer to HBA context object.
13009 * @cq: Pointer to associated CQ
13010 * @wcqe: Pointer to work-queue completion queue entry.
4f774513
JS
13011 *
13012 * This routine processes a fast-path work queue completion entry from the fast-path
13013 * event queue for FCP command response completion.
13014 **/
13015static void
2a76a283 13016lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13017 struct lpfc_wcqe_complete *wcqe)
13018{
2a76a283 13019 struct lpfc_sli_ring *pring = cq->pring;
4f774513
JS
13020 struct lpfc_iocbq *cmdiocbq;
13021 struct lpfc_iocbq irspiocbq;
13022 unsigned long iflags;
13023
4f774513
JS
13024 /* Check for response status */
13025 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13026 /* If resource errors reported from HBA, reduce queue
13027 * depth of the SCSI device.
13028 */
e3d2b802
JS
13029 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13030 IOSTAT_LOCAL_REJECT)) &&
13031 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13032 IOERR_NO_RESOURCES))
4f774513 13033 phba->lpfc_rampdown_queue_depth(phba);
e3d2b802 13034
4f774513
JS
13035 /* Log the error status */
13036 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13037 "0373 FCP complete error: status=x%x, "
13038 "hw_status=x%x, total_data_specified=%d, "
13039 "parameter=x%x, word3=x%x\n",
13040 bf_get(lpfc_wcqe_c_status, wcqe),
13041 bf_get(lpfc_wcqe_c_hw_status, wcqe),
13042 wcqe->total_data_placed, wcqe->parameter,
13043 wcqe->word3);
13044 }
13045
13046 /* Look up the FCP command IOCB and create pseudo response IOCB */
7e56aa25
JS
13047 spin_lock_irqsave(&pring->ring_lock, iflags);
13048 pring->stats.iocb_event++;
4f774513
JS
13049 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13050 bf_get(lpfc_wcqe_c_request_tag, wcqe));
7e56aa25 13051 spin_unlock_irqrestore(&pring->ring_lock, iflags);
4f774513
JS
13052 if (unlikely(!cmdiocbq)) {
13053 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13054 "0374 FCP complete with no corresponding "
13055 "cmdiocb: iotag (%d)\n",
13056 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13057 return;
13058 }
895427bd
JS
13059
13060 if (cq->assoc_qp)
13061 cmdiocbq->isr_timestamp =
13062 cq->assoc_qp->isr_timestamp;
13063
13064 if (cmdiocbq->iocb_cmpl == NULL) {
13065 if (cmdiocbq->wqe_cmpl) {
13066 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13067 spin_lock_irqsave(&phba->hbalock, iflags);
13068 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13069 spin_unlock_irqrestore(&phba->hbalock, iflags);
13070 }
13071
13072 /* Pass the cmd_iocb and the wcqe to the upper layer */
13073 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13074 return;
13075 }
4f774513
JS
13076 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13077 "0375 FCP cmdiocb not callback function "
13078 "iotag: (%d)\n",
13079 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13080 return;
13081 }
13082
13083 /* Fake the irspiocb and copy necessary response information */
341af102 13084 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
4f774513 13085
0f65ff68
JS
13086 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13087 spin_lock_irqsave(&phba->hbalock, iflags);
13088 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13089 spin_unlock_irqrestore(&phba->hbalock, iflags);
13090 }
13091
4f774513
JS
13092 /* Pass the cmd_iocb and the rsp state to the upper layer */
13093 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13094}
13095
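/*
 * Sketch of the completion dispatch above (hypothetical types): a
 * command that supplies wqe_cmpl() receives the raw WCQE directly,
 * while a legacy command supplies iocb_cmpl() and receives a faked
 * IOCB response built from the WCQE; the wqe_cmpl() path is preferred.
 */
struct sk_wcqe;		/* opaque hardware completion entry */

struct sk_iocb {
	void (*wqe_cmpl)(struct sk_iocb *cmd, struct sk_wcqe *wcqe);
	void (*iocb_cmpl)(struct sk_iocb *cmd, struct sk_iocb *rsp);
};

static void sk_complete(struct sk_iocb *cmd, struct sk_wcqe *wcqe,
			struct sk_iocb *fake_rsp)
{
	if (!cmd->iocb_cmpl && cmd->wqe_cmpl) {
		cmd->wqe_cmpl(cmd, wcqe);	/* native SLI-4 path */
		return;
	}
	if (cmd->iocb_cmpl)
		cmd->iocb_cmpl(cmd, fake_rsp);	/* legacy IOCB path */
	/* else: nothing to call back, log it as the driver does */
}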
13096/**
13097 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13098 * @phba: Pointer to HBA context object.
13099 * @cq: Pointer to completion queue.
13100 * @wcqe: Pointer to work-queue completion queue entry.
13101 *
3f8b6fb7 13102 * This routine handles a fast-path WQ entry consumed event by invoking the
4f774513
JS
13103 * proper WQ release routine for the matching fast-path WQ.
13104 **/
13105static void
13106lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13107 struct lpfc_wcqe_release *wcqe)
13108{
13109 struct lpfc_queue *childwq;
13110 bool wqid_matched = false;
895427bd 13111 uint16_t hba_wqid;
4f774513
JS
13112
13113 /* Check for fast-path FCP work queue release */
895427bd 13114 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
4f774513 13115 list_for_each_entry(childwq, &cq->child_list, list) {
895427bd 13116 if (childwq->queue_id == hba_wqid) {
4f774513
JS
13117 lpfc_sli4_wq_release(childwq,
13118 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13119 wqid_matched = true;
13120 break;
13121 }
13122 }
13123 /* Report warning log message if no match found */
13124 if (wqid_matched != true)
13125 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13126 "2580 Fast-path wqe consume event carries "
895427bd 13127 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
4f774513
JS
13128}
13129
13130/**
2d7dbc4c
JS
13131 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13132 * @phba: Pointer to HBA context object.
13133 * @rcqe: Pointer to receive-queue completion queue entry.
4f774513 13134 *
2d7dbc4c
JS
13135 * This routine processes a receive-queue completion queue entry.
13136 *
13137 * Return: true if work posted to worker thread, otherwise false.
13138 **/
13139static bool
13140lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13141 struct lpfc_rcqe *rcqe)
13142{
13143 bool workposted = false;
13144 struct lpfc_queue *hrq;
13145 struct lpfc_queue *drq;
13146 struct rqb_dmabuf *dma_buf;
13147 struct fc_frame_header *fc_hdr;
547077a4 13148 struct lpfc_nvmet_tgtport *tgtp;
2d7dbc4c
JS
13149 uint32_t status, rq_id;
13150 unsigned long iflags;
13151 uint32_t fctl, idx;
13152
13153 if ((phba->nvmet_support == 0) ||
13154 (phba->sli4_hba.nvmet_cqset == NULL))
13155 return workposted;
13156
13157 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13158 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13159 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13160
13161 /* sanity check on queue memory */
13162 if (unlikely(!hrq) || unlikely(!drq))
13163 return workposted;
13164
13165 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13166 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13167 else
13168 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13169
13170 if ((phba->nvmet_support == 0) ||
13171 (rq_id != hrq->queue_id))
13172 return workposted;
13173
13174 status = bf_get(lpfc_rcqe_status, rcqe);
13175 switch (status) {
13176 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13177 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13178 "6126 Receive Frame Truncated!!\n");
2d7dbc4c
JS
13179 case FC_STATUS_RQ_SUCCESS:
13180 lpfc_sli4_rq_release(hrq, drq);
13181 spin_lock_irqsave(&phba->hbalock, iflags);
13182 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13183 if (!dma_buf) {
13184 hrq->RQ_no_buf_found++;
13185 spin_unlock_irqrestore(&phba->hbalock, iflags);
13186 goto out;
13187 }
13188 spin_unlock_irqrestore(&phba->hbalock, iflags);
13189 hrq->RQ_rcv_buf++;
547077a4 13190 hrq->RQ_buf_posted--;
2d7dbc4c
JS
13191 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13192
13193 /* Just some basic sanity checks on FCP Command frame */
13194 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13195 fc_hdr->fh_f_ctl[1] << 8 |
13196 fc_hdr->fh_f_ctl[2]);
13197 if (((fctl &
13198 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13199 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13200 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13201 goto drop;
13202
13203 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13204 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
d613b6a7
JS
13205 lpfc_nvmet_unsol_fcp_event(
13206 phba, phba->sli4_hba.els_wq->pring, dma_buf,
13207 cq->assoc_qp->isr_timestamp);
2d7dbc4c
JS
13208 return false;
13209 }
13210drop:
13211 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13212 break;
2d7dbc4c 13213 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
13214 if (phba->nvmet_support) {
13215 tgtp = phba->targetport->private;
13216 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13217 "6401 RQE Error x%x, posted %d err_cnt "
13218 "%d: %x %x %x\n",
13219 status, hrq->RQ_buf_posted,
13220 hrq->RQ_no_posted_buf,
13221 atomic_read(&tgtp->rcv_fcp_cmd_in),
13222 atomic_read(&tgtp->rcv_fcp_cmd_out),
13223 atomic_read(&tgtp->xmt_fcp_release));
13224 }
13225 /* fallthrough */
13226
13227 case FC_STATUS_INSUFF_BUF_NEED_BUF:
2d7dbc4c
JS
13228 hrq->RQ_no_posted_buf++;
13229 /* Post more buffers if possible */
13230 spin_lock_irqsave(&phba->hbalock, iflags);
13231 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13232 spin_unlock_irqrestore(&phba->hbalock, iflags);
13233 workposted = true;
13234 break;
13235 }
13236out:
13237 return workposted;
13238}
13239
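/*
 * Sketch of the FC header sanity check above: f_ctl is a 24-bit field
 * carried as three bytes, and a single-frame FCP command must have the
 * first-sequence, end-sequence and sequence-initiative bits all set
 * with a zero SEQ_CNT. Bit positions follow FC-FS but are restated
 * here as assumptions; this is not driver code.
 */
#define SK_FC_FIRST_SEQ 0x200000u
#define SK_FC_END_SEQ   0x080000u
#define SK_FC_SEQ_INIT  0x010000u
#define SK_FC_REQ_BITS  (SK_FC_FIRST_SEQ | SK_FC_END_SEQ | SK_FC_SEQ_INIT)

static int sk_single_frame_fcp_cmd(const unsigned char f_ctl[3],
				   unsigned short seq_cnt)
{
	unsigned int fctl = f_ctl[0] << 16 | f_ctl[1] << 8 | f_ctl[2];

	return (fctl & SK_FC_REQ_BITS) == SK_FC_REQ_BITS && seq_cnt == 0;
}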
4f774513 13240/**
895427bd 13241 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
4f774513
JS
13242 * @cq: Pointer to the completion queue.
13243 * @eqe: Pointer to fast-path completion queue entry.
13244 *
13245 * This routine processes a fast-path work queue completion entry from the fast-path
13246 * event queue for FCP command response completion.
13247 **/
13248static int
895427bd 13249lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13250 struct lpfc_cqe *cqe)
13251{
13252 struct lpfc_wcqe_release wcqe;
13253 bool workposted = false;
13254
13255 /* Copy the work queue CQE and convert endian order if needed */
13256 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13257
13258 /* Check and process for different type of WCQE and dispatch */
13259 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13260 case CQE_CODE_COMPL_WQE:
895427bd 13261 case CQE_CODE_NVME_ERSP:
b84daac9 13262 cq->CQ_wq++;
4f774513 13263 /* Process the WQ complete event */
98fc5dd9 13264 phba->last_completion_time = jiffies;
895427bd
JS
13265 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13266 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13267 (struct lpfc_wcqe_complete *)&wcqe);
13268 if (cq->subtype == LPFC_NVME_LS)
13269 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
4f774513
JS
13270 (struct lpfc_wcqe_complete *)&wcqe);
13271 break;
13272 case CQE_CODE_RELEASE_WQE:
b84daac9 13273 cq->CQ_release_wqe++;
4f774513
JS
13274 /* Process the WQ release event */
13275 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13276 (struct lpfc_wcqe_release *)&wcqe);
13277 break;
13278 case CQE_CODE_XRI_ABORTED:
b84daac9 13279 cq->CQ_xri_aborted++;
4f774513 13280 /* Process the WQ XRI abort event */
bc73905a 13281 phba->last_completion_time = jiffies;
4f774513
JS
13282 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13283 (struct sli4_wcqe_xri_aborted *)&wcqe);
13284 break;
895427bd
JS
13285 case CQE_CODE_RECEIVE_V1:
13286 case CQE_CODE_RECEIVE:
13287 phba->last_completion_time = jiffies;
2d7dbc4c
JS
13288 if (cq->subtype == LPFC_NVMET) {
13289 workposted = lpfc_sli4_nvmet_handle_rcqe(
13290 phba, cq, (struct lpfc_rcqe *)&wcqe);
13291 }
895427bd 13292 break;
4f774513
JS
13293 default:
13294 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 13295 "0144 Not a valid CQE code: x%x\n",
4f774513
JS
13296 bf_get(lpfc_wcqe_c_code, &wcqe));
13297 break;
13298 }
13299 return workposted;
13300}
13301
13302/**
67d12733 13303 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
4f774513
JS
13304 * @phba: Pointer to HBA context object.
13305 * @eqe: Pointer to fast-path event queue entry.
13306 *
13307 * This routine processes an event queue entry from the fast-path event queue.
13308 * It will check the MajorCode and MinorCode to determine whether this is for a
13309 * completion event on a completion queue; if not, an error shall be logged
13310 * and the routine will just return. Otherwise, it will get to the corresponding completion
13311 * queue and process all the entries on the completion queue, rearm the
13312 * completion queue, and then return.
13313 **/
13314static void
67d12733
JS
13315lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13316 uint32_t qidx)
4f774513 13317{
895427bd 13318 struct lpfc_queue *cq = NULL;
4f774513
JS
13319 struct lpfc_cqe *cqe;
13320 bool workposted = false;
2d7dbc4c 13321 uint16_t cqid, id;
4f774513
JS
13322 int ecount = 0;
13323
cb5172ea 13324 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
4f774513 13325 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
67d12733 13326 "0366 Not a valid completion "
4f774513 13327 "event: majorcode=x%x, minorcode=x%x\n",
cb5172ea
JS
13328 bf_get_le32(lpfc_eqe_major_code, eqe),
13329 bf_get_le32(lpfc_eqe_minor_code, eqe));
4f774513
JS
13330 return;
13331 }
13332
67d12733
JS
13333 /* Get the reference to the corresponding CQ */
13334 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13335
2d7dbc4c
JS
13336 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
13337 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
13338 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
13339 /* Process NVMET unsol rcv */
13340 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
13341 goto process_cq;
13342 }
67d12733
JS
13343 }
13344
895427bd
JS
13345 if (phba->sli4_hba.nvme_cq_map &&
13346 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
f358dd0c 13347 /* Process NVME / NVMET command completion */
895427bd
JS
13348 cq = phba->sli4_hba.nvme_cq[qidx];
13349 goto process_cq;
2e90f4b5 13350 }
67d12733 13351
895427bd
JS
13352 if (phba->sli4_hba.fcp_cq_map &&
13353 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13354 /* Process FCP command completion */
13355 cq = phba->sli4_hba.fcp_cq[qidx];
13356 goto process_cq;
2e90f4b5 13357 }
895427bd
JS
13358
13359 if (phba->sli4_hba.nvmels_cq &&
13360 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13361 /* Process NVME unsol rcv */
13362 cq = phba->sli4_hba.nvmels_cq;
13363 }
13364
13365 /* Otherwise this is a Slow path event */
13366 if (cq == NULL) {
13367 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
4f774513
JS
13368 return;
13369 }
13370
895427bd 13371process_cq:
4f774513
JS
13372 if (unlikely(cqid != cq->queue_id)) {
13373 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13374 "0368 Miss-matched fast-path completion "
13375 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13376 cqid, cq->queue_id);
13377 return;
13378 }
13379
895427bd
JS
13380 /* Save EQ associated with this CQ */
13381 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13382
4f774513
JS
13383 /* Process all the entries to the CQ */
13384 while ((cqe = lpfc_sli4_cq_get(cq))) {
895427bd 13385 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
73d91e50 13386 if (!(++ecount % cq->entry_repost))
4f774513
JS
13387 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13388 }
13389
b84daac9
JS
13390 /* Track the max number of CQEs processed in 1 EQ */
13391 if (ecount > cq->CQ_max_cqe)
13392 cq->CQ_max_cqe = ecount;
13393
4f774513
JS
13394 /* Catch the no cq entry condition */
13395 if (unlikely(ecount == 0))
13396 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13397 "0369 No entry from fast-path completion "
13398 "queue fcpcqid=%d\n", cq->queue_id);
13399
13400 /* In any case, flush and re-arm the CQ */
13401 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13402
13403 /* wake up worker thread if there are works to be done */
13404 if (workposted)
13405 lpfc_worker_wake_up(phba);
13406}
13407
13408static void
13409lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13410{
13411 struct lpfc_eqe *eqe;
13412
13413 /* walk all the EQ entries and drop on the floor */
13414 while ((eqe = lpfc_sli4_eq_get(eq)))
13415 ;
13416
13417 /* Clear and re-arm the EQ */
13418 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13419}
13420
1ba981fd
JS
13421
13422/**
13423 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13424 * entry
13425 * @phba: Pointer to HBA context object.
13426 * @eqe: Pointer to fast-path event queue entry.
13427 *
13428 * This routine processes an event queue entry from the Flash Optimized Fabric
13429 * event queue. It will check the MajorCode and MinorCode to determine whether
13430 * this is for a completion event on a completion queue; if not, an error shall be
13431 * logged and the routine will just return. Otherwise, it will get to the corresponding
13432 * completion queue and process all the entries on the completion queue, rearm
13433 * the completion queue, and then return.
13434 **/
13435static void
13436lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13437{
13438 struct lpfc_queue *cq;
13439 struct lpfc_cqe *cqe;
13440 bool workposted = false;
13441 uint16_t cqid;
13442 int ecount = 0;
13443
13444 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13445 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13446 "9147 Not a valid completion "
13447 "event: majorcode=x%x, minorcode=x%x\n",
13448 bf_get_le32(lpfc_eqe_major_code, eqe),
13449 bf_get_le32(lpfc_eqe_minor_code, eqe));
13450 return;
13451 }
13452
13453 /* Get the reference to the corresponding CQ */
13454 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13455
13456 /* Next check for OAS */
13457 cq = phba->sli4_hba.oas_cq;
13458 if (unlikely(!cq)) {
13459 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13460 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13461 "9148 OAS completion queue "
13462 "does not exist\n");
13463 return;
13464 }
13465
13466 if (unlikely(cqid != cq->queue_id)) {
13467 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13468 "9149 Miss-matched fast-path compl "
13469 "queue id: eqcqid=%d, fcpcqid=%d\n",
13470 cqid, cq->queue_id);
13471 return;
13472 }
13473
13474 /* Process all the entries to the OAS CQ */
13475 while ((cqe = lpfc_sli4_cq_get(cq))) {
895427bd 13476 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
1ba981fd
JS
13477 if (!(++ecount % cq->entry_repost))
13478 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13479 }
13480
13481 /* Track the max number of CQEs processed in 1 EQ */
13482 if (ecount > cq->CQ_max_cqe)
13483 cq->CQ_max_cqe = ecount;
13484
13485 /* Catch the no cq entry condition */
13486 if (unlikely(ecount == 0))
13487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13488 "9153 No entry from fast-path completion "
13489 "queue fcpcqid=%d\n", cq->queue_id);
13490
13491 /* In any case, flush and re-arm the CQ */
13492 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13493
13494 /* wake up worker thread if there are works to be done */
13495 if (workposted)
13496 lpfc_worker_wake_up(phba);
13497}
13498
13499/**
13500 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13501 * @irq: Interrupt number.
13502 * @dev_id: The device context pointer.
13503 *
13504 * This function is directly called from the PCI layer as an interrupt
13505 * service routine when device with SLI-4 interface spec is enabled with
13506 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13507 * IOCB ring event in the HBA. However, when the device is enabled with either
13508 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13509 * device-level interrupt handler. When the PCI slot is in error recovery
13510 * or the HBA is undergoing initialization, the interrupt handler will not
13511 * process the interrupt. The Flash Optimized Fabric ring events are handled in
13512 * the interrupt context. This function is called without any lock held.
13513 * It gets the hbalock to access and update SLI data structures. Note that
13514 * the EQ-to-CQ mapping is one-to-one, such that the EQ index is
13515 * equal to the CQ index.
13516 *
13517 * This function returns IRQ_HANDLED when interrupt is handled else it
13518 * returns IRQ_NONE.
13519 **/
13520irqreturn_t
13521lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13522{
13523 struct lpfc_hba *phba;
895427bd 13524 struct lpfc_hba_eq_hdl *hba_eq_hdl;
1ba981fd
JS
13525 struct lpfc_queue *eq;
13526 struct lpfc_eqe *eqe;
13527 unsigned long iflag;
13528 int ecount = 0;
1ba981fd
JS
13529
13530 /* Get the driver's phba structure from the dev_id */
895427bd
JS
13531 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13532 phba = hba_eq_hdl->phba;
1ba981fd
JS
13533
13534 if (unlikely(!phba))
13535 return IRQ_NONE;
13536
13537 /* Get to the EQ struct associated with this vector */
13538 eq = phba->sli4_hba.fof_eq;
13539 if (unlikely(!eq))
13540 return IRQ_NONE;
13541
13542 /* Check device state for handling interrupt */
13543 if (unlikely(lpfc_intr_state_check(phba))) {
13544 eq->EQ_badstate++;
13545 /* Check again for link_state with lock held */
13546 spin_lock_irqsave(&phba->hbalock, iflag);
13547 if (phba->link_state < LPFC_LINK_DOWN)
13548 /* Flush, clear interrupt, and rearm the EQ */
13549 lpfc_sli4_eq_flush(phba, eq);
13550 spin_unlock_irqrestore(&phba->hbalock, iflag);
13551 return IRQ_NONE;
13552 }
13553
13554 /*
13555 * Process all the events on the FCP fast-path EQ
13556 */
13557 while ((eqe = lpfc_sli4_eq_get(eq))) {
13558 lpfc_sli4_fof_handle_eqe(phba, eqe);
13559 if (!(++ecount % eq->entry_repost))
13560 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
13561 eq->EQ_processed++;
13562 }
13563
13564 /* Track the max number of EQEs processed in 1 intr */
13565 if (ecount > eq->EQ_max_eqe)
13566 eq->EQ_max_eqe = ecount;
13567
13568
13569 if (unlikely(ecount == 0)) {
13570 eq->EQ_no_entry++;
13571
13572 if (phba->intr_type == MSIX)
13573 /* MSI-X treated interrupt served as no EQ share INT */
13574 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13575 "9145 MSI-X interrupt with no EQE\n");
13576 else {
13577 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13578 "9146 ISR interrupt with no EQE\n");
13579 /* Non MSI-X treated on interrupt as EQ share INT */
13580 return IRQ_NONE;
13581 }
13582 }
13583 /* Always clear and re-arm the fast-path EQ */
13584 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13585 return IRQ_HANDLED;
13586}
13587
4f774513 13588/**
67d12733 13589 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
4f774513
JS
13590 * @irq: Interrupt number.
13591 * @dev_id: The device context pointer.
13592 *
13593 * This function is directly called from the PCI layer as an interrupt
13594 * service routine when device with SLI-4 interface spec is enabled with
13595 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13596 * ring event in the HBA. However, when the device is enabled with either
13597 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13598 * device-level interrupt handler. When the PCI slot is in error recovery
13599 * or the HBA is undergoing initialization, the interrupt handler will not
13600 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13601 * the interrupt context. This function is called without any lock held.
13602 * It gets the hbalock to access and update SLI data structures. Note that
13603 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
13604 * equal to the FCP CQ index.
13605 *
67d12733
JS
13606 * The link attention and ELS ring attention events are handled
13607 * by the worker thread. The interrupt handler signals the worker thread
13608 * and returns for these events. This function is called without any lock
13609 * held. It gets the hbalock to access and update SLI data structures.
13610 *
4f774513
JS
13611 * This function returns IRQ_HANDLED when interrupt is handled else it
13612 * returns IRQ_NONE.
13613 **/
13614irqreturn_t
67d12733 13615lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
4f774513
JS
13616{
13617 struct lpfc_hba *phba;
895427bd 13618 struct lpfc_hba_eq_hdl *hba_eq_hdl;
4f774513
JS
13619 struct lpfc_queue *fpeq;
13620 struct lpfc_eqe *eqe;
13621 unsigned long iflag;
13622 int ecount = 0;
895427bd 13623 int hba_eqidx;
4f774513
JS
13624
13625 /* Get the driver's phba structure from the dev_id */
895427bd
JS
13626 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13627 phba = hba_eq_hdl->phba;
13628 hba_eqidx = hba_eq_hdl->idx;
4f774513
JS
13629
13630 if (unlikely(!phba))
13631 return IRQ_NONE;
67d12733 13632 if (unlikely(!phba->sli4_hba.hba_eq))
5350d872 13633 return IRQ_NONE;
4f774513
JS
13634
13635 /* Get to the EQ struct associated with this vector */
895427bd 13636 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
2e90f4b5
JS
13637 if (unlikely(!fpeq))
13638 return IRQ_NONE;
4f774513 13639
bd2cdd5e
JS
13640#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13641 if (phba->ktime_on)
13642 fpeq->isr_timestamp = ktime_get_ns();
13643#endif
13644
ba20c853 13645 if (lpfc_fcp_look_ahead) {
895427bd 13646 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
ba20c853
JS
13647 lpfc_sli4_eq_clr_intr(fpeq);
13648 else {
895427bd 13649 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
ba20c853
JS
13650 return IRQ_NONE;
13651 }
13652 }
13653
4f774513
JS
13654 /* Check device state for handling interrupt */
13655 if (unlikely(lpfc_intr_state_check(phba))) {
b84daac9 13656 fpeq->EQ_badstate++;
4f774513
JS
13657 /* Check again for link_state with lock held */
13658 spin_lock_irqsave(&phba->hbalock, iflag);
13659 if (phba->link_state < LPFC_LINK_DOWN)
13660 /* Flush, clear interrupt, and rearm the EQ */
13661 lpfc_sli4_eq_flush(phba, fpeq);
13662 spin_unlock_irqrestore(&phba->hbalock, iflag);
ba20c853 13663 if (lpfc_fcp_look_ahead)
895427bd 13664 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
4f774513
JS
13665 return IRQ_NONE;
13666 }
13667
13668 /*
13669 * Process all the events on the FCP fast-path EQ
13670 */
13671 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
eb016566
JS
13672 if (eqe == NULL)
13673 break;
13674
895427bd 13675 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
73d91e50 13676 if (!(++ecount % fpeq->entry_repost))
4f774513 13677 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
b84daac9 13678 fpeq->EQ_processed++;
4f774513
JS
13679 }
13680
b84daac9
JS
13681 /* Track the max number of EQEs processed in 1 intr */
13682 if (ecount > fpeq->EQ_max_eqe)
13683 fpeq->EQ_max_eqe = ecount;
13684
4f774513
JS
13685 /* Always clear and re-arm the fast-path EQ */
13686 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
13687
13688 if (unlikely(ecount == 0)) {
b84daac9 13689 fpeq->EQ_no_entry++;
ba20c853
JS
13690
13691 if (lpfc_fcp_look_ahead) {
895427bd 13692 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
ba20c853
JS
13693 return IRQ_NONE;
13694 }
13695
4f774513
JS
13696 if (phba->intr_type == MSIX)
13697 /* MSI-X treated interrupt served as no EQ share INT */
13698 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13699 "0358 MSI-X interrupt with no EQE\n");
13700 else
13701 /* Non MSI-X treated on interrupt as EQ share INT */
13702 return IRQ_NONE;
13703 }
13704
ba20c853 13705 if (lpfc_fcp_look_ahead)
895427bd
JS
13706 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13707
4f774513
JS
13708 return IRQ_HANDLED;
13709} /* lpfc_sli4_fp_intr_handler */
13710
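/*
 * Sketch of the look-ahead interrupt gating used by the handler above:
 * an in-use counter starts at 1, the handler that decrements it to
 * zero owns the EQ (and masks its interrupt), and any other CPU backs
 * off. C11 atomics stand in for the kernel's atomic_t; names are
 * hypothetical.
 */
#include <stdatomic.h>

static atomic_int sk_eq_in_use = 1;

static int sk_try_claim_eq(void)
{
	if (atomic_fetch_sub(&sk_eq_in_use, 1) == 1)
		return 1;	/* counter hit zero: we own the EQ */
	atomic_fetch_add(&sk_eq_in_use, 1);	/* lost the race: restore */
	return 0;
}

static void sk_release_eq(void)
{
	atomic_fetch_add(&sk_eq_in_use, 1);	/* allow the next claim */
}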
13711/**
13712 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
13713 * @irq: Interrupt number.
13714 * @dev_id: The device context pointer.
13715 *
13716 * This function is the device-level interrupt handler to device with SLI-4
13717 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
13718 * interrupt mode is enabled and there is an event in the HBA which requires
13719 * driver attention. This function invokes the slow-path interrupt attention
13720 * handling function and fast-path interrupt attention handling function in
13721 * turn to process the relevant HBA attention events. This function is called
13722 * without any lock held. It gets the hbalock to access and update SLI data
13723 * structures.
13724 *
13725 * This function returns IRQ_HANDLED when interrupt is handled, else it
13726 * returns IRQ_NONE.
13727 **/
13728irqreturn_t
13729lpfc_sli4_intr_handler(int irq, void *dev_id)
13730{
13731 struct lpfc_hba *phba;
67d12733
JS
13732 irqreturn_t hba_irq_rc;
13733 bool hba_handled = false;
895427bd 13734 int qidx;
4f774513
JS
13735
13736 /* Get the driver's phba structure from the dev_id */
13737 phba = (struct lpfc_hba *)dev_id;
13738
13739 if (unlikely(!phba))
13740 return IRQ_NONE;
13741
4f774513
JS
13742 /*
13743 * Invoke fast-path host attention interrupt handling as appropriate.
13744 */
895427bd 13745 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
67d12733 13746 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
895427bd 13747 &phba->sli4_hba.hba_eq_hdl[qidx]);
67d12733
JS
13748 if (hba_irq_rc == IRQ_HANDLED)
13749 hba_handled |= true;
4f774513
JS
13750 }
13751
1ba981fd
JS
13752 if (phba->cfg_fof) {
13753 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
895427bd 13754 &phba->sli4_hba.hba_eq_hdl[qidx]);
1ba981fd
JS
13755 if (hba_irq_rc == IRQ_HANDLED)
13756 hba_handled |= true;
13757 }
13758
67d12733 13759 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
4f774513
JS
13760} /* lpfc_sli4_intr_handler */
13761
13762/**
13763 * lpfc_sli4_queue_free - free a queue structure and associated memory
13764 * @queue: The queue structure to free.
13765 *
b595076a 13766 * This function frees a queue structure and the DMAable memory used for
4f774513
JS
13767 * the host resident queue. This function must be called after destroying the
13768 * queue on the HBA.
13769 **/
13770void
13771lpfc_sli4_queue_free(struct lpfc_queue *queue)
13772{
13773 struct lpfc_dmabuf *dmabuf;
13774
13775 if (!queue)
13776 return;
13777
13778 while (!list_empty(&queue->page_list)) {
13779 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13780 list);
49198b37 13781 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
4f774513
JS
13782 dmabuf->virt, dmabuf->phys);
13783 kfree(dmabuf);
13784 }
895427bd
JS
13785 if (queue->rqbp) {
13786 lpfc_free_rq_buffer(queue->phba, queue);
13787 kfree(queue->rqbp);
13788 }
d1f525aa
JS
13789
13790 if (!list_empty(&queue->wq_list))
13791 list_del(&queue->wq_list);
13792
4f774513
JS
13793 kfree(queue);
13794 return;
13795}
13796
13797/**
13798 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13799 * @phba: The HBA that this queue is being created on.
13800 * @entry_size: The size of each queue entry for this queue.
13801 * @entry_count: The number of entries that this queue will handle.
13802 *
13803 * This function allocates a queue structure and the DMAable memory used for
13804 * the host resident queue. This function must be called before creating the
13805 * queue on the HBA.
13806 **/
13807struct lpfc_queue *
13808lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13809 uint32_t entry_count)
13810{
13811 struct lpfc_queue *queue;
13812 struct lpfc_dmabuf *dmabuf;
13813 int x, total_qe_count;
13814 void *dma_pointer;
cb5172ea 13815 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
4f774513 13816
cb5172ea
JS
13817 if (!phba->sli4_hba.pc_sli4_params.supported)
13818 hw_page_size = SLI4_PAGE_SIZE;
13819
4f774513
JS
13820 queue = kzalloc(sizeof(struct lpfc_queue) +
13821 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13822 if (!queue)
13823 return NULL;
cb5172ea
JS
13824 queue->page_count = (ALIGN(entry_size * entry_count,
13825 hw_page_size))/hw_page_size;
895427bd
JS
13826
13827 /* If needed, Adjust page count to match the max the adapter supports */
13828 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
13829 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
13830
4f774513 13831 INIT_LIST_HEAD(&queue->list);
895427bd 13832 INIT_LIST_HEAD(&queue->wq_list);
4f774513
JS
13833 INIT_LIST_HEAD(&queue->page_list);
13834 INIT_LIST_HEAD(&queue->child_list);
13835 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13836 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13837 if (!dmabuf)
13838 goto out_fail;
1aee383d
JP
13839 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13840 hw_page_size, &dmabuf->phys,
13841 GFP_KERNEL);
4f774513
JS
13842 if (!dmabuf->virt) {
13843 kfree(dmabuf);
13844 goto out_fail;
13845 }
13846 dmabuf->buffer_tag = x;
13847 list_add_tail(&dmabuf->list, &queue->page_list);
13848 /* initialize queue's entry array */
13849 dma_pointer = dmabuf->virt;
13850 for (; total_qe_count < entry_count &&
cb5172ea 13851 dma_pointer < (hw_page_size + dmabuf->virt);
4f774513
JS
13852 total_qe_count++, dma_pointer += entry_size) {
13853 queue->qe[total_qe_count].address = dma_pointer;
13854 }
13855 }
13856 queue->entry_size = entry_size;
13857 queue->entry_count = entry_count;
73d91e50
JS
13858
13859 /*
13860 * entry_repost is calculated based on the number of entries in the
13861 * queue. This works out except for RQs. If buffers are NOT initially
13862 * posted for every RQE, entry_repost should be adjusted accordingly.
13863 */
13864 queue->entry_repost = (entry_count >> 3);
13865 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13866 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
4f774513
JS
13867 queue->phba = phba;
13868
13869 return queue;
13870out_fail:
13871 lpfc_sli4_queue_free(queue);
13872 return NULL;
13873}
13874
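/*
 * Self-contained sketch of the sizing math above: the queue needs
 * enough DMA pages to hold entry_count entries of entry_size bytes,
 * and consumed entries are reposted to the hardware every eighth of
 * the queue, with a floor. The page size and minimum-repost values
 * below are assumptions for illustration.
 */
#include <stdio.h>

#define SK_PAGE_SIZE  4096u
#define SK_MIN_REPOST 8u

int main(void)
{
	unsigned int entry_size = 64, entry_count = 1024;
	unsigned int bytes = entry_size * entry_count;
	unsigned int pages = (bytes + SK_PAGE_SIZE - 1) / SK_PAGE_SIZE;
	unsigned int repost = entry_count >> 3;	/* every 1/8th of queue */

	if (repost < SK_MIN_REPOST)
		repost = SK_MIN_REPOST;
	/* prints: pages=16 repost=128 */
	printf("pages=%u repost=%u\n", pages, repost);
	return 0;
}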
962bc51b
JS
13875/**
13876 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13877 * @phba: HBA structure that indicates port to create a queue on.
13878 * @pci_barset: PCI BAR set flag.
13879 *
13880 * This function shall perform iomap of the specified PCI BAR address to host
13881 * memory address if not already done so and return it. The returned host
13882 * memory address can be NULL.
13883 */
13884static void __iomem *
13885lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13886{
962bc51b
JS
13887 if (!phba->pcidev)
13888 return NULL;
962bc51b
JS
13889
13890 switch (pci_barset) {
13891 case WQ_PCI_BAR_0_AND_1:
962bc51b
JS
13892 return phba->pci_bar0_memmap_p;
13893 case WQ_PCI_BAR_2_AND_3:
962bc51b
JS
13894 return phba->pci_bar2_memmap_p;
13895 case WQ_PCI_BAR_4_AND_5:
962bc51b
JS
13896 return phba->pci_bar4_memmap_p;
13897 default:
13898 break;
13899 }
13900 return NULL;
13901}
13902
173edbb2 13903/**
895427bd 13904 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
173edbb2
JS
13905 * @phba: HBA structure that indicates port to create a queue on.
13906 * @startq: The starting FCP EQ to modify
13907 *
13908 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
43140ca6
JS
13909 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
13910 * updated in one mailbox command.
173edbb2
JS
13911 *
13912 * The @phba struct is used to send mailbox command to HBA. The @startq
13913 * is used to get the starting FCP EQ to change.
13914 * This function is synchronous and will wait for the mailbox
13915 * command to finish before continuing.
13916 *
13917 * On success this function will return a zero. If unable to allocate enough
13918 * memory this function will return -ENOMEM. If the MODIFY_EQ_DELAY mailbox command
13919 * fails this function will return -ENXIO.
13920 **/
a2fc4aef 13921int
895427bd 13922lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
173edbb2
JS
13923{
13924 struct lpfc_mbx_modify_eq_delay *eq_delay;
13925 LPFC_MBOXQ_t *mbox;
13926 struct lpfc_queue *eq;
13927 int cnt, rc, length, status = 0;
13928 uint32_t shdr_status, shdr_add_status;
ee02006b 13929 uint32_t result;
895427bd 13930 int qidx;
173edbb2
JS
13931 union lpfc_sli4_cfg_shdr *shdr;
13932 uint16_t dmult;
13933
895427bd 13934 if (startq >= phba->io_channel_irqs)
173edbb2
JS
13935 return 0;
13936
13937 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13938 if (!mbox)
13939 return -ENOMEM;
13940 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13941 sizeof(struct lpfc_sli4_cfg_mhdr));
13942 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13943 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13944 length, LPFC_SLI4_MBX_EMBED);
13945 eq_delay = &mbox->u.mqe.un.eq_delay;
13946
13947 /* Calculate delay multiper from maximum interrupt per second */
895427bd
JS
13948 result = phba->cfg_fcp_imax / phba->io_channel_irqs;
13949 if (result > LPFC_DMULT_CONST || result == 0)
ee02006b
JS
13950 dmult = 0;
13951 else
13952 dmult = LPFC_DMULT_CONST/result - 1;
173edbb2
JS
13953
13954 cnt = 0;
895427bd
JS
13955 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
13956 eq = phba->sli4_hba.hba_eq[qidx];
173edbb2
JS
13957 if (!eq)
13958 continue;
13959 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13960 eq_delay->u.request.eq[cnt].phase = 0;
13961 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13962 cnt++;
43140ca6 13963 if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
173edbb2
JS
13964 break;
13965 }
13966 eq_delay->u.request.num_eq = cnt;
13967
13968 mbox->vport = phba->pport;
13969 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13970 mbox->context1 = NULL;
13971 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13972 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13973 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13974 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13975 if (shdr_status || shdr_add_status || rc) {
13976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13977 "2512 MODIFY_EQ_DELAY mailbox failed with "
13978 "status x%x add_status x%x, mbx status x%x\n",
13979 shdr_status, shdr_add_status, rc);
13980 status = -ENXIO;
13981 }
13982 mempool_free(mbox, phba->mbox_mem_pool);
13983 return status;
13984}
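
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * How dmult is derived above: cfg_fcp_imax caps total interrupts per
 * second across all IO channel IRQs, so each EQ's share is
 * imax / io_channel_irqs, and the delay multiplier is scaled off
 * LPFC_DMULT_CONST. The numbers in the comments are illustrative, not
 * driver defaults.
 */
static uint16_t example_eq_delay_multiplier(uint32_t imax, uint32_t nirqs)
{
	uint32_t result = imax / nirqs;	/* e.g. 80000 / 4 = 20000 per EQ */

	if (result > LPFC_DMULT_CONST || result == 0)
		return 0;		/* out of range: no delay */
	return LPFC_DMULT_CONST / result - 1;
}
#endif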

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
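
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * Typical pairing at SLI-4 bring-up: allocate the queue memory first, then
 * issue EQ_CREATE. The entry size/count values here are examples; the real
 * callers live in lpfc_init.c.
 */
static struct lpfc_queue *example_setup_eq(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE_4B, 1024);
	if (!eq)
		return NULL;
	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}
#endif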

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d repost %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count, cq->entry_repost);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
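
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * A CQ must be bound to an already-created parent EQ; on success
 * lpfc_cq_create links the CQ onto eq->child_list. The LPFC_WCQ/LPFC_FCP
 * type/subtype pair is one example taken from the FCP fast path.
 */
static int example_setup_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_queue *eq)
{
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
}
#endif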

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @eqp: The event queue array to bind these completion queues to.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for each queue. The @eqp
 * array is used to indicate which event queue to bind each completion queue
 * to. This function will send the CREATE_CQ_SET mailbox command to the HBA
 * to setup the completion queues. This function is synchronous and will wait
 * for the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !eqp || !numcq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = eqp[idx];
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}

		switch (idx) {
		case 0:
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			switch (cq->entry_count) {
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* otherwise default to smallest (drop thru) */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->host_index = 0;
		cq->hba_index = 0;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
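
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * CREATE_CQ_SET returns a single base queue id; the CQs in the set are
 * numbered consecutively from it, which is what the rc + idx loop above
 * relies on (e.g. base 40 -> 40, 41, 42, ...).
 */
static void example_cq_set_ids(struct lpfc_queue **cqp, int numcq,
			       uint16_t base_id)
{
	int idx;

	for (idx = 0; idx < numcq; idx++)
		cqp[idx]->queue_id = base_id + idx;
}
#endif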

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @mq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
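
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * Callers only ever see one MQ create API: lpfc_mq_create first tries
 * MQ_CREATE_EXT (which also registers for link/FIP/grp5/FC/SLI async
 * events) and quietly falls back to plain MQ_CREATE on older firmware.
 * LPFC_MBOX is the subtype the driver uses for its mailbox queue.
 */
static int example_setup_mq(struct lpfc_hba *phba, struct lpfc_queue *mq,
			    struct lpfc_queue *cq)
{
	return lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
}
#endif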

/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	switch (phba->sli4_hba.pc_sli4_params.wqv) {
	case LPFC_Q_CREATE_VERSION_0:
		switch (wq->entry_size) {
		default:
		case 64:
			/* Nothing to do, version 0 ONLY supports 64 byte */
			page = wq_create->u.request.page;
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
			    LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			/* If we get here the HBA MUST also support V1 and
			 * we MUST use it
			 */
			bf_set(lpfc_mbox_hdr_version, &shdr->request,
			       LPFC_Q_CREATE_VERSION_1);

			bf_set(lpfc_mbx_wq_create_wqe_count,
			       &wq_create->u.request_1, wq->entry_count);
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			bf_set(lpfc_mbx_wq_create_page_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_PAGE_SIZE_4096);
			page = wq_create->u.request_1.page;
			break;
		}
		break;
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
				LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       LPFC_WQ_PAGE_SIZE_4096);
		page = wq_create->u.request_1.page;
		break;
	default:
		status = -ERANGE;
		goto out;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
				       &wq_create->u.response);
		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3265 WQ[%d] doorbell format not "
					"supported: x%x\n", wq->queue_id,
					wq->db_format);
			status = -EINVAL;
			goto out;
		}
		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
				    &wq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3263 WQ[%d] failed to memmap pci "
					"barset:x%x\n", wq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}
		db_offset = wq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3252 WQ[%d] doorbell offset not "
					"supported: x%x\n", wq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		wq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3264 WQ[%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", wq->queue_id, pci_barset,
				db_offset, wq->db_format);
	} else {
		wq->db_format = LPFC_DB_LIST_FORMAT;
		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (wq->pring == NULL) {
		status = -ENOMEM;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
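
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * Where a WQ's doorbell ends up, per the logic above: in dual-chute (DUA)
 * mode WQ_CREATE reports a per-queue BAR set and offset, otherwise every
 * WQ shares the function's WQDBregaddr. Either way wq->db_regaddr is the
 * register the submit path rings.
 */
static bool example_wq_has_private_doorbell(struct lpfc_hba *phba)
{
	return (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) != 0;
}
#endif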

/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair. This function is synchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->entry_repost = LPFC_RQ_REPOST;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->entry_repost = LPFC_RQ_REPOST;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
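
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * RQs are always created as a header/data pair sharing one CQ, and the two
 * entry counts must match or lpfc_rq_create returns -EINVAL. LPFC_USOL
 * (unsolicited) is the subtype the driver uses for its receive queues.
 */
static int example_setup_rq_pair(struct lpfc_hba *phba,
				 struct lpfc_queue *hrq,
				 struct lpfc_queue *drq,
				 struct lpfc_queue *cq)
{
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}
#endif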

/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 *
 * This function creates receive buffer queue pairs, as detailed in @hrqp and
 * @drqp, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drqp and @hrqp
 * arrays are used to get the entry counts that are necessary to determine the
 * number of pages to use for each queue. The @cqp array is used to indicate
 * which completion queue to bind received buffers that are posted to these
 * queues to. This function will send the RQ_CREATE mailbox command to the HBA
 * to setup the receive queue pairs. This function is synchronous and will
 * wait for the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->entry_repost = LPFC_RQ_REPOST;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->entry_repost = LPFC_RQ_REPOST;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
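
#if 0	/* illustrative sketch only -- not part of the driver build */
/*
 * RQ_CREATE v2 also hands back one base id for the whole MRQ set; header
 * and data queues interleave from it, matching the assignment loop above
 * (e.g. base 100 -> hrq 100/drq 101, hrq 102/drq 103, ...).
 */
static void example_mrq_ids(struct lpfc_queue **hrqp, struct lpfc_queue **drqp,
			    int numrq, uint16_t base_id)
{
	int idx;

	for (idx = 0; idx < numrq; idx++) {
		hrqp[idx]->queue_id = base_id + (2 * idx);
		drqp[idx]->queue_id = base_id + (2 * idx) + 1;
	}
}
#endif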
15360
4f774513
JS
15361/**
15362 * lpfc_eq_destroy - Destroy an event Queue on the HBA
15363 * @eq: The queue structure associated with the queue to destroy.
15364 *
15365 * This function destroys a queue, as detailed in @eq by sending an mailbox
15366 * command, specific to the type of queue, to the HBA.
15367 *
15368 * The @eq struct is used to get the queue ID of the queue to destroy.
15369 *
15370 * On success this function will return a zero. If the queue destroy mailbox
d439d286 15371 * command fails this function will return -ENXIO.
4f774513 15372 **/
a2fc4aef 15373int
4f774513
JS
15374lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15375{
15376 LPFC_MBOXQ_t *mbox;
15377 int rc, length, status = 0;
15378 uint32_t shdr_status, shdr_add_status;
15379 union lpfc_sli4_cfg_shdr *shdr;
15380
2e90f4b5 15381 /* sanity check on queue memory */
4f774513
JS
15382 if (!eq)
15383 return -ENODEV;
15384 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15385 if (!mbox)
15386 return -ENOMEM;
15387 length = (sizeof(struct lpfc_mbx_eq_destroy) -
15388 sizeof(struct lpfc_sli4_cfg_mhdr));
15389 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15390 LPFC_MBOX_OPCODE_EQ_DESTROY,
15391 length, LPFC_SLI4_MBX_EMBED);
15392 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15393 eq->queue_id);
15394 mbox->vport = eq->phba->pport;
15395 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15396
15397 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15398 /* The IOCTL status is embedded in the mailbox subheader. */
15399 shdr = (union lpfc_sli4_cfg_shdr *)
15400 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15403 if (shdr_status || shdr_add_status || rc) {
15404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15405 "2505 EQ_DESTROY mailbox failed with "
15406 "status x%x add_status x%x, mbx status x%x\n",
15407 shdr_status, shdr_add_status, rc);
15408 status = -ENXIO;
15409 }
15410
15411 /* Remove eq from any list */
15412 list_del_init(&eq->list);
8fa38513 15413 mempool_free(mbox, eq->phba->mbox_mem_pool);
4f774513
JS
15414 return status;
15415}
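/*
 * Illustrative sketch (not compiled), assuming a caller that owns an EQ and
 * one of its child CQs: teardown mirrors creation in reverse, so the child
 * CQ is destroyed before its parent EQ. Both helpers return 0 on success or
 * a negative errno on failure.
 */
#if 0
static void example_destroy_eq_pair(struct lpfc_hba *phba,
				    struct lpfc_queue *eq,
				    struct lpfc_queue *cq)
{
	if (lpfc_cq_destroy(phba, cq))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: CQ_DESTROY failed\n");
	if (lpfc_eq_destroy(phba, eq))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: EQ_DESTROY failed\n");
}
#endif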
15416
15417/**
15418 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
15419 * @cq: The queue structure associated with the queue to destroy.
15420 *
 15421 * This function destroys a queue, as detailed in @cq, by sending a mailbox
15422 * command, specific to the type of queue, to the HBA.
15423 *
15424 * The @cq struct is used to get the queue ID of the queue to destroy.
15425 *
15426 * On success this function will return a zero. If the queue destroy mailbox
d439d286 15427 * command fails this function will return -ENXIO.
4f774513 15428 **/
a2fc4aef 15429int
4f774513
JS
15430lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15431{
15432 LPFC_MBOXQ_t *mbox;
15433 int rc, length, status = 0;
15434 uint32_t shdr_status, shdr_add_status;
15435 union lpfc_sli4_cfg_shdr *shdr;
15436
2e90f4b5 15437 /* sanity check on queue memory */
4f774513
JS
15438 if (!cq)
15439 return -ENODEV;
15440 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15441 if (!mbox)
15442 return -ENOMEM;
15443 length = (sizeof(struct lpfc_mbx_cq_destroy) -
15444 sizeof(struct lpfc_sli4_cfg_mhdr));
15445 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15446 LPFC_MBOX_OPCODE_CQ_DESTROY,
15447 length, LPFC_SLI4_MBX_EMBED);
15448 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15449 cq->queue_id);
15450 mbox->vport = cq->phba->pport;
15451 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15452 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15453 /* The IOCTL status is embedded in the mailbox subheader. */
15454 shdr = (union lpfc_sli4_cfg_shdr *)
15455 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
15456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15458 if (shdr_status || shdr_add_status || rc) {
15459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15460 "2506 CQ_DESTROY mailbox failed with "
15461 "status x%x add_status x%x, mbx status x%x\n",
15462 shdr_status, shdr_add_status, rc);
15463 status = -ENXIO;
15464 }
15465 /* Remove cq from any list */
15466 list_del_init(&cq->list);
8fa38513 15467 mempool_free(mbox, cq->phba->mbox_mem_pool);
4f774513
JS
15468 return status;
15469}
15470
04c68496
JS
15471/**
15472 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 15473 * @mq: The queue structure associated with the queue to destroy.
 15474 *
 15475 * This function destroys a queue, as detailed in @mq, by sending a mailbox
15476 * command, specific to the type of queue, to the HBA.
15477 *
15478 * The @mq struct is used to get the queue ID of the queue to destroy.
15479 *
15480 * On success this function will return a zero. If the queue destroy mailbox
d439d286 15481 * command fails this function will return -ENXIO.
04c68496 15482 **/
a2fc4aef 15483int
04c68496
JS
15484lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
15485{
15486 LPFC_MBOXQ_t *mbox;
15487 int rc, length, status = 0;
15488 uint32_t shdr_status, shdr_add_status;
15489 union lpfc_sli4_cfg_shdr *shdr;
15490
2e90f4b5 15491 /* sanity check on queue memory */
04c68496
JS
15492 if (!mq)
15493 return -ENODEV;
15494 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
15495 if (!mbox)
15496 return -ENOMEM;
15497 length = (sizeof(struct lpfc_mbx_mq_destroy) -
15498 sizeof(struct lpfc_sli4_cfg_mhdr));
15499 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15500 LPFC_MBOX_OPCODE_MQ_DESTROY,
15501 length, LPFC_SLI4_MBX_EMBED);
15502 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
15503 mq->queue_id);
15504 mbox->vport = mq->phba->pport;
15505 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15506 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
15507 /* The IOCTL status is embedded in the mailbox subheader. */
15508 shdr = (union lpfc_sli4_cfg_shdr *)
15509 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
15510 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15511 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15512 if (shdr_status || shdr_add_status || rc) {
15513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15514 "2507 MQ_DESTROY mailbox failed with "
15515 "status x%x add_status x%x, mbx status x%x\n",
15516 shdr_status, shdr_add_status, rc);
15517 status = -ENXIO;
15518 }
15519 /* Remove mq from any list */
15520 list_del_init(&mq->list);
8fa38513 15521 mempool_free(mbox, mq->phba->mbox_mem_pool);
04c68496
JS
15522 return status;
15523}
15524
4f774513
JS
15525/**
15526 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
15527 * @wq: The queue structure associated with the queue to destroy.
15528 *
 15529 * This function destroys a queue, as detailed in @wq, by sending a mailbox
15530 * command, specific to the type of queue, to the HBA.
15531 *
15532 * The @wq struct is used to get the queue ID of the queue to destroy.
15533 *
15534 * On success this function will return a zero. If the queue destroy mailbox
d439d286 15535 * command fails this function will return -ENXIO.
4f774513 15536 **/
a2fc4aef 15537int
4f774513
JS
15538lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
15539{
15540 LPFC_MBOXQ_t *mbox;
15541 int rc, length, status = 0;
15542 uint32_t shdr_status, shdr_add_status;
15543 union lpfc_sli4_cfg_shdr *shdr;
15544
2e90f4b5 15545 /* sanity check on queue memory */
4f774513
JS
15546 if (!wq)
15547 return -ENODEV;
15548 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
15549 if (!mbox)
15550 return -ENOMEM;
15551 length = (sizeof(struct lpfc_mbx_wq_destroy) -
15552 sizeof(struct lpfc_sli4_cfg_mhdr));
15553 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15554 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
15555 length, LPFC_SLI4_MBX_EMBED);
15556 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
15557 wq->queue_id);
15558 mbox->vport = wq->phba->pport;
15559 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15560 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
15561 shdr = (union lpfc_sli4_cfg_shdr *)
15562 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
15563 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15564 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15565 if (shdr_status || shdr_add_status || rc) {
15566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15567 "2508 WQ_DESTROY mailbox failed with "
15568 "status x%x add_status x%x, mbx status x%x\n",
15569 shdr_status, shdr_add_status, rc);
15570 status = -ENXIO;
15571 }
15572 /* Remove wq from any list */
15573 list_del_init(&wq->list);
d1f525aa
JS
15574 kfree(wq->pring);
15575 wq->pring = NULL;
8fa38513 15576 mempool_free(mbox, wq->phba->mbox_mem_pool);
4f774513
JS
15577 return status;
15578}
15579
15580/**
15581 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 15582 * @hrq: The queue structure associated with the header receive queue.
 15583 * @drq: The queue structure associated with the data receive queue.
 15584 *
 15585 * This function destroys the receive queue pair described by @hrq and @drq
 15586 * by sending a mailbox command, specific to the type of queue, to the HBA.
 15587 * The @hrq and @drq structs are used to get the queue IDs to destroy.
15588 *
15589 * On success this function will return a zero. If the queue destroy mailbox
d439d286 15590 * command fails this function will return -ENXIO.
4f774513 15591 **/
a2fc4aef 15592int
4f774513
JS
15593lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15594 struct lpfc_queue *drq)
15595{
15596 LPFC_MBOXQ_t *mbox;
15597 int rc, length, status = 0;
15598 uint32_t shdr_status, shdr_add_status;
15599 union lpfc_sli4_cfg_shdr *shdr;
15600
2e90f4b5 15601 /* sanity check on queue memory */
4f774513
JS
15602 if (!hrq || !drq)
15603 return -ENODEV;
15604 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
15605 if (!mbox)
15606 return -ENOMEM;
15607 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 15608 sizeof(struct lpfc_sli4_cfg_mhdr));
4f774513
JS
15609 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15610 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
15611 length, LPFC_SLI4_MBX_EMBED);
15612 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
15613 hrq->queue_id);
15614 mbox->vport = hrq->phba->pport;
15615 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15616 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
15617 /* The IOCTL status is embedded in the mailbox subheader. */
15618 shdr = (union lpfc_sli4_cfg_shdr *)
15619 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
15620 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15621 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15622 if (shdr_status || shdr_add_status || rc) {
15623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15624 "2509 RQ_DESTROY mailbox failed with "
15625 "status x%x add_status x%x, mbx status x%x\n",
15626 shdr_status, shdr_add_status, rc);
15627 if (rc != MBX_TIMEOUT)
15628 mempool_free(mbox, hrq->phba->mbox_mem_pool);
15629 return -ENXIO;
15630 }
15631 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
15632 drq->queue_id);
15633 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
15634 shdr = (union lpfc_sli4_cfg_shdr *)
15635 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
15636 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15637 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15638 if (shdr_status || shdr_add_status || rc) {
15639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15640 "2510 RQ_DESTROY mailbox failed with "
15641 "status x%x add_status x%x, mbx status x%x\n",
15642 shdr_status, shdr_add_status, rc);
15643 status = -ENXIO;
15644 }
15645 list_del_init(&hrq->list);
15646 list_del_init(&drq->list);
8fa38513 15647 mempool_free(mbox, hrq->phba->mbox_mem_pool);
4f774513
JS
15648 return status;
15649}
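/*
 * Note that lpfc_rq_destroy() reuses a single mailbox job for both halves
 * of the pair: RQ_DESTROY is issued first for the header RQ and then again
 * for the data RQ. A minimal caller sketch (not compiled; the queue
 * pointers are hypothetical):
 */
#if 0
static void example_destroy_rq_pair(struct lpfc_hba *phba,
				    struct lpfc_queue *hrq,
				    struct lpfc_queue *drq)
{
	if (lpfc_rq_destroy(phba, hrq, drq))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: RQ_DESTROY failed\n");
}
#endif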
15650
15651/**
15652 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 15653 * @phba: pointer to lpfc hba data structure.
15654 * @pdma_phys_addr0: Physical address of the 1st SGL page.
15655 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
15656 * @xritag: the xritag that ties this io to the SGL pages.
15657 *
15658 * This routine will post the sgl pages for the IO that has the xritag
15659 * that is in the iocbq structure. The xritag is assigned during iocbq
15660 * creation and persists for as long as the driver is loaded.
 15661 * If the caller has fewer than 256 scatter gather segments to map, then
 15662 * pdma_phys_addr1 should be 0.
 15663 * If the caller needs to map more than 256 scatter gather segments, then
 15664 * pdma_phys_addr1 should be a valid physical address.
 15665 * Physical addresses for SGLs must be 64-byte aligned.
 15666 * If two SGLs are mapped, the first must have 256 entries and the
 15667 * second can have between 1 and 256 entries.
15668 *
15669 * Return codes:
15670 * 0 - Success
15671 * -ENXIO, -ENOMEM - Failure
15672 **/
15673int
15674lpfc_sli4_post_sgl(struct lpfc_hba *phba,
15675 dma_addr_t pdma_phys_addr0,
15676 dma_addr_t pdma_phys_addr1,
15677 uint16_t xritag)
15678{
15679 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
15680 LPFC_MBOXQ_t *mbox;
15681 int rc;
15682 uint32_t shdr_status, shdr_add_status;
6d368e53 15683 uint32_t mbox_tmo;
4f774513
JS
15684 union lpfc_sli4_cfg_shdr *shdr;
15685
15686 if (xritag == NO_XRI) {
15687 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15688 "0364 Invalid param:\n");
15689 return -EINVAL;
15690 }
15691
15692 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15693 if (!mbox)
15694 return -ENOMEM;
15695
15696 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15697 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
15698 sizeof(struct lpfc_mbx_post_sgl_pages) -
fedd3b7b 15699 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
4f774513
JS
15700
15701 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
15702 &mbox->u.mqe.un.post_sgl_pages;
15703 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
15704 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
15705
15706 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
15707 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
15708 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
15709 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
15710
15711 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
15712 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
15713 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
15714 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
15715 if (!phba->sli4_hba.intr_enable)
15716 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6d368e53 15717 else {
a183a15f 15718 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
15719 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15720 }
4f774513
JS
15721 /* The IOCTL status is embedded in the mailbox subheader. */
15722 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
15723 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15724 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15725 if (rc != MBX_TIMEOUT)
15726 mempool_free(mbox, phba->mbox_mem_pool);
15727 if (shdr_status || shdr_add_status || rc) {
15728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15729 "2511 POST_SGL mailbox failed with "
15730 "status x%x add_status x%x, mbx status x%x\n",
15731 shdr_status, shdr_add_status, rc);
4f774513
JS
 rc = -ENXIO;
 15732 }
 15733 return rc;
 15734}
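/*
 * Illustrative sketch (not compiled): posting the SGL pages that back a
 * single XRI. For a buffer that fits in one SGL page the second physical
 * address is simply 0, per the rules in the comment above. The dma address
 * and xritag parameters are hypothetical.
 */
#if 0
static int example_post_one_sgl(struct lpfc_hba *phba,
				dma_addr_t sgl_phys, uint16_t xritag)
{
	/* Single-page SGL: pdma_phys_addr1 must be 0. */
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
#endif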
4f774513 15735
6d368e53 15736/**
88a2cfbb 15737 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 15738 * @phba: pointer to lpfc hba data structure.
 15739 *
 15740 * This routine is invoked to allocate the next available logical xri
 15741 * from the driver's xri bitmask, consistent with the SLI-4 interface
 15742 * spec. The allocated xri is marked in use and the used-xri count is
 15743 * incremented.
 15744 *
 15745 * Returns
 15746 * An available xri in the range 0 <= xri < max_xri if successful,
 15747 * NO_XRI if no xris are available.
15748 **/
5d8b8167 15749static uint16_t
6d368e53
JS
15750lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
15751{
15752 unsigned long xri;
15753
15754 /*
15755 * Fetch the next logical xri. Because this index is logical,
15756 * the driver starts at 0 each time.
15757 */
15758 spin_lock_irq(&phba->hbalock);
15759 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
15760 phba->sli4_hba.max_cfg_param.max_xri, 0);
15761 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
15762 spin_unlock_irq(&phba->hbalock);
15763 return NO_XRI;
15764 } else {
15765 set_bit(xri, phba->sli4_hba.xri_bmask);
15766 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 15767 }
6d368e53
JS
15768 spin_unlock_irq(&phba->hbalock);
15769 return xri;
15770}
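/*
 * Illustrative sketch (not compiled): the bitmask allocator above pairs
 * with lpfc_sli4_free_xri() below. An allocated logical xri must be handed
 * back once the exchange it tracked is retired.
 */
#if 0
static void example_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return;		/* bitmask exhausted */
	/* ... use the xri for an exchange ... */
	lpfc_sli4_free_xri(phba, xri);
}
#endif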
15771
15772/**
 15773 * __lpfc_sli4_free_xri - Release an xri for reuse.
 15774 * @phba: pointer to lpfc hba data structure.
 15775 *
 15776 * This routine is invoked to release an xri to the pool of available
 15777 * xris maintained by the driver. The caller must hold the hbalock.
15778 **/
5d8b8167 15779static void
6d368e53
JS
15780__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15781{
15782 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
6d368e53
JS
15783 phba->sli4_hba.max_cfg_param.xri_used--;
15784 }
15785}
15786
15787/**
15788 * lpfc_sli4_free_xri - Release an xri for reuse.
15789 * @phba: pointer to lpfc hba data structure.
15790 *
 15791 * This routine is invoked to release an xri to the pool of
 15792 * available xris maintained by the driver.
15793 **/
15794void
15795lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15796{
15797 spin_lock_irq(&phba->hbalock);
15798 __lpfc_sli4_free_xri(phba, xri);
15799 spin_unlock_irq(&phba->hbalock);
15800}
15801
4f774513
JS
15802/**
15803 * lpfc_sli4_next_xritag - Get an xritag for the io
15804 * @phba: Pointer to HBA context object.
15805 *
 15806 * This function gets an xritag for the iocb. If there is no unused xritag
 15807 * it will return NO_XRI (0xffff), which is not a valid xritag.
 15808 * The function returns the allocated xritag if successful.
 15809 *
15810 * The caller is not required to hold any lock.
15811 **/
15812uint16_t
15813lpfc_sli4_next_xritag(struct lpfc_hba *phba)
15814{
6d368e53 15815 uint16_t xri_index;
4f774513 15816
6d368e53 15817 xri_index = lpfc_sli4_alloc_xri(phba);
81378052
JS
15818 if (xri_index == NO_XRI)
15819 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15820 "2004 Failed to allocate XRI.last XRITAG is %d"
15821 " Max XRI is %d, Used XRI is %d\n",
15822 xri_index,
15823 phba->sli4_hba.max_cfg_param.max_xri,
15824 phba->sli4_hba.max_cfg_param.xri_used);
15825 return xri_index;
4f774513
JS
15826}
15827
15828/**
895427bd 15829 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
4f774513 15830 * @phba: pointer to lpfc hba data structure.
8a9d2e80
JS
15831 * @post_sgl_list: pointer to els sgl entry list.
 15832 * @post_cnt: number of els sgl entries on the list.
4f774513
JS
15833 *
 15834 * This routine is invoked to post a block of the driver's sgl pages to the
 15835 * HBA using a non-embedded mailbox command. No lock is held. This routine
15836 * is only called when the driver is loading and after all IO has been
15837 * stopped.
15838 **/
8a9d2e80 15839static int
895427bd 15840lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
8a9d2e80
JS
15841 struct list_head *post_sgl_list,
15842 int post_cnt)
4f774513 15843{
8a9d2e80 15844 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4f774513
JS
15845 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15846 struct sgl_page_pairs *sgl_pg_pairs;
15847 void *viraddr;
15848 LPFC_MBOXQ_t *mbox;
15849 uint32_t reqlen, alloclen, pg_pairs;
15850 uint32_t mbox_tmo;
8a9d2e80
JS
15851 uint16_t xritag_start = 0;
15852 int rc = 0;
4f774513
JS
15853 uint32_t shdr_status, shdr_add_status;
15854 union lpfc_sli4_cfg_shdr *shdr;
15855
895427bd 15856 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
4f774513 15857 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 15858 if (reqlen > SLI4_PAGE_SIZE) {
895427bd 15859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4f774513
JS
15860 "2559 Block sgl registration required DMA "
15861 "size (%d) great than a page\n", reqlen);
15862 return -ENOMEM;
15863 }
895427bd 15864
4f774513 15865 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6d368e53 15866 if (!mbox)
4f774513 15867 return -ENOMEM;
4f774513
JS
15868
15869 /* Allocate DMA memory and set up the non-embedded mailbox command */
15870 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15871 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15872 LPFC_SLI4_MBX_NEMBED);
15873
15874 if (alloclen < reqlen) {
15875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15876 "0285 Allocated DMA memory size (%d) is "
15877 "less than the requested DMA memory "
15878 "size (%d)\n", alloclen, reqlen);
15879 lpfc_sli4_mbox_cmd_free(phba, mbox);
15880 return -ENOMEM;
15881 }
4f774513 15882 /* Set up the SGL pages in the non-embedded DMA pages */
6d368e53 15883 viraddr = mbox->sge_array->addr[0];
4f774513
JS
15884 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15885 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15886
8a9d2e80
JS
15887 pg_pairs = 0;
15888 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
4f774513
JS
15889 /* Set up the sge entry */
15890 sgl_pg_pairs->sgl_pg0_addr_lo =
15891 cpu_to_le32(putPaddrLow(sglq_entry->phys));
15892 sgl_pg_pairs->sgl_pg0_addr_hi =
15893 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
15894 sgl_pg_pairs->sgl_pg1_addr_lo =
15895 cpu_to_le32(putPaddrLow(0));
15896 sgl_pg_pairs->sgl_pg1_addr_hi =
15897 cpu_to_le32(putPaddrHigh(0));
6d368e53 15898
4f774513
JS
15899 /* Keep the first xritag on the list */
15900 if (pg_pairs == 0)
15901 xritag_start = sglq_entry->sli4_xritag;
15902 sgl_pg_pairs++;
8a9d2e80 15903 pg_pairs++;
4f774513 15904 }
6d368e53
JS
15905
15906 /* Complete initialization and perform endian conversion. */
4f774513 15907 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
895427bd 15908 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
4f774513 15909 sgl->word0 = cpu_to_le32(sgl->word0);
895427bd 15910
4f774513
JS
15911 if (!phba->sli4_hba.intr_enable)
15912 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15913 else {
a183a15f 15914 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
15915 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15916 }
15917 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
15918 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15919 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15920 if (rc != MBX_TIMEOUT)
15921 lpfc_sli4_mbox_cmd_free(phba, mbox);
15922 if (shdr_status || shdr_add_status || rc) {
15923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15924 "2513 POST_SGL_BLOCK mailbox command failed "
15925 "status x%x add_status x%x mbx status x%x\n",
15926 shdr_status, shdr_add_status, rc);
15927 rc = -ENXIO;
15928 }
15929 return rc;
15930}
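/*
 * Illustrative sketch (not compiled): the SLI4_PAGE_SIZE check above bounds
 * how many sgl page pairs fit in one non-embedded mailbox command, since
 * the request must carry the config subheader, a trailing word, and one
 * sgl_page_pairs entry per sgl within a single page.
 */
#if 0
static inline uint32_t example_max_sgl_pg_pairs(void)
{
	return (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}
#endif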
15931
15932/**
15933 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
15934 * @phba: pointer to lpfc hba data structure.
15935 * @sblist: pointer to scsi buffer list.
15936 * @count: number of scsi buffers on the list.
15937 *
15938 * This routine is invoked to post a block of @count scsi sgl pages from a
 15939 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
 15940 * No lock is held.
15941 *
15942 **/
15943int
8a9d2e80
JS
15944lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
15945 struct list_head *sblist,
15946 int count)
4f774513
JS
15947{
15948 struct lpfc_scsi_buf *psb;
15949 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15950 struct sgl_page_pairs *sgl_pg_pairs;
15951 void *viraddr;
15952 LPFC_MBOXQ_t *mbox;
15953 uint32_t reqlen, alloclen, pg_pairs;
15954 uint32_t mbox_tmo;
15955 uint16_t xritag_start = 0;
15956 int rc = 0;
15957 uint32_t shdr_status, shdr_add_status;
15958 dma_addr_t pdma_phys_bpl1;
15959 union lpfc_sli4_cfg_shdr *shdr;
15960
15961 /* Calculate the requested length of the dma memory */
8a9d2e80 15962 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 15963 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 15964 if (reqlen > SLI4_PAGE_SIZE) {
4f774513
JS
15965 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
15966 "0217 Block sgl registration required DMA "
15967 "size (%d) great than a page\n", reqlen);
15968 return -ENOMEM;
15969 }
15970 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15971 if (!mbox) {
15972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15973 "0283 Failed to allocate mbox cmd memory\n");
15974 return -ENOMEM;
15975 }
15976
15977 /* Allocate DMA memory and set up the non-embedded mailbox command */
15978 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15979 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15980 LPFC_SLI4_MBX_NEMBED);
15981
15982 if (alloclen < reqlen) {
15983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15984 "2561 Allocated DMA memory size (%d) is "
15985 "less than the requested DMA memory "
15986 "size (%d)\n", alloclen, reqlen);
15987 lpfc_sli4_mbox_cmd_free(phba, mbox);
15988 return -ENOMEM;
15989 }
6d368e53 15990
4f774513 15991 /* Get the first SGE entry from the non-embedded DMA memory */
4f774513
JS
15992 viraddr = mbox->sge_array->addr[0];
15993
15994 /* Set up the SGL pages in the non-embedded DMA pages */
15995 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15996 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15997
15998 pg_pairs = 0;
15999 list_for_each_entry(psb, sblist, list) {
16000 /* Set up the sge entry */
16001 sgl_pg_pairs->sgl_pg0_addr_lo =
16002 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
16003 sgl_pg_pairs->sgl_pg0_addr_hi =
16004 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16005 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16006 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16007 else
16008 pdma_phys_bpl1 = 0;
16009 sgl_pg_pairs->sgl_pg1_addr_lo =
16010 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16011 sgl_pg_pairs->sgl_pg1_addr_hi =
16012 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16013 /* Keep the first xritag on the list */
16014 if (pg_pairs == 0)
16015 xritag_start = psb->cur_iocbq.sli4_xritag;
16016 sgl_pg_pairs++;
16017 pg_pairs++;
16018 }
16019 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16020 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16021 /* Perform endian conversion if necessary */
16022 sgl->word0 = cpu_to_le32(sgl->word0);
16023
16024 if (!phba->sli4_hba.intr_enable)
16025 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16026 else {
a183a15f 16027 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
16028 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16029 }
16030 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16031 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16032 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16033 if (rc != MBX_TIMEOUT)
16034 lpfc_sli4_mbox_cmd_free(phba, mbox);
16035 if (shdr_status || shdr_add_status || rc) {
16036 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16037 "2564 POST_SGL_BLOCK mailbox command failed "
16038 "status x%x add_status x%x mbx status x%x\n",
16039 shdr_status, shdr_add_status, rc);
16040 rc = -ENXIO;
16041 }
16042 return rc;
16043}
16044
2ea259ee
JS
/* rctl/type name tables are static at file scope to save stack space */
 16045static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
16046static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
16047
4f774513
JS
16048/**
16049 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16050 * @phba: pointer to lpfc_hba struct that the frame was received on
16051 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16052 *
16053 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16054 * valid type of frame that the LPFC driver will handle. This function will
16055 * return a zero if the frame is a valid frame or a non zero value when the
16056 * frame does not pass the check.
16057 **/
16058static int
16059lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16060{
4f774513 16062 struct fc_vft_header *fc_vft_hdr;
546fc854 16063 uint32_t *header = (uint32_t *) fc_hdr;
4f774513
JS
16064
16065 switch (fc_hdr->fh_r_ctl) {
16066 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16067 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16068 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16069 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16070 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16071 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16072 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16073 case FC_RCTL_DD_CMD_STATUS: /* command status */
16074 case FC_RCTL_ELS_REQ: /* extended link services request */
16075 case FC_RCTL_ELS_REP: /* extended link services reply */
16076 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16077 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16078 case FC_RCTL_BA_NOP: /* basic link service NOP */
16079 case FC_RCTL_BA_ABTS: /* basic link service abort */
16080 case FC_RCTL_BA_RMC: /* remove connection */
16081 case FC_RCTL_BA_ACC: /* basic accept */
16082 case FC_RCTL_BA_RJT: /* basic reject */
16083 case FC_RCTL_BA_PRMT:
16084 case FC_RCTL_ACK_1: /* acknowledge_1 */
16085 case FC_RCTL_ACK_0: /* acknowledge_0 */
16086 case FC_RCTL_P_RJT: /* port reject */
16087 case FC_RCTL_F_RJT: /* fabric reject */
16088 case FC_RCTL_P_BSY: /* port busy */
16089 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16090 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16091 case FC_RCTL_LCR: /* link credit reset */
16092 case FC_RCTL_END: /* end */
16093 break;
16094 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16095 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16096 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16097 return lpfc_fc_frame_check(phba, fc_hdr);
16098 default:
16099 goto drop;
16100 }
16101 switch (fc_hdr->fh_type) {
16102 case FC_TYPE_BLS:
16103 case FC_TYPE_ELS:
16104 case FC_TYPE_FCP:
16105 case FC_TYPE_CT:
895427bd 16106 case FC_TYPE_NVME:
4f774513
JS
16107 break;
16108 case FC_TYPE_IP:
16109 case FC_TYPE_ILS:
16110 default:
16111 goto drop;
16112 }
546fc854 16113
4f774513 16114 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
88f43a08
JS
16115 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
16116 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
2ea259ee
JS
16117 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
16118 lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
546fc854
JS
16119 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16120 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
88f43a08
JS
16121 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16122 be32_to_cpu(header[6]));
4f774513
JS
16123 return 0;
16124drop:
16125 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16126 "2539 Dropped frame rctl:%s type:%s\n",
2ea259ee
JS
16127 lpfc_rctl_names[fc_hdr->fh_r_ctl],
16128 lpfc_type_names[fc_hdr->fh_type]);
4f774513
JS
16129 return 1;
16130}
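/*
 * Illustrative sketch (not compiled): lpfc_fc_frame_check() returns 0 for a
 * frame the driver will handle and 1 for a frame that should be dropped, so
 * a minimal unsolicited ELS header passes the check.
 */
#if 0
static int example_check_els_frame(struct lpfc_hba *phba)
{
	struct fc_frame_header hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.fh_r_ctl = FC_RCTL_ELS_REQ;		/* extended link services */
	hdr.fh_type = FC_TYPE_ELS;
	return lpfc_fc_frame_check(phba, &hdr);	/* expect 0 */
}
#endif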
16131
16132/**
16133 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16134 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16135 *
16136 * This function processes the FC header to retrieve the VFI from the VF
 16137 * header, if one exists. This function will return the VFI if one exists
 16138 * or 0 if no Virtual Fabric Tagging header exists.
16139 **/
16140static uint32_t
16141lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16142{
16143 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16144
16145 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16146 return 0;
16147 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16148}
16149
16150/**
16151 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
16152 * @phba: Pointer to the HBA structure to search for the vport on
16153 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16154 * @fcfi: The FC Fabric ID that the frame came from
16155 *
16156 * This function searches the @phba for a vport that matches the content of the
16157 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
16158 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
16159 * returns the matching vport pointer or NULL if unable to match frame to a
16160 * vport.
16161 **/
16162static struct lpfc_vport *
16163lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
895427bd 16164 uint16_t fcfi, uint32_t did)
4f774513
JS
16165{
16166 struct lpfc_vport **vports;
16167 struct lpfc_vport *vport = NULL;
16168 int i;
939723a4 16169
bf08611b
JS
16170 if (did == Fabric_DID)
16171 return phba->pport;
939723a4
JS
16172 if ((phba->pport->fc_flag & FC_PT2PT) &&
16173 !(phba->link_state == LPFC_HBA_READY))
16174 return phba->pport;
16175
4f774513 16176 vports = lpfc_create_vport_work_array(phba);
895427bd 16177 if (vports != NULL) {
4f774513
JS
16178 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16179 if (phba->fcf.fcfi == fcfi &&
16180 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16181 vports[i]->fc_myDID == did) {
16182 vport = vports[i];
16183 break;
16184 }
16185 }
895427bd 16186 }
4f774513
JS
16187 lpfc_destroy_vport_work_array(phba, vports);
16188 return vport;
16189}
16190
45ed1190
JS
16191/**
16192 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
16193 * @vport: The vport to work on.
16194 *
16195 * This function updates the receive sequence time stamp for this vport. The
16196 * receive sequence time stamp indicates the time that the last frame of the
16197 * the sequence that has been idle for the longest amount of time was received.
16198 * the driver uses this time stamp to indicate if any received sequences have
16199 * timed out.
16200 **/
5d8b8167 16201static void
45ed1190
JS
16202lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16203{
16204 struct lpfc_dmabuf *h_buf;
16205 struct hbq_dmabuf *dmabuf = NULL;
16206
16207 /* get the oldest sequence on the rcv list */
16208 h_buf = list_get_first(&vport->rcv_buffer_list,
16209 struct lpfc_dmabuf, list);
16210 if (!h_buf)
16211 return;
16212 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16213 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16214}
16215
16216/**
16217 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
16218 * @vport: The vport that the received sequences were sent to.
16219 *
16220 * This function cleans up all outstanding received sequences. This is called
16221 * by the driver when a link event or user action invalidates all the received
16222 * sequences.
16223 **/
16224void
16225lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
16226{
16227 struct lpfc_dmabuf *h_buf, *hnext;
16228 struct lpfc_dmabuf *d_buf, *dnext;
16229 struct hbq_dmabuf *dmabuf = NULL;
16230
16231 /* start with the oldest sequence on the rcv list */
16232 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16233 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16234 list_del_init(&dmabuf->hbuf.list);
16235 list_for_each_entry_safe(d_buf, dnext,
16236 &dmabuf->dbuf.list, list) {
16237 list_del_init(&d_buf->list);
16238 lpfc_in_buf_free(vport->phba, d_buf);
16239 }
16240 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16241 }
16242}
16243
16244/**
16245 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
16246 * @vport: The vport that the received sequences were sent to.
16247 *
16248 * This function determines whether any received sequences have timed out by
16249 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
16250 * indicates that there is at least one timed out sequence this routine will
16251 * go through the received sequences one at a time from most inactive to most
16252 * active to determine which ones need to be cleaned up. Once it has determined
16253 * that a sequence needs to be cleaned up it will simply free up the resources
16254 * without sending an abort.
16255 **/
16256void
16257lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
16258{
16259 struct lpfc_dmabuf *h_buf, *hnext;
16260 struct lpfc_dmabuf *d_buf, *dnext;
16261 struct hbq_dmabuf *dmabuf = NULL;
16262 unsigned long timeout;
16263 int abort_count = 0;
16264
16265 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16266 vport->rcv_buffer_time_stamp);
16267 if (list_empty(&vport->rcv_buffer_list) ||
16268 time_before(jiffies, timeout))
16269 return;
16270 /* start with the oldest sequence on the rcv list */
16271 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16272 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16273 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16274 dmabuf->time_stamp);
16275 if (time_before(jiffies, timeout))
16276 break;
16277 abort_count++;
16278 list_del_init(&dmabuf->hbuf.list);
16279 list_for_each_entry_safe(d_buf, dnext,
16280 &dmabuf->dbuf.list, list) {
16281 list_del_init(&d_buf->list);
16282 lpfc_in_buf_free(vport->phba, d_buf);
16283 }
16284 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16285 }
16286 if (abort_count)
16287 lpfc_update_rcv_time_stamp(vport);
16288}
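/*
 * Illustrative sketch (not compiled): the staleness test used above. A
 * sequence has timed out once E_D_TOV milliseconds have elapsed since its
 * time_stamp was last refreshed.
 */
#if 0
static bool example_seq_timed_out(struct lpfc_vport *vport,
				  unsigned long time_stamp)
{
	unsigned long timeout = msecs_to_jiffies(vport->phba->fc_edtov) +
				time_stamp;

	return time_after_eq(jiffies, timeout);
}
#endif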
16289
4f774513
JS
16290/**
16291 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which this frame was received
 16292 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
16293 *
16294 * This function searches through the existing incomplete sequences that have
16295 * been sent to this @vport. If the frame matches one of the incomplete
16296 * sequences then the dbuf in the @dmabuf is added to the list of frames that
16297 * make up that sequence. If no sequence is found that matches this frame then
 16298 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
 16299 * This function returns a pointer to the first dmabuf in the sequence list that
16300 * the frame was linked to.
16301 **/
16302static struct hbq_dmabuf *
16303lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16304{
16305 struct fc_frame_header *new_hdr;
16306 struct fc_frame_header *temp_hdr;
16307 struct lpfc_dmabuf *d_buf;
16308 struct lpfc_dmabuf *h_buf;
16309 struct hbq_dmabuf *seq_dmabuf = NULL;
16310 struct hbq_dmabuf *temp_dmabuf = NULL;
4360ca9c 16311 uint8_t found = 0;
4f774513 16312
4d9ab994 16313 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 16314 dmabuf->time_stamp = jiffies;
4f774513 16315 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
4360ca9c 16316
4f774513
JS
16317 /* Use the hdr_buf to find the sequence that this frame belongs to */
16318 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16319 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16320 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16321 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16322 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16323 continue;
16324 /* found a pending sequence that matches this frame */
16325 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16326 break;
16327 }
16328 if (!seq_dmabuf) {
16329 /*
16330 * This indicates first frame received for this sequence.
16331 * Queue the buffer on the vport's rcv_buffer_list.
16332 */
16333 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 16334 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
16335 return dmabuf;
16336 }
16337 temp_hdr = seq_dmabuf->hbuf.virt;
eeead811
JS
16338 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
16339 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4d9ab994
JS
16340 list_del_init(&seq_dmabuf->hbuf.list);
16341 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16342 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 16343 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
16344 return dmabuf;
16345 }
45ed1190
JS
16346 /* move this sequence to the tail to indicate a young sequence */
16347 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
16348 seq_dmabuf->time_stamp = jiffies;
16349 lpfc_update_rcv_time_stamp(vport);
eeead811
JS
16350 if (list_empty(&seq_dmabuf->dbuf.list)) {
16351 temp_hdr = dmabuf->hbuf.virt;
16352 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16353 return seq_dmabuf;
16354 }
4f774513 16355 /* find the correct place in the sequence to insert this frame */
4360ca9c
JS
16356 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
16357 while (!found) {
4f774513
JS
16358 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16359 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
16360 /*
16361 * If the frame's sequence count is greater than the frame on
16362 * the list then insert the frame right after this frame
16363 */
eeead811
JS
16364 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
16365 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4f774513 16366 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
4360ca9c
JS
16367 found = 1;
16368 break;
4f774513 16369 }
4360ca9c
JS
16370
16371 if (&d_buf->list == &seq_dmabuf->dbuf.list)
16372 break;
16373 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
4f774513 16374 }
4360ca9c
JS
16375
16376 if (found)
16377 return seq_dmabuf;
4f774513
JS
16378 return NULL;
16379}
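/*
 * Illustrative sketch (not compiled): frames are matched to a pending
 * sequence on the (SEQ_ID, OX_ID, S_ID) triple, exactly as in the lookup
 * loops above. The helper below is hypothetical.
 */
#if 0
static bool example_same_sequence(struct fc_frame_header *a,
				  struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}
#endif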
16380
6669f9bb
JS
16381/**
16382 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 16383 * @vport: pointer to a virtual port
 16384 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 16385 *
 16386 * This function tries to abort the partially assembled sequence described
 16387 * by the information in the basic abort @dmabuf. It checks whether such a
 16388 * partially assembled sequence is held by the driver. If so, it frees up
 16389 * all the frames from the partially assembled sequence.
16390 *
16391 * Return
16392 * true -- if there is matching partially assembled sequence present and all
16393 * the frames freed with the sequence;
16394 * false -- if there is no matching partially assembled sequence present so
16395 * nothing got aborted in the lower layer driver
16396 **/
16397static bool
16398lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
16399 struct hbq_dmabuf *dmabuf)
16400{
16401 struct fc_frame_header *new_hdr;
16402 struct fc_frame_header *temp_hdr;
16403 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
16404 struct hbq_dmabuf *seq_dmabuf = NULL;
16405
16406 /* Use the hdr_buf to find the sequence that matches this frame */
16407 INIT_LIST_HEAD(&dmabuf->dbuf.list);
16408 INIT_LIST_HEAD(&dmabuf->hbuf.list);
16409 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16410 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16411 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16412 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16413 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16414 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16415 continue;
16416 /* found a pending sequence that matches this frame */
16417 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16418 break;
16419 }
16420
16421 /* Free up all the frames from the partially assembled sequence */
16422 if (seq_dmabuf) {
16423 list_for_each_entry_safe(d_buf, n_buf,
16424 &seq_dmabuf->dbuf.list, list) {
16425 list_del_init(&d_buf->list);
16426 lpfc_in_buf_free(vport->phba, d_buf);
16427 }
16428 return true;
16429 }
16430 return false;
16431}
16432
6dd9e31c
JS
16433/**
16434 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 16435 * @vport: pointer to a virtual port
 16436 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 16437 *
 16438 * This function tries to abort the assembled sequence at the upper level
 16439 * protocol, described by the information in the basic abort @dmabuf. It
 16440 * checks whether such a pending context exists at the upper level protocol.
 16441 * If so, it cleans up the pending context.
16442 *
16443 * Return
16444 * true -- if there is matching pending context of the sequence cleaned
16445 * at ulp;
16446 * false -- if there is no matching pending context of the sequence present
16447 * at ulp.
16448 **/
16449static bool
16450lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16451{
16452 struct lpfc_hba *phba = vport->phba;
16453 int handled;
16454
16455 /* Accepting abort at ulp with SLI4 only */
16456 if (phba->sli_rev < LPFC_SLI_REV4)
16457 return false;
16458
16459 /* Register all caring upper level protocols to attend abort */
16460 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
16461 if (handled)
16462 return true;
16463
16464 return false;
16465}
16466
6669f9bb 16467/**
546fc854 16468 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
6669f9bb
JS
16469 * @phba: Pointer to HBA context object.
16470 * @cmd_iocbq: pointer to the command iocbq structure.
16471 * @rsp_iocbq: pointer to the response iocbq structure.
16472 *
546fc854 16473 * This function handles the sequence abort response iocb command complete
6669f9bb
JS
16474 * event. It properly releases the memory allocated to the sequence abort
 16475 * response iocb.
16476 **/
16477static void
546fc854 16478lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
6669f9bb
JS
16479 struct lpfc_iocbq *cmd_iocbq,
16480 struct lpfc_iocbq *rsp_iocbq)
16481{
6dd9e31c
JS
16482 struct lpfc_nodelist *ndlp;
16483
16484 if (cmd_iocbq) {
16485 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
16486 lpfc_nlp_put(ndlp);
16487 lpfc_nlp_not_used(ndlp);
6669f9bb 16488 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 16489 }
6b5151fd
JS
16490
16491 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
16492 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
16493 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16494 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
16495 rsp_iocbq->iocb.ulpStatus,
16496 rsp_iocbq->iocb.un.ulpWord[4]);
6669f9bb
JS
16497}
16498
6d368e53
JS
16499/**
16500 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
16501 * @phba: Pointer to HBA context object.
16502 * @xri: xri id in transaction.
16503 *
 16504 * This function validates that the xri maps to the known range of XRIs
 16505 * allocated and used by the driver.
16506 **/
7851fe2c 16507uint16_t
6d368e53
JS
16508lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
16509 uint16_t xri)
16510{
a2fc4aef 16511 uint16_t i;
6d368e53
JS
16512
16513 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
16514 if (xri == phba->sli4_hba.xri_ids[i])
16515 return i;
16516 }
16517 return NO_XRI;
16518}
16519
6669f9bb 16520/**
546fc854 16521 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
6669f9bb
JS
 16522 * @vport: Pointer to the vport the abort was received on.
 16523 * @fc_hdr: pointer to a FC frame header.
 * @aborted: true if the partially assembled sequence was aborted, else false.
16524 *
546fc854 16525 * This function sends a basic response to a previous unsol sequence abort
6669f9bb
JS
16526 * event after aborting the sequence handling.
16527 **/
86c67379 16528void
6dd9e31c
JS
16529lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
16530 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 16531{
6dd9e31c 16532 struct lpfc_hba *phba = vport->phba;
6669f9bb
JS
16533 struct lpfc_iocbq *ctiocb = NULL;
16534 struct lpfc_nodelist *ndlp;
ee0f4fe1 16535 uint16_t oxid, rxid, xri, lxri;
5ffc266e 16536 uint32_t sid, fctl;
6669f9bb 16537 IOCB_t *icmd;
546fc854 16538 int rc;
6669f9bb
JS
16539
16540 if (!lpfc_is_link_up(phba))
16541 return;
16542
16543 sid = sli4_sid_from_fc_hdr(fc_hdr);
16544 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 16545 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 16546
6dd9e31c 16547 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 16548 if (!ndlp) {
9d3d340d 16549 ndlp = lpfc_nlp_init(vport, sid);
6dd9e31c
JS
16550 if (!ndlp) {
16551 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
16552 "1268 Failed to allocate ndlp for "
16553 "oxid:x%x SID:x%x\n", oxid, sid);
16554 return;
16555 }
6dd9e31c
JS
16556 /* Put ndlp onto pport node list */
16557 lpfc_enqueue_node(vport, ndlp);
16558 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
16559 /* re-setup ndlp without removing from node list */
16560 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
16561 if (!ndlp) {
16562 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
16563 "3275 Failed to active ndlp found "
16564 "for oxid:x%x SID:x%x\n", oxid, sid);
16565 return;
16566 }
6669f9bb
JS
16567 }
16568
546fc854 16569 /* Allocate buffer for rsp iocb */
6669f9bb
JS
16570 ctiocb = lpfc_sli_get_iocbq(phba);
16571 if (!ctiocb)
16572 return;
16573
5ffc266e
JS
16574 /* Extract the F_CTL field from FC_HDR */
16575 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
16576
6669f9bb 16577 icmd = &ctiocb->iocb;
6669f9bb 16578 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 16579 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
6669f9bb
JS
16580 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
16581 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
16582 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
16583
16584 /* Fill in the rest of iocb fields */
16585 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
16586 icmd->ulpBdeCount = 0;
16587 icmd->ulpLe = 1;
16588 icmd->ulpClass = CLASS3;
6d368e53 16589 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 16590 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 16591
6669f9bb
JS
16593 ctiocb->vport = phba->pport;
546fc854 16594 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 16595 ctiocb->sli4_lxritag = NO_XRI;
546fc854
JS
16596 ctiocb->sli4_xritag = NO_XRI;
16597
ee0f4fe1
JS
16598 if (fctl & FC_FC_EX_CTX)
16599 /* Exchange responder sent the abort so we
16600 * own the oxid.
16601 */
16602 xri = oxid;
16603 else
16604 xri = rxid;
16605 lxri = lpfc_sli4_xri_inrange(phba, xri);
16606 if (lxri != NO_XRI)
16607 lpfc_set_rrq_active(phba, ndlp, lxri,
16608 (xri == oxid) ? rxid : oxid, 0);
6dd9e31c
JS
16609 /* For BA_ABTS from exchange responder, if the logical xri with
16610 * the oxid maps to the FCP XRI range, the port no longer has
16611 * that exchange context, send a BLS_RJT. Override the IOCB for
16612 * a BA_RJT.
16613 */
16614 if ((fctl & FC_FC_EX_CTX) &&
895427bd 16615 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
6dd9e31c
JS
16616 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
16617 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
16618 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
16619 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
16620 }
16621
16622 /* If BA_ABTS failed to abort a partially assembled receive sequence,
16623 * the driver no longer has that exchange, send a BLS_RJT. Override
16624 * the IOCB for a BA_RJT.
546fc854 16625 */
6dd9e31c 16626 if (aborted == false) {
546fc854
JS
16627 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
16628 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
16629 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
16630 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
16631 }
6669f9bb 16632
5ffc266e
JS
16633 if (fctl & FC_FC_EX_CTX) {
16634 /* ABTS sent by responder to CT exchange, construction
16635 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
16636 * field and RX_ID from ABTS for RX_ID field.
16637 */
546fc854 16638 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
5ffc266e
JS
16639 } else {
16640 /* ABTS sent by initiator to CT exchange, construction
 16641 * of BA_ACC will need to allocate a new XRI for the
f09c3acc 16642 * XRI_TAG field.
5ffc266e 16643 */
546fc854 16644 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 16645 }
f09c3acc 16646 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 16647 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 16648
546fc854 16649 /* Xmit CT abts response on exchange <xid> */
6dd9e31c
JS
16650 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
16651 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
16652 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
546fc854
JS
16653
16654 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
16655 if (rc == IOCB_ERROR) {
6dd9e31c
JS
16656 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
16657 "2925 Failed to issue CT ABTS RSP x%x on "
16658 "xri x%x, Data x%x\n",
16659 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
16660 phba->link_state);
16661 lpfc_nlp_put(ndlp);
16662 ctiocb->context1 = NULL;
546fc854
JS
16663 lpfc_sli_release_iocbq(phba, ctiocb);
16664 }
6669f9bb
JS
16665}
16666
16667/**
16668 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
16669 * @vport: Pointer to the vport on which this sequence was received
16670 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16671 *
16672 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 16673 * receive sequence is only partially assembled by the driver, it shall abort
 16674 * the partially assembled frames for the sequence. Otherwise, if the
 16675 * unsolicited receive sequence has been completely assembled and passed to
 16676 * the Upper Layer Protocol (ULP), it marks the per-oxid status of the
 16677 * unsolicited sequence as aborted. After that, it issues a basic accept
 16678 * or reject in response to the abort.
16679 **/
5d8b8167 16680static void
6669f9bb
JS
16681lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
16682 struct hbq_dmabuf *dmabuf)
16683{
16684 struct lpfc_hba *phba = vport->phba;
16685 struct fc_frame_header fc_hdr;
5ffc266e 16686 uint32_t fctl;
6dd9e31c 16687 bool aborted;
6669f9bb 16688
6669f9bb
JS
16689 /* Make a copy of fc_hdr before the dmabuf being released */
16690 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 16691 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 16692
5ffc266e 16693 if (fctl & FC_FC_EX_CTX) {
6dd9e31c
JS
16694 /* ABTS by responder to exchange, no cleanup needed */
16695 aborted = true;
5ffc266e 16696 } else {
6dd9e31c
JS
16697 /* ABTS by initiator to exchange, need to do cleanup */
16698 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
16699 if (aborted == false)
16700 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 16701 }
6dd9e31c
JS
16702 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16703
86c67379
JS
16704 if (phba->nvmet_support) {
16705 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
16706 return;
16707 }
16708
6dd9e31c
JS
16709 /* Respond with BA_ACC or BA_RJT accordingly */
16710 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
6669f9bb
JS
16711}
16712
4f774513
JS
16713/**
16714 * lpfc_seq_complete - Indicates if a sequence is complete
16715 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16716 *
16717 * This function checks the sequence, starting with the frame described by
16718 * @dmabuf, to see if all the frames associated with this sequence are present.
 16719 * The frames associated with this sequence are linked to the @dmabuf using
 16720 * the dbuf list. This function looks for three major things. 1) That the first
 16721 * frame has a sequence count of zero. 2) That there is a frame with the
 16722 * last-frame-of-sequence bit set. 3) That there are no holes in the sequence
 16723 * count. The function will return 1 when the sequence is complete, otherwise 0.
16724 **/
16725static int
16726lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
16727{
16728 struct fc_frame_header *hdr;
16729 struct lpfc_dmabuf *d_buf;
16730 struct hbq_dmabuf *seq_dmabuf;
16731 uint32_t fctl;
16732 int seq_count = 0;
16733
16734 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 16735 /* make sure first frame of sequence has a sequence count of zero */
16736 if (hdr->fh_seq_cnt != seq_count)
16737 return 0;
16738 fctl = (hdr->fh_f_ctl[0] << 16 |
16739 hdr->fh_f_ctl[1] << 8 |
16740 hdr->fh_f_ctl[2]);
16741 /* If last frame of sequence we can return success. */
16742 if (fctl & FC_FC_END_SEQ)
16743 return 1;
16744 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
16745 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16746 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16747 /* If there is a hole in the sequence count then fail. */
eeead811 16748 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
4f774513
JS
16749 return 0;
16750 fctl = (hdr->fh_f_ctl[0] << 16 |
16751 hdr->fh_f_ctl[1] << 8 |
16752 hdr->fh_f_ctl[2]);
16753 /* If last frame of sequence we can return success. */
16754 if (fctl & FC_FC_END_SEQ)
16755 return 1;
16756 }
16757 return 0;
16758}
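/*
 * Illustrative sketch (not compiled): F_CTL is carried as three big-endian
 * bytes in the FC header; lpfc_seq_complete() reassembles it as below
 * before testing the end-of-sequence bit.
 */
#if 0
static bool example_is_last_frame(struct fc_frame_header *hdr)
{
	uint32_t fctl = hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2];

	return (fctl & FC_FC_END_SEQ) != 0;
}
#endif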
16759
16760/**
16761 * lpfc_prep_seq - Prep sequence for ULP processing
16762 * @vport: Pointer to the vport on which this sequence was received
 16763 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
16764 *
16765 * This function takes a sequence, described by a list of frames, and creates
16766 * a list of iocbq structures to describe the sequence. This iocbq list will be
16767 * used to issue to the generic unsolicited sequence handler. This routine
16768 * returns a pointer to the first iocbq in the list. If the function is unable
 16769 * to allocate an iocbq then it throws out the received frames that could not
 16770 * be described and returns a pointer to the first iocbq. If unable to
16771 * allocate any iocbqs (including the first) this function will return NULL.
16772 **/
16773static struct lpfc_iocbq *
16774lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
16775{
7851fe2c 16776 struct hbq_dmabuf *hbq_buf;
4f774513
JS
16777 struct lpfc_dmabuf *d_buf, *n_buf;
16778 struct lpfc_iocbq *first_iocbq, *iocbq;
16779 struct fc_frame_header *fc_hdr;
16780 uint32_t sid;
7851fe2c 16781 uint32_t len, tot_len;
eeead811 16782 struct ulp_bde64 *pbde;
4f774513
JS
16783
16784 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16785 /* remove from receive buffer list */
16786 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 16787 lpfc_update_rcv_time_stamp(vport);
4f774513 16788 /* get the Remote Port's SID */
6669f9bb 16789 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 16790 tot_len = 0;
4f774513
JS
16791 /* Get an iocbq struct to fill in. */
16792 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
16793 if (first_iocbq) {
16794 /* Initialize the first IOCB. */
8fa38513 16795 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 16796 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
895427bd 16797 first_iocbq->vport = vport;
939723a4
JS
16798
16799 /* Check FC Header to see what TYPE of frame we are rcv'ing */
16800 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
16801 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
16802 first_iocbq->iocb.un.rcvels.parmRo =
16803 sli4_did_from_fc_hdr(fc_hdr);
16804 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
16805 } else
16806 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
7851fe2c
JS
16807 first_iocbq->iocb.ulpContext = NO_XRI;
16808 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
16809 be16_to_cpu(fc_hdr->fh_ox_id);
16810 /* iocbq is prepped for internal consumption. Physical vpi. */
16811 first_iocbq->iocb.unsli3.rcvsli3.vpi =
16812 vport->phba->vpi_ids[vport->vpi];
16813 /* put the first buffer into the first IOCBq */
16814 tot_len = bf_get(lpfc_rcqe_length,
16815 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
16816
16817 first_iocbq->context2 = &seq_dmabuf->dbuf;
16818 first_iocbq->context3 = NULL;
16819 first_iocbq->iocb.ulpBdeCount = 1;
16820 if (tot_len > LPFC_DATA_BUF_SIZE)
16821 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16822 LPFC_DATA_BUF_SIZE;
16823 else
16824 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
16825
16826 first_iocbq->iocb.un.rcvels.remoteID = sid;
16827
16828 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16829 }
16830 iocbq = first_iocbq;
16831 /*
16832 * Each IOCBq can have two Buffers assigned, so go through the list
16833 * of buffers for this sequence and save two buffers in each IOCBq
16834 */
16835 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
16836 if (!iocbq) {
16837 lpfc_in_buf_free(vport->phba, d_buf);
16838 continue;
16839 }
16840 if (!iocbq->context3) {
16841 iocbq->context3 = d_buf;
16842 iocbq->iocb.ulpBdeCount++;
16843 /* We need to get the size out of the right CQE */
16844 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16845 len = bf_get(lpfc_rcqe_length,
16846 &hbq_buf->cq_event.cqe.rcqe_cmpl);
16847 pbde = (struct ulp_bde64 *)
16848 &iocbq->iocb.unsli3.sli3Words[4];
16849 if (len > LPFC_DATA_BUF_SIZE)
16850 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
16851 else
16852 pbde->tus.f.bdeSize = len;
16853
16854 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
16855 tot_len += len;
16856 } else {
16857 iocbq = lpfc_sli_get_iocbq(vport->phba);
16858 if (!iocbq) {
16859 if (first_iocbq) {
16860 first_iocbq->iocb.ulpStatus =
16861 IOSTAT_FCP_RSP_ERROR;
16862 first_iocbq->iocb.un.ulpWord[4] =
16863 IOERR_NO_RESOURCES;
16864 }
16865 lpfc_in_buf_free(vport->phba, d_buf);
16866 continue;
16867 }
16868 /* We need to get the size out of the right CQE */
16869 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16870 len = bf_get(lpfc_rcqe_length,
16871 &hbq_buf->cq_event.cqe.rcqe_cmpl);
16872 iocbq->context2 = d_buf;
16873 iocbq->context3 = NULL;
16874 iocbq->iocb.ulpBdeCount = 1;
16875 if (len > LPFC_DATA_BUF_SIZE)
16876 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16877 LPFC_DATA_BUF_SIZE;
16878 else
16879 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
16880
16881 tot_len += len;
16882 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16883
16884 iocbq->iocb.un.rcvels.remoteID = sid;
16885 list_add_tail(&iocbq->list, &first_iocbq->list);
16886 }
16887 }
16888 return first_iocbq;
16889}
16890
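/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It shows how a
 * consumer could walk the iocbq chain built by lpfc_prep_seq() above:
 * the returned iocbq heads the list, and every entry, including the
 * head, describes up to two buffers via ulpBdeCount.
 */
static uint32_t __maybe_unused
lpfc_example_count_seq_bufs(struct lpfc_iocbq *first_iocbq)
{
	struct lpfc_iocbq *iocbq;
	uint32_t nbufs = first_iocbq->iocb.ulpBdeCount;

	/* list_for_each_entry() does not revisit the list head */
	list_for_each_entry(iocbq, &first_iocbq->list, list)
		nbufs += iocbq->iocb.ulpBdeCount;
	return nbufs;
}
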
16891static void
16892lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
16893 struct hbq_dmabuf *seq_dmabuf)
16894{
16895 struct fc_frame_header *fc_hdr;
16896 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
16897 struct lpfc_hba *phba = vport->phba;
16898
16899 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16900 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
16901 if (!iocbq) {
16902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16903 "2707 Ring %d handler: Failed to allocate "
16904 "iocb Rctl x%x Type x%x received\n",
16905 LPFC_ELS_RING,
16906 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16907 return;
16908 }
16909 if (!lpfc_complete_unsol_iocb(phba,
16910 phba->sli4_hba.els_wq->pring,
16911 iocbq, fc_hdr->fh_r_ctl,
16912 fc_hdr->fh_type))
16913 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16914 "2540 Ring %d handler: unexpected Rctl "
16915 "x%x Type x%x received\n",
16916 LPFC_ELS_RING,
16917 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16918
16919 /* Free iocb created in lpfc_prep_seq */
16920 list_for_each_entry_safe(curr_iocb, next_iocb,
16921 &iocbq->list, list) {
16922 list_del_init(&curr_iocb->list);
16923 lpfc_sli_release_iocbq(phba, curr_iocb);
16924 }
16925 lpfc_sli_release_iocbq(phba, iocbq);
16926}
16927
16928/**
16929 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
16930 * @phba: Pointer to HBA context object.
16931 *
16932 * This function is called with no lock held. This function processes all
16933 * the received buffers and hands them to the upper layers when a received
16934 * buffer indicates that it is the final frame in the sequence. The interrupt
16935 * service routine processes received buffers in interrupt context.
16936 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call
16937 * the appropriate receive function when the final frame in a sequence is received.
16938 **/
16939void
16940lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
16941 struct hbq_dmabuf *dmabuf)
16942{
16943 struct hbq_dmabuf *seq_dmabuf;
16944 struct fc_frame_header *fc_hdr;
16945 struct lpfc_vport *vport;
16946 uint32_t fcfi;
16947 uint32_t did;
16948
16949 /* Process each received buffer */
16950 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16951
16952 /* check to see if this is a valid type of frame */
16953 if (lpfc_fc_frame_check(phba, fc_hdr)) {
16954 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16955 return;
16956 }
16957
16958 if ((bf_get(lpfc_cqe_code,
16959 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
16960 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
16961 &dmabuf->cq_event.cqe.rcqe_cmpl);
16962 else
16963 fcfi = bf_get(lpfc_rcqe_fcf_id,
16964 &dmabuf->cq_event.cqe.rcqe_cmpl);
16965
16966 /* d_id this frame is directed to */
16967 did = sli4_did_from_fc_hdr(fc_hdr);
16968
16969 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
16970 if (!vport) {
16971 /* throw out the frame */
16972 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16973 return;
16974 }
16975
16976 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
16977 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
16978 (did != Fabric_DID)) {
16979 /*
16980 * Throw out the frame if we are not pt2pt.
16981 * The pt2pt protocol allows for discovery frames
16982 * to be received without a registered VPI.
16983 */
16984 if (!(vport->fc_flag & FC_PT2PT) ||
16985 (phba->link_state == LPFC_HBA_READY)) {
16986 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16987 return;
16988 }
16989 }
16990
16991 /* Handle the basic abort sequence (BA_ABTS) event */
16992 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
16993 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
16994 return;
16995 }
16996
16997 /* Link this frame */
16998 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
16999 if (!seq_dmabuf) {
17000 /* unable to add frame to vport - throw it out */
17001 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17002 return;
17003 }
17004 /* If not last frame in sequence continue processing frames. */
17005 if (!lpfc_seq_complete(seq_dmabuf))
17006 return;
17007
17008 /* Send the complete sequence to the upper layer protocol */
17009 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17010}
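
/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It isolates the
 * version-dependent receive-queue CQE read used above: the V1 CQE
 * carries the FCF index in a different field, so the CQE code must be
 * checked before extracting it.
 */
static uint32_t __maybe_unused
lpfc_example_rcqe_fcfi(struct hbq_dmabuf *dmabuf)
{
	if (bf_get(lpfc_cqe_code, &dmabuf->cq_event.cqe.rcqe_cmpl) ==
	    CQE_CODE_RECEIVE_V1)
		return bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	return bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
}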
17011
17012/**
17013 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17014 * @phba: pointer to lpfc hba data structure.
17015 *
17016 * This routine is invoked to post rpi header templates to the
17017 * HBA consistent with the SLI-4 interface spec. This routine
17018 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17019 * SLI4_PAGE_SIZE / 64 rpi context headers.
17020 *
17021 * This routine does not require any locks. Its usage is expected
17022 * to be driver load or reset recovery, when driver execution is
17023 * sequential.
17024 *
17025 * Return codes
17026 * 0 - successful
17027 * -EIO - The mailbox failed to complete successfully.
17028 * When this error occurs, the driver is not guaranteed
17029 * to have any rpi regions posted to the device and
17030 * must either attempt to repost the regions or take a
17031 * fatal error.
17032 **/
17033int
17034lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17035{
17036 struct lpfc_rpi_hdr *rpi_page;
17037 uint32_t rc = 0;
17038 uint16_t lrpi = 0;
17039
17040 /* SLI4 ports that support extents do not require RPI headers. */
17041 if (!phba->sli4_hba.rpi_hdrs_in_use)
17042 goto exit;
17043 if (phba->sli4_hba.extents_in_use)
17044 return -EIO;
17045
17046 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
17047 /*
17048 * Assign the rpi headers a physical rpi only if the driver
17049 * has not initialized those resources. A port reset only
17050 * needs the headers posted.
17051 */
17052 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17053 LPFC_RPI_RSRC_RDY)
17054 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17055
17056 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17057 if (rc != MBX_SUCCESS) {
17058 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17059 "2008 Error %d posting all rpi "
17060 "headers\n", rc);
17061 rc = -EIO;
17062 break;
17063 }
17064 }
17065
17066 exit:
17067 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17068 LPFC_RPI_RSRC_RDY);
17069 return rc;
17070}
17071
17072/**
17073 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17074 * @phba: pointer to lpfc hba data structure.
17075 * @rpi_page: pointer to the rpi memory region.
17076 *
17077 * This routine is invoked to post a single rpi header to the
17078 * HBA consistent with the SLI-4 interface spec. This memory region
17079 * maps up to 64 rpi context regions.
17080 *
17081 * Return codes
17082 * 0 - successful
17083 * -ENOMEM - No available memory
17084 * -EIO - The mailbox failed to complete successfully.
17085 **/
17086int
17087lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17088{
17089 LPFC_MBOXQ_t *mboxq;
17090 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17091 uint32_t rc = 0;
17092 uint32_t shdr_status, shdr_add_status;
17093 union lpfc_sli4_cfg_shdr *shdr;
17094
17095 /* SLI4 ports that support extents do not require RPI headers. */
17096 if (!phba->sli4_hba.rpi_hdrs_in_use)
17097 return rc;
17098 if (phba->sli4_hba.extents_in_use)
17099 return -EIO;
17100
17101 /* The port is notified of the header region via a mailbox command. */
17102 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17103 if (!mboxq) {
17104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17105 "2001 Unable to allocate memory for issuing "
17106 "SLI_CONFIG_SPECIAL mailbox command\n");
17107 return -ENOMEM;
17108 }
17109
17110 /* Post all rpi memory regions to the port. */
17111 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
17112 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17113 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17114 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
17115 sizeof(struct lpfc_sli4_cfg_mhdr),
17116 LPFC_SLI4_MBX_EMBED);
17117
17118
17119 /* Post the physical rpi to the port for this rpi header. */
17120 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17121 rpi_page->start_rpi);
17122 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17123 hdr_tmpl, rpi_page->page_count);
17124
17125 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17126 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
17127 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17128 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17129 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17130 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17131 if (rc != MBX_TIMEOUT)
17132 mempool_free(mboxq, phba->mbox_mem_pool);
17133 if (shdr_status || shdr_add_status || rc) {
17134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17135 "2514 POST_RPI_HDR mailbox failed with "
17136 "status x%x add_status x%x, mbx status x%x\n",
17137 shdr_status, shdr_add_status, rc);
17138 rc = -ENXIO;
17139 } else {
17140 /*
17141 * The next_rpi stores the next logical modulo-64 rpi value used
17142 * to post physical rpis in subsequent rpi postings.
17143 */
17144 spin_lock_irq(&phba->hbalock);
17145 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17146 spin_unlock_irq(&phba->hbalock);
17147 }
17148 return rc;
17149}
17150
17151/**
17152 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17153 * @phba: pointer to lpfc hba data structure.
17154 *
17155 * This routine is invoked to allocate the next available rpi from the
17156 * driver's rpi bitmask. If the driver is running low on rpi
17157 * resources, it also posts another rpi header region to the port so
17158 * that allocation can continue.
17159 *
17160 * Returns
17161 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17162 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
17163 **/
17164int
17165lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17166{
17167 unsigned long rpi;
17168 uint16_t max_rpi, rpi_limit;
17169 uint16_t rpi_remaining, lrpi = 0;
17170 struct lpfc_rpi_hdr *rpi_hdr;
17171 unsigned long iflag;
17172
17173 /*
17174 * Fetch the next logical rpi. Because this index is logical,
17175 * the driver starts at 0 each time.
17176 */
17177 spin_lock_irqsave(&phba->hbalock, iflag);
17178 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17179 rpi_limit = phba->sli4_hba.next_rpi;
17180
17181 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17182 if (rpi >= rpi_limit)
17183 rpi = LPFC_RPI_ALLOC_ERROR;
17184 else {
17185 set_bit(rpi, phba->sli4_hba.rpi_bmask);
17186 phba->sli4_hba.max_cfg_param.rpi_used++;
17187 phba->sli4_hba.rpi_count++;
17188 }
17189 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17190 "0001 rpi:%x max:%x lim:%x\n",
17191 (int) rpi, max_rpi, rpi_limit);
17192
17193 /*
17194 * Don't try to allocate more rpi header regions if the device limit
17195 * has been exhausted.
17196 */
17197 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17198 (phba->sli4_hba.rpi_count >= max_rpi)) {
17199 spin_unlock_irqrestore(&phba->hbalock, iflag);
17200 return rpi;
17201 }
17202
17203 /*
17204 * RPI header postings are not required for SLI4 ports capable of
17205 * extents.
17206 */
17207 if (!phba->sli4_hba.rpi_hdrs_in_use) {
17208 spin_unlock_irqrestore(&phba->hbalock, iflag);
17209 return rpi;
17210 }
17211
17212 /*
17213 * If the driver is running low on rpi resources, allocate another
17214 * page now. Note that the next_rpi value is used because
17215 * it represents how many are actually in use whereas max_rpi is
17216 * the maximum supported by the device.
17217 */
17218 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17219 spin_unlock_irqrestore(&phba->hbalock, iflag);
17220 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17221 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17222 if (!rpi_hdr) {
17223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17224 "2002 Error Could not grow rpi "
17225 "count\n");
17226 } else {
17227 lrpi = rpi_hdr->start_rpi;
17228 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17229 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
17230 }
17231 }
17232
17233 return rpi;
17234}
17235
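/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It shows the
 * expected pairing of lpfc_sli4_alloc_rpi() and lpfc_sli4_free_rpi():
 * a failed allocation returns LPFC_RPI_ALLOC_ERROR and must never be
 * handed to the free side.
 */
static int __maybe_unused
lpfc_example_rpi_cycle(struct lpfc_hba *phba)
{
	int rpi = lpfc_sli4_alloc_rpi(phba);

	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;	/* device rpi range exhausted */
	/* ... the rpi would normally be used for a REG_LOGIN here ... */
	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
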
17236/**
17237 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17238 * @phba: pointer to lpfc hba data structure.
17239 *
17240 * This routine is invoked to release an rpi to the pool of
17241 * available rpis maintained by the driver.
17242 **/
17243static void
17244__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17245{
17246 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17247 phba->sli4_hba.rpi_count--;
17248 phba->sli4_hba.max_cfg_param.rpi_used--;
17249 }
17250}
17251
17252/**
17253 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17254 * @phba: pointer to lpfc hba data structure.
17255 *
17256 * This routine is invoked to release an rpi to the pool of
17257 * available rpis maintained by the driver.
17258 **/
17259void
17260lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17261{
17262 spin_lock_irq(&phba->hbalock);
17263 __lpfc_sli4_free_rpi(phba, rpi);
17264 spin_unlock_irq(&phba->hbalock);
17265}
17266
17267/**
17268 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17269 * @phba: pointer to lpfc hba data structure.
17270 *
17271 * This routine is invoked to remove the memory region that
17272 * provided rpis via a bitmask.
17273 **/
17274void
17275lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17276{
17277 kfree(phba->sli4_hba.rpi_bmask);
17278 kfree(phba->sli4_hba.rpi_ids);
17279 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17280}
17281
17282/**
17283 * lpfc_sli4_resume_rpi - Resume an rpi with the port
17284 * @ndlp: pointer to the node whose rpi is being resumed.
17285 *
17286 * This routine is invoked to issue a RESUME_RPI mailbox command to the
17287 * port, resuming a previously registered rpi for the given node.
17288 **/
17289int
17290lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17291 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17292{
17293 LPFC_MBOXQ_t *mboxq;
17294 struct lpfc_hba *phba = ndlp->phba;
17295 int rc;
17296
17297 /* The port is notified of the header region via a mailbox command. */
17298 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17299 if (!mboxq)
17300 return -ENOMEM;
17301
17302 /* Post all rpi memory regions to the port. */
17303 lpfc_resume_rpi(mboxq, ndlp);
17304 if (cmpl) {
17305 mboxq->mbox_cmpl = cmpl;
17306 mboxq->context1 = arg;
17307 mboxq->context2 = ndlp;
17308 } else
17309 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17310 mboxq->vport = ndlp->vport;
17311 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17312 if (rc == MBX_NOT_FINISHED) {
17313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17314 "2010 Resume RPI Mailbox failed "
17315 "status %d, mbxStatus x%x\n", rc,
17316 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17317 mempool_free(mboxq, phba->mbox_mem_pool);
17318 return -EIO;
17319 }
17320 return 0;
17321}
17322
17323/**
17324 * lpfc_sli4_init_vpi - Initialize a vpi with the port
17325 * @vport: Pointer to the vport for which the vpi is being initialized
17326 *
17327 * This routine is invoked to activate a vpi with the port.
17328 *
17329 * Returns:
17330 * 0 success
17331 * -Evalue otherwise
17332 **/
17333int
17334lpfc_sli4_init_vpi(struct lpfc_vport *vport)
17335{
17336 LPFC_MBOXQ_t *mboxq;
17337 int rc = 0;
17338 int retval = MBX_SUCCESS;
17339 uint32_t mbox_tmo;
17340 struct lpfc_hba *phba = vport->phba;
17341 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17342 if (!mboxq)
17343 return -ENOMEM;
17344 lpfc_init_vpi(phba, mboxq, vport->vpi);
17345 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
17346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
17347 if (rc != MBX_SUCCESS) {
17348 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
17349 "2022 INIT VPI Mailbox failed "
17350 "status %d, mbxStatus x%x\n", rc,
17351 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17352 retval = -EIO;
17353 }
17354 if (rc != MBX_TIMEOUT)
17355 mempool_free(mboxq, vport->phba->mbox_mem_pool);
17356
17357 return retval;
17358}
17359
17360/**
17361 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
17362 * @phba: pointer to lpfc hba data structure.
17363 * @mboxq: Pointer to mailbox object.
17364 *
17365 * This routine is invoked to manually add a single FCF record. The caller
17366 * must pass a completely initialized FCF_Record. This routine takes
17367 * care of the nonembedded mailbox operations.
17368 **/
17369static void
17370lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17371{
17372 void *virt_addr;
17373 union lpfc_sli4_cfg_shdr *shdr;
17374 uint32_t shdr_status, shdr_add_status;
17375
17376 virt_addr = mboxq->sge_array->addr[0];
17377 /* The IOCTL status is embedded in the mailbox subheader. */
17378 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17379 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17380 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17381
17382 if ((shdr_status || shdr_add_status) &&
17383 (shdr_status != STATUS_FCF_IN_USE))
17384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17385 "2558 ADD_FCF_RECORD mailbox failed with "
17386 "status x%x add_status x%x\n",
17387 shdr_status, shdr_add_status);
17388
17389 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17390}
17391
17392/**
17393 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
17394 * @phba: pointer to lpfc hba data structure.
17395 * @fcf_record: pointer to the initialized fcf record to add.
17396 *
17397 * This routine is invoked to manually add a single FCF record. The caller
17398 * must pass a completely initialized FCF_Record. This routine takes
17399 * care of the nonembedded mailbox operations.
17400 **/
17401int
17402lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
17403{
17404 int rc = 0;
17405 LPFC_MBOXQ_t *mboxq;
17406 uint8_t *bytep;
17407 void *virt_addr;
17408 struct lpfc_mbx_sge sge;
17409 uint32_t alloc_len, req_len;
17410 uint32_t fcfindex;
17411
17412 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17413 if (!mboxq) {
17414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17415 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
17416 return -ENOMEM;
17417 }
17418
17419 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
17420 sizeof(uint32_t);
17421
17422 /* Allocate DMA memory and set up the non-embedded mailbox command */
17423 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17424 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
17425 req_len, LPFC_SLI4_MBX_NEMBED);
17426 if (alloc_len < req_len) {
17427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17428 "2523 Allocated DMA memory size (x%x) is "
17429 "less than the requested DMA memory "
17430 "size (x%x)\n", alloc_len, req_len);
17431 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17432 return -ENOMEM;
17433 }
17434
17435 /*
17436 * Get the first SGE entry from the non-embedded DMA memory. This
17437 * routine only uses a single SGE.
17438 */
17439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
17440 virt_addr = mboxq->sge_array->addr[0];
17441 /*
17442 * Configure the FCF record for FCFI 0. This is the driver's
17443 * hardcoded default and gets used in nonFIP mode.
17444 */
17445 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
17446 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
17447 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
17448
17449 /*
17450 * Copy the fcf_index and the FCF Record Data. The data starts after
17451 * the FCoE header plus word10. The data copy needs to be endian
17452 * correct.
17453 */
17454 bytep += sizeof(uint32_t);
17455 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
17456 mboxq->vport = phba->pport;
17457 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
17458 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17459 if (rc == MBX_NOT_FINISHED) {
17460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17461 "2515 ADD_FCF_RECORD mailbox failed with "
17462 "status 0x%x\n", rc);
17463 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17464 rc = -EIO;
17465 } else
17466 rc = 0;
17467
17468 return rc;
17469}
17470
17471/**
17472 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
17473 * @phba: pointer to lpfc hba data structure.
17474 * @fcf_record: pointer to the fcf record to write the default data.
17475 * @fcf_index: FCF table entry index.
17476 *
17477 * This routine is invoked to build the driver's default FCF record. The
17478 * values used are hardcoded. This routine handles memory initialization.
17479 *
17480 **/
17481void
17482lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
17483 struct fcf_record *fcf_record,
17484 uint16_t fcf_index)
17485{
17486 memset(fcf_record, 0, sizeof(struct fcf_record));
17487 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
17488 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
17489 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
17490 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
17491 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
17492 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
17493 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
17494 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
17495 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
17496 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
17497 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
17498 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
17499 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
17500 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
17501 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
17502 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
17503 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
17504 /* Set the VLAN bit map */
17505 if (phba->valid_vlan) {
17506 fcf_record->vlan_bitmap[phba->vlan_id / 8]
17507 = 1 << (phba->vlan_id % 8);
17508 }
17509}
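
/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It shows the
 * usual pairing of lpfc_sli4_build_dflt_fcf_record() and
 * lpfc_sli4_add_fcf_record() for non-FIP operation.
 */
static int __maybe_unused
lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record fcf_record;

	/* FCF index 0 is the driver's hardcoded non-FIP default */
	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}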
17510
17511/**
17512 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
17513 * @phba: pointer to lpfc hba data structure.
17514 * @fcf_index: FCF table entry offset.
17515 *
17516 * This routine is invoked to scan the entire FCF table by reading FCF
17517 * record and processing it one at a time starting from the @fcf_index
17518 * for initial FCF discovery or fast FCF failover rediscovery.
17519 *
17520 * Return 0 if the mailbox command is submitted successfully, non-zero
17521 * otherwise.
17522 **/
17523int
17524lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17525{
17526 int rc = 0, error;
17527 LPFC_MBOXQ_t *mboxq;
17528
17529 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
17530 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
17531 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17532 if (!mboxq) {
17533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17534 "2000 Failed to allocate mbox for "
17535 "READ_FCF cmd\n");
17536 error = -ENOMEM;
17537 goto fail_fcf_scan;
17538 }
17539 /* Construct the read FCF record mailbox command */
17540 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17541 if (rc) {
17542 error = -EINVAL;
17543 goto fail_fcf_scan;
17544 }
17545 /* Issue the mailbox command asynchronously */
17546 mboxq->vport = phba->pport;
17547 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
17548
17549 spin_lock_irq(&phba->hbalock);
17550 phba->hba_flag |= FCF_TS_INPROG;
17551 spin_unlock_irq(&phba->hbalock);
17552
17553 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17554 if (rc == MBX_NOT_FINISHED)
17555 error = -EIO;
17556 else {
17557 /* Reset eligible FCF count for new scan */
17558 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
17559 phba->fcf.eligible_fcf_cnt = 0;
17560 error = 0;
17561 }
17562fail_fcf_scan:
17563 if (error) {
17564 if (mboxq)
17565 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17566 /* FCF scan failed, clear FCF_TS_INPROG flag */
17567 spin_lock_irq(&phba->hbalock);
17568 phba->hba_flag &= ~FCF_TS_INPROG;
17569 spin_unlock_irq(&phba->hbalock);
17570 }
17571 return error;
17572}
17573
17574/**
17575 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
17576 * @phba: pointer to lpfc hba data structure.
17577 * @fcf_index: FCF table entry offset.
17578 *
17579 * This routine is invoked to read an FCF record indicated by @fcf_index
17580 * and to use it for FLOGI roundrobin FCF failover.
17581 *
17582 * Return 0 if the mailbox command is submitted successfully, non-zero
17583 * otherwise.
17584 **/
17585int
17586lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17587{
17588 int rc = 0, error;
17589 LPFC_MBOXQ_t *mboxq;
17590
17591 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17592 if (!mboxq) {
17593 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17594 "2763 Failed to allocate mbox for "
17595 "READ_FCF cmd\n");
17596 error = -ENOMEM;
17597 goto fail_fcf_read;
17598 }
17599 /* Construct the read FCF record mailbox command */
17600 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17601 if (rc) {
17602 error = -EINVAL;
17603 goto fail_fcf_read;
17604 }
17605 /* Issue the mailbox command asynchronously */
17606 mboxq->vport = phba->pport;
17607 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
17608 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17609 if (rc == MBX_NOT_FINISHED)
17610 error = -EIO;
17611 else
17612 error = 0;
17613
17614fail_fcf_read:
17615 if (error && mboxq)
17616 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17617 return error;
17618}
17619
17620/**
17621 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
17622 * @phba: pointer to lpfc hba data structure.
17623 * @fcf_index: FCF table entry offset.
17624 *
17625 * This routine is invoked to read an FCF record indicated by @fcf_index to
17626 * determine whether it is eligible for the FLOGI roundrobin failover list.
17627 *
17628 * Return 0 if the mailbox command is submitted successfully, non-zero
17629 * otherwise.
17630 **/
17631int
17632lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17633{
17634 int rc = 0, error;
17635 LPFC_MBOXQ_t *mboxq;
17636
17637 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17638 if (!mboxq) {
17639 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17640 "2758 Failed to allocate mbox for "
17641 "READ_FCF cmd\n");
17642 error = -ENOMEM;
17643 goto fail_fcf_read;
17644 }
17645 /* Construct the read FCF record mailbox command */
17646 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17647 if (rc) {
17648 error = -EINVAL;
17649 goto fail_fcf_read;
17650 }
17651 /* Issue the mailbox command asynchronously */
17652 mboxq->vport = phba->pport;
17653 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
17654 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17655 if (rc == MBX_NOT_FINISHED)
17656 error = -EIO;
17657 else
17658 error = 0;
17659
17660fail_fcf_read:
17661 if (error && mboxq)
17662 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17663 return error;
17664}
17665
17666/**
17667 * lpfc_check_next_fcf_pri_level
17668 * @phba: pointer to the lpfc_hba struct for this port.
17669 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
17670 * routine when the rr_bmask is empty. The FCF indices are put into the
17671 * rr_bmask based on their priority level, starting from the highest priority
17672 * to the lowest. The most likely FCF candidate will be in the highest
17673 * priority group. When this routine is called it searches the fcf_pri list
17674 * for the next lowest priority group and repopulates the rr_bmask with only
17675 * those fcf_indexes.
17676 * returns:
17677 * 1=success 0=failure
17678 **/
17679static int
17680lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
17681{
17682 uint16_t next_fcf_pri;
17683 uint16_t last_index;
17684 struct lpfc_fcf_pri *fcf_pri;
17685 int rc;
17686 int ret = 0;
17687
17688 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
17689 LPFC_SLI4_FCF_TBL_INDX_MAX);
17690 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17691 "3060 Last IDX %d\n", last_index);
17692
17693 /* Verify the priority list has 2 or more entries */
17694 spin_lock_irq(&phba->hbalock);
17695 if (list_empty(&phba->fcf.fcf_pri_list) ||
17696 list_is_singular(&phba->fcf.fcf_pri_list)) {
17697 spin_unlock_irq(&phba->hbalock);
17698 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17699 "3061 Last IDX %d\n", last_index);
17700 return 0; /* Empty rr list */
17701 }
17702 spin_unlock_irq(&phba->hbalock);
17703
17704 next_fcf_pri = 0;
17705 /*
17706 * Clear the rr_bmask and set all of the bits that are at this
17707 * priority.
17708 */
17709 memset(phba->fcf.fcf_rr_bmask, 0,
17710 sizeof(*phba->fcf.fcf_rr_bmask));
17711 spin_lock_irq(&phba->hbalock);
17712 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17713 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
17714 continue;
17715 /*
17716 * the 1st priority that has not FLOGI failed
17717 * will be the highest.
17718 */
17719 if (!next_fcf_pri)
17720 next_fcf_pri = fcf_pri->fcf_rec.priority;
17721 spin_unlock_irq(&phba->hbalock);
17722 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17723 rc = lpfc_sli4_fcf_rr_index_set(phba,
17724 fcf_pri->fcf_rec.fcf_index);
17725 if (rc)
17726 return 0;
17727 }
17728 spin_lock_irq(&phba->hbalock);
17729 }
17730 /*
17731 * If next_fcf_pri was not set above and the list is not empty, then
17732 * we have failed FLOGI on all of them. So reset the FLOGI-failed flag
17733 * and start at the beginning.
17734 */
17735 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
17736 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17737 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
17738 /*
17739 * the first priority that has not failed FLOGI
17740 * will be the highest.
17741 */
17742 if (!next_fcf_pri)
17743 next_fcf_pri = fcf_pri->fcf_rec.priority;
17744 spin_unlock_irq(&phba->hbalock);
17745 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17746 rc = lpfc_sli4_fcf_rr_index_set(phba,
17747 fcf_pri->fcf_rec.fcf_index);
17748 if (rc)
17749 return 0;
17750 }
17751 spin_lock_irq(&phba->hbalock);
17752 }
17753 } else
17754 ret = 1;
17755 spin_unlock_irq(&phba->hbalock);
17756
17757 return ret;
17758}
17759/**
17760 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
17761 * @phba: pointer to lpfc hba data structure.
17762 *
17763 * This routine is to get the next eligible FCF record index in a round
17764 * robin fashion. If the next eligible FCF record index equals to the
17765 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
17766 * shall be returned, otherwise, the next eligible FCF record's index
17767 * shall be returned.
17768 **/
17769uint16_t
17770lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
17771{
17772 uint16_t next_fcf_index;
17773
17774initial_priority:
17775 /* Search start from next bit of currently registered FCF index */
17776 next_fcf_index = phba->fcf.current_rec.fcf_indx;
17777
17778next_priority:
17779 /* Determine the next fcf index to check */
17780 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
17781 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
17782 LPFC_SLI4_FCF_TBL_INDX_MAX,
17783 next_fcf_index);
17784
17785 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
17786 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17787 /*
17788 * If we have wrapped then we need to clear the bits that
17789 * have been tested so that we can detect when we should
17790 * change the priority level.
17791 */
17792 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
17793 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
17794 }
17795
17796
17797 /* Check roundrobin failover list empty condition */
17798 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
17799 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
17800 /*
17801 * If the next fcf index is not found, check if there are lower
17802 * priority level fcf's in the fcf_priority list.
17803 * Set up the rr_bmask with all of the available fcf bits
17804 * at that level and continue the selection process.
17805 */
17806 if (lpfc_check_next_fcf_pri_level(phba))
17807 goto initial_priority;
17808 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
17809 "2844 No roundrobin failover FCF available\n");
17810 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
17811 return LPFC_FCOE_FCF_NEXT_NONE;
17812 else {
17813 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
17814 "3063 Only FCF available idx %d, flag %x\n",
17815 next_fcf_index,
17816 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
17817 return next_fcf_index;
17818 }
17819 }
17820
17821 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
17822 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
17823 LPFC_FCF_FLOGI_FAILED) {
17824 if (list_is_singular(&phba->fcf.fcf_pri_list))
17825 return LPFC_FCOE_FCF_NEXT_NONE;
17826
17827 goto next_priority;
17828 }
17829
17830 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17831 "2845 Get next roundrobin failover FCF (x%x)\n",
17832 next_fcf_index);
17833
17834 return next_fcf_index;
17835}
17836
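/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It reduces the
 * wrap-around roundrobin search performed by
 * lpfc_sli4_fcf_rr_next_index_get() above to its core: advance one bit
 * past the current index, and search again from bit 0 on wrap.
 */
static uint16_t __maybe_unused
lpfc_example_rr_next(unsigned long *bmask, uint16_t max, uint16_t cur)
{
	uint16_t next = find_next_bit(bmask, max, (cur + 1) % max);

	if (next >= max)	/* wrapped: search again from bit 0 */
		next = find_next_bit(bmask, max, 0);
	return next;		/* equals max when the bmask is empty */
}
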
17837/**
17838 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
17839 * @phba: pointer to lpfc hba data structure.
17840 *
17841 * This routine sets the FCF record index in to the eligible bmask for
17842 * roundrobin failover search. It checks to make sure that the index
17843 * does not go beyond the range of the driver allocated bmask dimension
17844 * before setting the bit.
17845 *
17846 * Returns 0 if the index bit successfully set, otherwise, it returns
17847 * -EINVAL.
17848 **/
17849int
17850lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
17851{
17852 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17853 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17854 "2610 FCF (x%x) reached driver's book "
17855 "keeping dimension:x%x\n",
17856 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
17857 return -EINVAL;
17858 }
17859 /* Set the eligible FCF record index bmask */
17860 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
17861
17862 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17863 "2790 Set FCF (x%x) to roundrobin FCF failover "
17864 "bmask\n", fcf_index);
17865
17866 return 0;
17867}
17868
17869/**
17870 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
17871 * @phba: pointer to lpfc hba data structure.
17872 *
17873 * This routine clears the FCF record index from the eligible bmask for
17874 * roundrobin failover search. It checks to make sure that the index
17875 * does not go beyond the range of the driver allocated bmask dimension
17876 * before clearing the bit.
17877 **/
17878void
17879lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
17880{
17881 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
17882 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17883 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
17884 "2762 FCF (x%x) reached driver's book "
17885 "keeping dimension:x%x\n",
0c9ab6f5
JS
17886 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
17887 return;
17888 }
17889 /* Clear the eligible FCF record index bmask */
17890 spin_lock_irq(&phba->hbalock);
17891 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
17892 list) {
17893 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
17894 list_del_init(&fcf_pri->list);
17895 break;
17896 }
17897 }
17898 spin_unlock_irq(&phba->hbalock);
17899 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
17900
17901 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17902 "2791 Clear FCF (x%x) from roundrobin failover "
17903 "bmask\n", fcf_index);
17904}
17905
17906/**
17907 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
17908 * @phba: pointer to lpfc hba data structure.
17909 *
17910 * This routine is the completion routine for the rediscover FCF table mailbox
17911 * command. If the mailbox command returned failure, it will try to stop the
17912 * FCF rediscover wait timer.
17913 **/
17914static void
17915lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
17916{
17917 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
17918 uint32_t shdr_status, shdr_add_status;
17919
17920 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
17921
17922 shdr_status = bf_get(lpfc_mbox_hdr_status,
17923 &redisc_fcf->header.cfg_shdr.response);
17924 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
17925 &redisc_fcf->header.cfg_shdr.response);
17926 if (shdr_status || shdr_add_status) {
17927 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17928 "2746 Requesting for FCF rediscovery failed "
17929 "status x%x add_status x%x\n",
17930 shdr_status, shdr_add_status);
17931 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
17932 spin_lock_irq(&phba->hbalock);
17933 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
17934 spin_unlock_irq(&phba->hbalock);
17935 /*
17936 * CVL event triggered FCF rediscover request failed,
17937 * last resort to re-try current registered FCF entry.
17938 */
17939 lpfc_retry_pport_discovery(phba);
17940 } else {
17941 spin_lock_irq(&phba->hbalock);
17942 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
17943 spin_unlock_irq(&phba->hbalock);
17944 /*
17945 * DEAD FCF event triggered FCF rediscover request
17946 * failed, last resort to fail over as a link down
17947 * to FCF registration.
17948 */
17949 lpfc_sli4_fcf_dead_failthrough(phba);
17950 }
17951 } else {
17952 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17953 "2775 Start FCF rediscover quiescent timer\n");
17954 /*
17955 * Start FCF rediscovery wait timer for pending FCF
17956 * before rescan FCF record table.
17957 */
17958 lpfc_fcf_redisc_wait_start_timer(phba);
17959 }
17960
17961 mempool_free(mbox, phba->mbox_mem_pool);
17962}
17963
17964/**
17965 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
17966 * @phba: pointer to lpfc hba data structure.
17967 *
17968 * This routine is invoked to request for rediscovery of the entire FCF table
17969 * by the port.
17970 **/
17971int
17972lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
17973{
17974 LPFC_MBOXQ_t *mbox;
17975 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
17976 int rc, length;
17977
17978 /* Cancel retry delay timers to all vports before FCF rediscover */
17979 lpfc_cancel_all_vport_retry_delay_timer(phba);
17980
17981 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17982 if (!mbox) {
17983 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17984 "2745 Failed to allocate mbox for "
17985 "requesting FCF rediscover.\n");
17986 return -ENOMEM;
17987 }
17988
17989 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
17990 sizeof(struct lpfc_sli4_cfg_mhdr));
17991 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17992 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
17993 length, LPFC_SLI4_MBX_EMBED);
17994
17995 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
17996 /* Set count to 0 for invalidating the entire FCF database */
17997 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
17998
17999 /* Issue the mailbox command asynchronously */
18000 mbox->vport = phba->pport;
18001 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18002 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18003
18004 if (rc == MBX_NOT_FINISHED) {
18005 mempool_free(mbox, phba->mbox_mem_pool);
18006 return -EIO;
18007 }
18008 return 0;
18009}
18010
18011/**
18012 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18013 * @phba: pointer to lpfc hba data structure.
18014 *
18015 * This function is the failover routine as a last resort to the FCF DEAD
18016 * event when the driver failed to perform fast FCF failover.
18017 **/
18018void
18019lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18020{
18021 uint32_t link_state;
18022
18023 /*
18024 * Last resort as FCF DEAD event failover will treat this as
18025 * a link down, but save the link state because we don't want
18026 * it to be changed to Link Down unless it is already down.
18027 */
18028 link_state = phba->link_state;
18029 lpfc_linkdown(phba);
18030 phba->link_state = link_state;
18031
18032 /* Unregister FCF if no devices connected to it */
18033 lpfc_unregister_unused_fcf(phba);
18034}
18035
18036/**
18037 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18038 * @phba: pointer to lpfc hba data structure.
18039 * @rgn23_data: pointer to configuration region 23 data.
18040 *
18041 * This function gets SLI3 port configuration region 23 data through the
18042 * memory dump mailbox command. When it successfully retrieves data, the
18043 * size of the data will be returned; otherwise, 0 will be returned.
18044 **/
18045static uint32_t
18046lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
a0c87cbd
JS
18047{
18048 LPFC_MBOXQ_t *pmb = NULL;
18049 MAILBOX_t *mb;
18050 uint32_t offset = 0;
18051 int rc;
18052
18053 if (!rgn23_data)
18054 return 0;
18055
18056 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18057 if (!pmb) {
18058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18059 "2600 failed to allocate mailbox memory\n");
18060 return 0;
18061 }
18062 mb = &pmb->u.mb;
18063
18064 do {
18065 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18066 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18067
18068 if (rc != MBX_SUCCESS) {
18069 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
18070 "2601 failed to read config "
18071 "region 23, rc 0x%x Status 0x%x\n",
18072 rc, mb->mbxStatus);
18073 mb->un.varDmp.word_cnt = 0;
18074 }
18075 /*
18076 * dump mem may return a zero word count when finished, or we may
18077 * have gotten a mailbox error; either way we are done.
18078 */
18079 if (mb->un.varDmp.word_cnt == 0)
18080 break;
18081 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18082 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18083
18084 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18085 rgn23_data + offset,
18086 mb->un.varDmp.word_cnt);
18087 offset += mb->un.varDmp.word_cnt;
18088 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18089
18090 mempool_free(pmb, phba->mbox_mem_pool);
18091 return offset;
18092}
18093
18094/**
18095 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18096 * @phba: pointer to lpfc hba data structure.
18097 * @rgn23_data: pointer to configuration region 23 data.
18098 *
18099 * This function gets SLI4 port configuration region 23 data through the
18100 * memory dump mailbox command. When it successfully retrieves data, the
18101 * size of the data will be returned; otherwise, 0 will be returned.
18102 **/
18103static uint32_t
18104lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18105{
18106 LPFC_MBOXQ_t *mboxq = NULL;
18107 struct lpfc_dmabuf *mp = NULL;
18108 struct lpfc_mqe *mqe;
18109 uint32_t data_length = 0;
18110 int rc;
18111
18112 if (!rgn23_data)
18113 return 0;
18114
18115 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18116 if (!mboxq) {
18117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18118 "3105 failed to allocate mailbox memory\n");
18119 return 0;
18120 }
18121
18122 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18123 goto out;
18124 mqe = &mboxq->u.mqe;
18125 mp = (struct lpfc_dmabuf *) mboxq->context1;
18126 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18127 if (rc)
18128 goto out;
18129 data_length = mqe->un.mb_words[5];
18130 if (data_length == 0)
18131 goto out;
18132 if (data_length > DMP_RGN23_SIZE) {
18133 data_length = 0;
18134 goto out;
18135 }
18136 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18137out:
18138 mempool_free(mboxq, phba->mbox_mem_pool);
18139 if (mp) {
18140 lpfc_mbuf_free(phba, mp->virt, mp->phys);
18141 kfree(mp);
18142 }
18143 return data_length;
18144}
18145
18146/**
18147 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18148 * @phba: pointer to lpfc hba data structure.
18149 *
18150 * This function reads region 23 and parses the TLV for port status to
18151 * decide if the user disabled the port. If the TLV indicates the
18152 * port is disabled, the hba_flag is set accordingly.
18153 **/
18154void
18155lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18156{
18157 uint8_t *rgn23_data = NULL;
18158 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18159 uint32_t offset = 0;
18160
18161 /* Get adapter Region 23 data */
18162 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18163 if (!rgn23_data)
18164 goto out;
18165
18166 if (phba->sli_rev < LPFC_SLI_REV4)
18167 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18168 else {
18169 if_type = bf_get(lpfc_sli_intf_if_type,
18170 &phba->sli4_hba.sli_intf);
18171 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18172 goto out;
18173 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18174 }
18175
18176 if (!data_size)
18177 goto out;
18178
18179 /* Check the region signature first */
18180 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18182 "2619 Config region 23 has bad signature\n");
18183 goto out;
18184 }
18185 offset += 4;
18186
18187 /* Check the data structure version */
18188 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18190 "2620 Config region 23 has bad version\n");
18191 goto out;
18192 }
18193 offset += 4;
18194
18195 /* Parse TLV entries in the region */
18196 while (offset < data_size) {
18197 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18198 break;
18199 /*
18200 * If the TLV is not a driver-specific TLV or the driver id is
18201 * not the linux driver id, skip the record.
18202 */
18203 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18204 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18205 (rgn23_data[offset + 3] != 0)) {
18206 offset += rgn23_data[offset + 1] * 4 + 4;
18207 continue;
18208 }
18209
18210 /* Driver found a driver specific TLV in the config region */
18211 sub_tlv_len = rgn23_data[offset + 1] * 4;
18212 offset += 4;
18213 tlv_offset = 0;
18214
18215 /*
18216 * Search for configured port state sub-TLV.
18217 */
18218 while ((offset < data_size) &&
18219 (tlv_offset < sub_tlv_len)) {
18220 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18221 offset += 4;
18222 tlv_offset += 4;
18223 break;
18224 }
18225 if (rgn23_data[offset] != PORT_STE_TYPE) {
18226 offset += rgn23_data[offset + 1] * 4 + 4;
18227 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18228 continue;
18229 }
18230
18231 /* This HBA contains PORT_STE configured */
18232 if (!rgn23_data[offset + 2])
18233 phba->hba_flag |= LINK_DISABLED;
18234
18235 goto out;
18236 }
18237 }
18238
18239out:
18240 kfree(rgn23_data);
18241 return;
18242}
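
/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It shows the
 * region 23 TLV layout walked by lpfc_sli_read_link_ste() above: each
 * record is a type byte and a length-in-words byte followed by
 * length * 4 bytes of data, so the stride to the next record is
 * length * 4 + 4.
 */
static bool __maybe_unused
lpfc_example_rgn23_has_tlv(uint8_t *rgn23_data, uint32_t data_size,
			   uint8_t wanted_type)
{
	uint32_t offset = 8;	/* skip the signature and version words */

	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		if (rgn23_data[offset] == wanted_type)
			return true;
		offset += rgn23_data[offset + 1] * 4 + 4;
	}
	return false;
}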
18243
18244/**
18245 * lpfc_wr_object - write an object to the firmware
18246 * @phba: HBA structure that indicates port to create a queue on.
18247 * @dmabuf_list: list of dmabufs to write to the port.
18248 * @size: the total byte value of the objects to write to the port.
18249 * @offset: the current offset to be used to start the transfer.
18250 *
18251 * This routine will create a wr_object mailbox command to send to the port.
18252 * The mailbox command will be constructed using the dma buffers described in
18253 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18254 * BDEs as the embedded mailbox can support. The @offset variable will be
18255 * used to indicate the starting offset of the transfer and will also return
18256 * the offset after the write object mailbox has completed. @size is used to
18257 * determine the end of the object and whether the eof bit should be set.
18258 *
18259 * Return 0 if successful; @offset will contain the new offset to use
18260 * for the next write.
18261 * Return negative value for error cases.
18262 **/
18263int
18264lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
18265 uint32_t size, uint32_t *offset)
18266{
18267 struct lpfc_mbx_wr_object *wr_object;
18268 LPFC_MBOXQ_t *mbox;
18269 int rc = 0, i = 0;
18270 uint32_t shdr_status, shdr_add_status;
18271 uint32_t mbox_tmo;
18272 union lpfc_sli4_cfg_shdr *shdr;
18273 struct lpfc_dmabuf *dmabuf;
18274 uint32_t written = 0;
18275
18276 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18277 if (!mbox)
18278 return -ENOMEM;
18279
18280 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
18281 LPFC_MBOX_OPCODE_WRITE_OBJECT,
18282 sizeof(struct lpfc_mbx_wr_object) -
18283 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18284
18285 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
18286 wr_object->u.request.write_offset = *offset;
18287 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
18288 wr_object->u.request.object_name[0] =
18289 cpu_to_le32(wr_object->u.request.object_name[0]);
18290 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
18291 list_for_each_entry(dmabuf, dmabuf_list, list) {
18292 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
18293 break;
18294 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
18295 wr_object->u.request.bde[i].addrHigh =
18296 putPaddrHigh(dmabuf->phys);
18297 if (written + SLI4_PAGE_SIZE >= size) {
18298 wr_object->u.request.bde[i].tus.f.bdeSize =
18299 (size - written);
18300 written += (size - written);
18301 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
18302 } else {
18303 wr_object->u.request.bde[i].tus.f.bdeSize =
18304 SLI4_PAGE_SIZE;
18305 written += SLI4_PAGE_SIZE;
18306 }
18307 i++;
18308 }
18309 wr_object->u.request.bde_count = i;
18310 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
18311 if (!phba->sli4_hba.intr_enable)
18312 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18313 else {
18314 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18315 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18316 }
18317 /* The IOCTL status is embedded in the mailbox subheader. */
18318 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18319 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18320 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18321 if (rc != MBX_TIMEOUT)
18322 mempool_free(mbox, phba->mbox_mem_pool);
18323 if (shdr_status || shdr_add_status || rc) {
18324 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18325 "3025 Write Object mailbox failed with "
18326 "status x%x add_status x%x, mbx status x%x\n",
18327 shdr_status, shdr_add_status, rc);
18328 rc = -ENXIO;
18329 } else
18330 *offset += wr_object->u.response.actual_write_length;
18331 return rc;
18332}
18333
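/*
 * Illustrative sketch added for clarity; it is not part of the upstream
 * driver and the lpfc_example_* name is hypothetical. It shows how a
 * caller is expected to drive lpfc_wr_object() when writing a large
 * object: @offset is both input and output, so one call is issued per
 * batch of dma buffers and the updated offset is carried between calls.
 */
static int __maybe_unused
lpfc_example_wr_batch(struct lpfc_hba *phba, struct list_head *batch_list,
		      uint32_t total_size, uint32_t *offset)
{
	int rc;

	/* batch_list holds up to LPFC_MBX_WR_CONFIG_MAX_BDE dma buffers */
	rc = lpfc_wr_object(phba, batch_list, total_size, offset);
	if (rc)
		return rc;	/* mailbox failed; *offset was not advanced */
	/* *offset now reflects the bytes the port actually accepted */
	return 0;
}
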
18334/**
18335 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
18336 * @vport: pointer to vport data structure.
18337 *
18338 * This function iterate through the mailboxq and clean up all REG_LOGIN
18339 * and REG_VPI mailbox commands associated with the vport. This function
18340 * is called when driver want to restart discovery of the vport due to
18341 * a Clear Virtual Link event.
18342 **/
18343void
18344lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18345{
18346 struct lpfc_hba *phba = vport->phba;
18347 LPFC_MBOXQ_t *mb, *nextmb;
18348 struct lpfc_dmabuf *mp;
78730cfe 18349 struct lpfc_nodelist *ndlp;
d439d286 18350 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 18351 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 18352 LIST_HEAD(mbox_cmd_list);
63e801ce 18353 uint8_t restart_loop;
695a814e 18354
d439d286 18355 /* Clean up internally queued mailbox commands with the vport */
695a814e
JS
18356 spin_lock_irq(&phba->hbalock);
18357 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18358 if (mb->vport != vport)
18359 continue;
18360
18361 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18362 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18363 continue;
18364
d439d286
JS
18365 list_del(&mb->list);
18366 list_add_tail(&mb->list, &mbox_cmd_list);
18367 }
18368 /* Clean up active mailbox command with the vport */
18369 mb = phba->sli.mbox_active;
18370 if (mb && (mb->vport == vport)) {
18371 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
18372 (mb->u.mb.mbxCommand == MBX_REG_VPI))
18373 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18374 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18375 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
18376 /* Put reference count for delayed processing */
18377 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
18378 /* Unregister the RPI when mailbox complete */
18379 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18380 }
18381 }
63e801ce
JS
18382 /* Cleanup any mailbox completions which are not yet processed */
18383 do {
18384 restart_loop = 0;
18385 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
18386 /*
18387 * If this mailox is already processed or it is
18388 * for another vport ignore it.
18389 */
18390 if ((mb->vport != vport) ||
18391 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
18392 continue;
18393
18394 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18395 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18396 continue;
18397
18398 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18399 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18400 ndlp = (struct lpfc_nodelist *)mb->context2;
18401 /* Unregister the RPI when mailbox complete */
18402 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18403 restart_loop = 1;
18404 spin_unlock_irq(&phba->hbalock);
18405 spin_lock(shost->host_lock);
18406 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18407 spin_unlock(shost->host_lock);
18408 spin_lock_irq(&phba->hbalock);
18409 break;
18410 }
18411 }
18412 } while (restart_loop);
18413
d439d286
JS
18414 spin_unlock_irq(&phba->hbalock);
18415
18416 /* Release the cleaned-up mailbox commands */
18417 while (!list_empty(&mbox_cmd_list)) {
18418 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
695a814e
JS
18419 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18420 mp = (struct lpfc_dmabuf *) (mb->context1);
18421 if (mp) {
18422 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
18423 kfree(mp);
18424 }
78730cfe 18425 ndlp = (struct lpfc_nodelist *) mb->context2;
d439d286 18426 mb->context2 = NULL;
78730cfe 18427 if (ndlp) {
ec21b3b0 18428 spin_lock(shost->host_lock);
589a52d6 18429 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 18430 spin_unlock(shost->host_lock);
78730cfe 18431 lpfc_nlp_put(ndlp);
78730cfe 18432 }
695a814e 18433 }
695a814e
JS
18434 mempool_free(mb, phba->mbox_mem_pool);
18435 }
d439d286
JS
18436
18437 /* Release the ndlp with the cleaned-up active mailbox command */
18438 if (act_mbx_ndlp) {
18439 spin_lock(shost->host_lock);
18440 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18441 spin_unlock(shost->host_lock);
18442 lpfc_nlp_put(act_mbx_ndlp);
695a814e 18443 }
695a814e
JS
18444}
18445
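/*
 * Example (sketch, not in the driver): the locking idiom used by
 * lpfc_cleanup_pending_mbox() reduced to its essentials. Matching
 * entries are moved onto a private list while hbalock is held, then
 * released after the lock is dropped so the free path never runs
 * under the spinlock. The function name is hypothetical.
 */
static void example_flush_vport_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	LIST_HEAD(local_list);

	/* Unlink the vport's commands under the lock */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list)
		if (mb->vport == vport)
			list_move_tail(&mb->list, &local_list);
	spin_unlock_irq(&phba->hbalock);

	/* Free them with the lock dropped */
	while (!list_empty(&local_list)) {
		list_remove_head(&local_list, mb, LPFC_MBOXQ_t, list);
		mempool_free(mb, phba->mbox_mem_pool);
	}
}
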
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit the IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no available SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe128;
	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
	uint32_t txq_cnt = 0;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n",
					txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources are secured,
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

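/*
 * Example (sketch, not in the driver): a hypothetical call site for
 * lpfc_drain_txq(). The intent is to retry the deferred ELS IOCBs
 * once SGL resources are expected to be free again; the return value
 * is roughly the number of IOCBs still left on the txq. The function
 * name and log message number below are hypothetical.
 */
static void example_on_sgl_available(struct lpfc_hba *phba)
{
	uint32_t remaining;

	remaining = lpfc_drain_txq(phba);
	if (remaining)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0000 %u ELS IOCBs still deferred on txq\n",
				remaining);
}
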
/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the WQE contains a single
 * BDE then it is converted to a single sli4_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
				       bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}

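/*
 * Example (sketch, not in the driver): the word2 access pattern used
 * in lpfc_wqe_bpl2sgl() above. SGE word2 lives in little-endian wire
 * format, so it is swapped to CPU order, the bit fields are updated
 * with bf_set(), and it is swapped back before the hardware can see
 * it. The function name is hypothetical.
 */
static void example_mark_last_data_sge(struct sli4_sge *sgl)
{
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
	sgl->word2 = cpu_to_le32(sgl->word2);
}
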
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @ring_number: Base sli ring number
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		if (lpfc_sli4_wq_put(wq, wqe)) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		ctxp = pwqe->context2;
		sglq = ctxp->rqb_buffer->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		if (lpfc_sli4_wq_put(wq, wqe)) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}
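
/*
 * Example (sketch, not in the driver): how a submission path selects
 * one of the three branches above. Setting LPFC_IO_NVME routes the
 * WQE to the NVME work queue selected by hba_wqidx; ring_number is
 * not used on the NVME paths. The function name and error mapping
 * are hypothetical and purely illustrative.
 */
static int example_submit_nvme_wqe(struct lpfc_hba *phba,
				   struct lpfc_iocbq *pwqe)
{
	int rc;

	pwqe->iocb_flag |= LPFC_IO_NVME;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, pwqe);
	if (rc == WQE_BUSY)
		return -EBUSY;	/* no resources; caller may retry */
	return rc ? -EIO : 0;
}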