[SCSI] lpfc 8.3.35: Fix incorrect comment in T10 DIF attributes
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);

	return 0;
}
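
/*
 * Illustration (assumed values, not from the original source): the
 * queue-full test above is classic ring-buffer arithmetic that keeps one
 * slot unused.  With entry_count = 4, host_index = 3 and hba_index = 0:
 *
 *	idx = (3 + 1) % 4 = 0 == hba_index  =>  full, return -ENOMEM
 *
 * Leaving one slot empty lets host_index == hba_index unambiguously mean
 * "queue empty" rather than "queue full".
 */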

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
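
/*
 * Illustration (assumed values): releasing on a queue with entry_count = 8,
 * hba_index = 6 and index = 1 walks 6 -> 7 -> 0 -> 1 and returns 3, i.e.
 * the count wraps around the ring rather than computing index - hba_index.
 */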

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
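
/*
 * Note (illustrative, based only on the doorbell fields set above): a
 * single doorbell write both acknowledges the popped EQEs and optionally
 * re-arms the queue.  For example, releasing 3 EQEs with
 * arm == LPFC_QUEUE_REARM sets num_released = 3 plus the arm/eqci bits,
 * so the HBA can reuse the 3 slots and will raise a new interrupt for
 * the next event.
 */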

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}
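
/*
 * Illustration (assumed entry_repost value): the header and data queues
 * advance in lockstep, and the doorbell is batched.  With
 * hq->entry_repost = 8, only every 8th successful put (host_index becoming
 * a multiple of 8) rings the doorbell, posting 8 buffers to the HBA in one
 * write instead of one MMIO write per receive buffer pair.
 */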

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect
 * consumption of one Receive Queue Entry by the HBA. When the HBA indicates
 * that it has consumed an entry the host calls this function to update the
 * queue's internal pointers. This routine returns the number of entries that
 * were consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling __lpfc_clr_active_rrq
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *        < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	rrq->send_rrq = send_rrq;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	rrq->send_rrq = send_rrq;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
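
/*
 * Illustration (assumed fc_ratov value): rrq_stop_time above is one RATOV
 * plus one second in the future.  With fc_ratov = 10 the XRI stays marked
 * busy in the node's xri_bitmap for roughly 11 seconds, after which
 * lpfc_handle_rrq_active() either sends the RRQ ELS or simply clears the
 * bit and frees the rrq.
 */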

/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
		 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}
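
/*
 * Note (illustrative, assumed fc_ratov value): the ELS timeout above is
 * twice RATOV.  With fc_ratov = 10, an ELS command queued here re-arms
 * els_tmofunc to fire 20 seconds out, so the timer only expires if no
 * further ELS traffic restarts it in the meantime.
 */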

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
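
/*
 * Illustration (assumed sizes): the lookup table grows in
 * LPFC_IOCBQ_LOOKUP_INCREMENT steps.  If iocbq_lookup_len is 1024 and all
 * iotags are in use, a larger array is allocated outside the lock, the old
 * contents are copied under the lock, and the next iotag (1024) is handed
 * out; growth stops once the 16-bit iotag space approaches 0xffff.
 */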

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 * posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
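
/*
 * Note (illustrative): the wmb() above orders the IOCB copy into the ring
 * slot ahead of the cmdPutInx update at the end of the function, so the
 * HBA can never observe the new put index while the slot still holds
 * stale command data.
 */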

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
1552
e59058c4 1553/**
3621a710 1554 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1555 * @phba: Pointer to HBA context object.
1556 * @pring: Pointer to driver SLI ring object.
1557 *
1558 * This function updates the chip attention register bit for the
1559 * given ring to inform HBA that there is more work to be done
1560 * in this ring. The caller is not required to hold any lock.
1561 **/
dea3101e 1562static void
2e0fef85 1563lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1564{
1565 int ringno = pring->ringno;
1566
1567 /*
1568 * Tell the HBA that there is work to do in this ring.
1569 */
34b02dcd
JS
1570 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1571 wmb();
1572 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1573 readl(phba->CAregaddr); /* flush */
1574 }
dea3101e 1575}
1576
e59058c4 1577/**
3621a710 1578 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1579 * @phba: Pointer to HBA context object.
1580 * @pring: Pointer to driver SLI ring object.
1581 *
1582 * This function is called with hbalock held to post pending iocbs
1583 * in the txq to the firmware. This function is called when driver
1584 * detects space available in the ring.
1585 **/
dea3101e 1586static void
2e0fef85 1587lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1588{
1589 IOCB_t *iocb;
1590 struct lpfc_iocbq *nextiocb;
1591
1592 /*
1593 * Check to see if:
1594 * (a) there is anything on the txq to send
1595 * (b) link is up
1596 * (c) link attention events can be processed (fcp ring only)
1597 * (d) IOCB processing is not blocked by the outstanding mbox command.
1598 */
1599 if (pring->txq_cnt &&
2e0fef85 1600 lpfc_is_link_up(phba) &&
dea3101e 1601 (pring->ringno != phba->sli.fcp_ring ||
0b727fea 1602 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1603
1604 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1605 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1606 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1607
1608 if (iocb)
1609 lpfc_sli_update_ring(phba, pring);
1610 else
1611 lpfc_sli_update_full_ring(phba, pring);
1612 }
1613
1614 return;
1615}
1616
e59058c4 1617/**
3621a710 1618 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1619 * @phba: Pointer to HBA context object.
1620 * @hbqno: HBQ number.
1621 *
1622 * This function is called with hbalock held to get the next
 1623 * available slot for the given HBQ. If there is a free slot
 1624 * available for the HBQ, it will return a pointer to the next available
 1625 * HBQ entry; otherwise it will return NULL.
1626 **/
a6ababd2 1627static struct lpfc_hbq_entry *
ed957684
JS
1628lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1629{
1630 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1631
1632 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1633 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1634 hbqp->next_hbqPutIdx = 0;
1635
1636 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1637 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1638 uint32_t getidx = le32_to_cpu(raw_index);
1639
1640 hbqp->local_hbqGetIdx = getidx;
1641
1642 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1643 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1644 LOG_SLI | LOG_VPORT,
e8b62011 1645 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1646 "%u is >= hbqp->entry_count %u\n",
e8b62011 1647 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1648 hbqp->entry_count);
1649
1650 phba->link_state = LPFC_HBA_ERROR;
1651 return NULL;
1652 }
1653
1654 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1655 return NULL;
1656 }
1657
51ef4c26
JS
1658 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1659 hbqp->hbqPutIdx;
ed957684
JS
1660}
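/*
 * Illustrative sketch, not lpfc code: the full-ring convention that
 * lpfc_sli_next_hbq_slot() above relies on.  A circular ring is
 * treated as full when advancing the put index would land on the get
 * index, so one slot is always sacrificed to distinguish "full" from
 * "empty".  All names here are invented for the example.
 */
#include <stdint.h>

struct ring_idx {
	uint32_t put_idx;	/* next slot the driver will fill */
	uint32_t get_idx;	/* next slot the hardware will drain */
	uint32_t entry_count;
};

/* Return the slot to fill, or -1 when the ring is full. */
static int ring_next_slot(const struct ring_idx *r)
{
	uint32_t next = r->put_idx + 1;

	if (next >= r->entry_count)
		next = 0;
	return (next == r->get_idx) ? -1 : (int)r->put_idx;
}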
1661
e59058c4 1662/**
3621a710 1663 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1664 * @phba: Pointer to HBA context object.
1665 *
1666 * This function is called with no lock held to free all the
1667 * hbq buffers while uninitializing the SLI interface. It also
1668 * frees the HBQ buffers returned by the firmware but not yet
1669 * processed by the upper layers.
1670 **/
ed957684
JS
1671void
1672lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1673{
92d7f7b0
JS
1674 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1675 struct hbq_dmabuf *hbq_buf;
3163f725 1676 unsigned long flags;
51ef4c26 1677 int i, hbq_count;
3163f725 1678 uint32_t hbqno;
ed957684 1679
51ef4c26 1680 hbq_count = lpfc_sli_hbq_count();
ed957684 1681 /* Return all memory used by all HBQs */
3163f725 1682 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1683 for (i = 0; i < hbq_count; ++i) {
1684 list_for_each_entry_safe(dmabuf, next_dmabuf,
1685 &phba->hbqs[i].hbq_buffer_list, list) {
1686 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1687 list_del(&hbq_buf->dbuf.list);
1688 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1689 }
a8adb832 1690 phba->hbqs[i].buffer_count = 0;
ed957684 1691 }
3163f725 1692 /* Return all HBQ buffers that are in flight */
3772a991
JS
1693 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1694 list) {
3163f725
JS
1695 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1696 list_del(&hbq_buf->dbuf.list);
1697 if (hbq_buf->tag == -1) {
1698 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1699 (phba, hbq_buf);
1700 } else {
1701 hbqno = hbq_buf->tag >> 16;
1702 if (hbqno >= LPFC_MAX_HBQS)
1703 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1704 (phba, hbq_buf);
1705 else
1706 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1707 hbq_buf);
1708 }
1709 }
1710
1711 /* Mark the HBQs not in use */
1712 phba->hbq_in_use = 0;
1713 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
1714}
1715
e59058c4 1716/**
3621a710 1717 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
1718 * @phba: Pointer to HBA context object.
1719 * @hbqno: HBQ number.
1720 * @hbq_buf: Pointer to HBQ buffer.
1721 *
 1722 * This function is called with the hbalock held to post an
 1723 * hbq buffer to the firmware. If the function finds an empty
 1724 * slot in the HBQ, it will post the buffer. The function will return
 1725 * zero if it successfully posts the buffer, otherwise it will
 1726 * return an error.
1727 **/
3772a991 1728static int
ed957684 1729lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 1730 struct hbq_dmabuf *hbq_buf)
3772a991
JS
1731{
1732 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1733}
1734
1735/**
1736 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1737 * @phba: Pointer to HBA context object.
1738 * @hbqno: HBQ number.
1739 * @hbq_buf: Pointer to HBQ buffer.
1740 *
1741 * This function is called with the hbalock held to post a hbq buffer to the
1742 * firmware. If the function finds an empty slot in the HBQ, it will post the
 1743 * buffer and place it on the hbq_buffer_list. The function will return zero if
 1744 * it successfully posts the buffer; otherwise it will return an error.
1745 **/
1746static int
1747lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1748 struct hbq_dmabuf *hbq_buf)
ed957684
JS
1749{
1750 struct lpfc_hbq_entry *hbqe;
92d7f7b0 1751 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684
JS
1752
1753 /* Get next HBQ entry slot to use */
1754 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1755 if (hbqe) {
1756 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1757
92d7f7b0
JS
1758 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1759 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
51ef4c26 1760 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
ed957684 1761 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
1762 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1763 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1764 /* Sync SLIM */
ed957684
JS
1765 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1766 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 1767 /* flush */
ed957684 1768 readl(phba->hbq_put + hbqno);
51ef4c26 1769 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
1770 return 0;
1771 } else
1772 return -ENOMEM;
ed957684
JS
1773}
1774
4f774513
JS
1775/**
1776 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1777 * @phba: Pointer to HBA context object.
1778 * @hbqno: HBQ number.
1779 * @hbq_buf: Pointer to HBQ buffer.
1780 *
1781 * This function is called with the hbalock held to post an RQE to the SLI4
1782 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1783 * the hbq_buffer_list and return zero, otherwise it will return an error.
1784 **/
1785static int
1786lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1787 struct hbq_dmabuf *hbq_buf)
1788{
1789 int rc;
1790 struct lpfc_rqe hrqe;
1791 struct lpfc_rqe drqe;
1792
1793 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1794 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1795 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1796 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1797 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1798 &hrqe, &drqe);
1799 if (rc < 0)
1800 return rc;
1801 hbq_buf->tag = rc;
1802 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1803 return 0;
1804}
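/*
 * Illustrative sketch, not lpfc code: how a 64-bit DMA address is
 * split into the two 32-bit RQE words filled in by
 * lpfc_sli_hbq_to_firmware_s4() above (the role played by
 * putPaddrLow()/putPaddrHigh()).  rqe_addr is an invented stand-in
 * for struct lpfc_rqe.
 */
#include <stdint.h>

struct rqe_addr {
	uint32_t address_lo;
	uint32_t address_hi;
};

static void rqe_set_addr(struct rqe_addr *rqe, uint64_t dma_addr)
{
	rqe->address_lo = (uint32_t)(dma_addr & 0xffffffffu);
	rqe->address_hi = (uint32_t)(dma_addr >> 32);
}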
1805
e59058c4 1806/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
1807static struct lpfc_hbq_init lpfc_els_hbq = {
1808 .rn = 1,
def9c7a9 1809 .entry_count = 256,
92d7f7b0
JS
1810 .mask_count = 0,
1811 .profile = 0,
51ef4c26 1812 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 1813 .buffer_count = 0,
a257bf90
JS
1814 .init_count = 40,
1815 .add_count = 40,
92d7f7b0 1816};
ed957684 1817
e59058c4 1818/* HBQ for the extra ring if needed */
51ef4c26
JS
1819static struct lpfc_hbq_init lpfc_extra_hbq = {
1820 .rn = 1,
1821 .entry_count = 200,
1822 .mask_count = 0,
1823 .profile = 0,
1824 .ring_mask = (1 << LPFC_EXTRA_RING),
1825 .buffer_count = 0,
1826 .init_count = 0,
1827 .add_count = 5,
1828};
1829
e59058c4 1830/* Array of HBQs */
78b2d852 1831struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0 1832 &lpfc_els_hbq,
51ef4c26 1833 &lpfc_extra_hbq,
92d7f7b0 1834};
ed957684 1835
e59058c4 1836/**
3621a710 1837 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
1838 * @phba: Pointer to HBA context object.
1839 * @hbqno: HBQ number.
1840 * @count: Number of HBQ buffers to be posted.
1841 *
d7c255b2
JS
1842 * This function is called with no lock held to post more hbq buffers to the
1843 * given HBQ. The function returns the number of HBQ buffers successfully
1844 * posted.
e59058c4 1845 **/
311464ec 1846static int
92d7f7b0 1847lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 1848{
d7c255b2 1849 uint32_t i, posted = 0;
3163f725 1850 unsigned long flags;
92d7f7b0 1851 struct hbq_dmabuf *hbq_buffer;
d7c255b2 1852 LIST_HEAD(hbq_buf_list);
eafe1df9 1853 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 1854 return 0;
51ef4c26 1855
d7c255b2
JS
1856 if ((phba->hbqs[hbqno].buffer_count + count) >
1857 lpfc_hbq_defs[hbqno]->entry_count)
1858 count = lpfc_hbq_defs[hbqno]->entry_count -
1859 phba->hbqs[hbqno].buffer_count;
1860 if (!count)
1861 return 0;
1862 /* Allocate HBQ entries */
1863 for (i = 0; i < count; i++) {
1864 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1865 if (!hbq_buffer)
1866 break;
1867 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1868 }
3163f725
JS
1869 /* Check whether HBQ is still in use */
1870 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 1871 if (!phba->hbq_in_use)
d7c255b2
JS
1872 goto err;
1873 while (!list_empty(&hbq_buf_list)) {
1874 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1875 dbuf.list);
1876 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1877 (hbqno << 16));
3772a991 1878 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 1879 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
1880 posted++;
1881 } else
51ef4c26 1882 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 1883 }
3163f725 1884 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1885 return posted;
1886err:
eafe1df9 1887 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1888 while (!list_empty(&hbq_buf_list)) {
1889 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1890 dbuf.list);
1891 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1892 }
1893 return 0;
ed957684
JS
1894}
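/*
 * Illustrative sketch, not lpfc code: the locking pattern used by
 * lpfc_sli_hbqbuf_fill_hbqs() above -- allocate onto a private list
 * with no lock held (allocation may sleep), then take the lock once,
 * re-check that posting is still allowed, and free anything that
 * could not be committed.  Modeled with pthreads and malloc; all
 * names are invented for the example.
 */
#include <pthread.h>
#include <stdlib.h>

struct buf { struct buf *next; };

static pthread_mutex_t hbq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *posted_list;		/* protected by hbq_lock */
static int hbq_in_use = 1;		/* protected by hbq_lock */

static unsigned int fill_bufs(unsigned int count)
{
	struct buf *head = NULL, *b;
	unsigned int i, done = 0;

	/* Step 1: allocate everything with no lock held. */
	for (i = 0; i < count; i++) {
		b = malloc(sizeof(*b));
		if (!b)
			break;
		b->next = head;
		head = b;
	}

	/* Step 2: commit under the lock, re-checking the in-use flag. */
	pthread_mutex_lock(&hbq_lock);
	while (head && hbq_in_use) {
		b = head;
		head = b->next;
		b->next = posted_list;
		posted_list = b;
		done++;
	}
	pthread_mutex_unlock(&hbq_lock);

	/* Step 3: free whatever could not be posted. */
	while (head) {
		b = head;
		head = b->next;
		free(b);
	}
	return done;
}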
1895
e59058c4 1896/**
3621a710 1897 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
1898 * @phba: Pointer to HBA context object.
1899 * @qno: HBQ number.
1900 *
1901 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
1902 * is called with no lock held. The function returns the number of HBQ entries
1903 * successfully allocated.
e59058c4 1904 **/
92d7f7b0
JS
1905int
1906lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 1907{
def9c7a9
JS
1908 if (phba->sli_rev == LPFC_SLI_REV4)
1909 return 0;
1910 else
1911 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1912 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 1913}
ed957684 1914
e59058c4 1915/**
3621a710 1916 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
1917 * @phba: Pointer to HBA context object.
1918 * @qno: HBQ queue number.
1919 *
1920 * This function is called from SLI initialization code path with
1921 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 1922 * function returns the number of HBQ entries successfully allocated.
e59058c4 1923 **/
a6ababd2 1924static int
92d7f7b0
JS
1925lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1926{
def9c7a9
JS
1927 if (phba->sli_rev == LPFC_SLI_REV4)
1928 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 1929 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
1930 else
1931 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1932 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
1933}
1934
3772a991
JS
1935/**
1936 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 1937 * @rb_list: Pointer to the driver hbq buffer list to remove the
 1938 * first buffer from.
1939 *
1940 * This function removes the first hbq buffer on an hbq list and returns a
1941 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1942 **/
1943static struct hbq_dmabuf *
1944lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1945{
1946 struct lpfc_dmabuf *d_buf;
1947
1948 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1949 if (!d_buf)
1950 return NULL;
1951 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1952}
1953
e59058c4 1954/**
3621a710 1955 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
1956 * @phba: Pointer to HBA context object.
1957 * @tag: Tag of the hbq buffer.
1958 *
1959 * This function is called with hbalock held. This function searches
1960 * for the hbq buffer associated with the given tag in the hbq buffer
 1961 * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1962 * it returns NULL.
1963 **/
a6ababd2 1964static struct hbq_dmabuf *
92d7f7b0 1965lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 1966{
92d7f7b0
JS
1967 struct lpfc_dmabuf *d_buf;
1968 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
1969 uint32_t hbqno;
1970
1971 hbqno = tag >> 16;
a0a74e45 1972 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 1973 return NULL;
ed957684 1974
3772a991 1975 spin_lock_irq(&phba->hbalock);
51ef4c26 1976 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 1977 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 1978 if (hbq_buf->tag == tag) {
3772a991 1979 spin_unlock_irq(&phba->hbalock);
92d7f7b0 1980 return hbq_buf;
ed957684
JS
1981 }
1982 }
3772a991 1983 spin_unlock_irq(&phba->hbalock);
92d7f7b0 1984 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 1985 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 1986 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 1987 return NULL;
ed957684
JS
1988}
1989
e59058c4 1990/**
3621a710 1991 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
1992 * @phba: Pointer to HBA context object.
1993 * @hbq_buffer: Pointer to HBQ buffer.
1994 *
 1995 * This function is called with the hbalock held. This function gives back
1996 * the hbq buffer to firmware. If the HBQ does not have space to
1997 * post the buffer, it will free the buffer.
1998 **/
ed957684 1999void
51ef4c26 2000lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2001{
2002 uint32_t hbqno;
2003
51ef4c26
JS
2004 if (hbq_buffer) {
2005 hbqno = hbq_buffer->tag >> 16;
3772a991 2006 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2007 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2008 }
2009}
2010
e59058c4 2011/**
3621a710 2012 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2013 * @mbxCommand: mailbox command code.
2014 *
2015 * This function is called by the mailbox event handler function to verify
2016 * that the completed mailbox command is a legitimate mailbox command. If the
2017 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2018 * and the mailbox event handler will take the HBA offline.
2019 **/
dea3101e 2020static int
2021lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2022{
2023 uint8_t ret;
2024
2025 switch (mbxCommand) {
2026 case MBX_LOAD_SM:
2027 case MBX_READ_NV:
2028 case MBX_WRITE_NV:
a8adb832 2029 case MBX_WRITE_VPARMS:
dea3101e 2030 case MBX_RUN_BIU_DIAG:
2031 case MBX_INIT_LINK:
2032 case MBX_DOWN_LINK:
2033 case MBX_CONFIG_LINK:
2034 case MBX_CONFIG_RING:
2035 case MBX_RESET_RING:
2036 case MBX_READ_CONFIG:
2037 case MBX_READ_RCONFIG:
2038 case MBX_READ_SPARM:
2039 case MBX_READ_STATUS:
2040 case MBX_READ_RPI:
2041 case MBX_READ_XRI:
2042 case MBX_READ_REV:
2043 case MBX_READ_LNK_STAT:
2044 case MBX_REG_LOGIN:
2045 case MBX_UNREG_LOGIN:
dea3101e 2046 case MBX_CLEAR_LA:
2047 case MBX_DUMP_MEMORY:
2048 case MBX_DUMP_CONTEXT:
2049 case MBX_RUN_DIAGS:
2050 case MBX_RESTART:
2051 case MBX_UPDATE_CFG:
2052 case MBX_DOWN_LOAD:
2053 case MBX_DEL_LD_ENTRY:
2054 case MBX_RUN_PROGRAM:
2055 case MBX_SET_MASK:
09372820 2056 case MBX_SET_VARIABLE:
dea3101e 2057 case MBX_UNREG_D_ID:
41415862 2058 case MBX_KILL_BOARD:
dea3101e 2059 case MBX_CONFIG_FARP:
41415862 2060 case MBX_BEACON:
dea3101e 2061 case MBX_LOAD_AREA:
2062 case MBX_RUN_BIU_DIAG64:
2063 case MBX_CONFIG_PORT:
2064 case MBX_READ_SPARM64:
2065 case MBX_READ_RPI64:
2066 case MBX_REG_LOGIN64:
76a95d75 2067 case MBX_READ_TOPOLOGY:
09372820 2068 case MBX_WRITE_WWN:
dea3101e 2069 case MBX_SET_DEBUG:
2070 case MBX_LOAD_EXP_ROM:
57127f15 2071 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2072 case MBX_REG_VPI:
2073 case MBX_UNREG_VPI:
858c9f6c 2074 case MBX_HEARTBEAT:
84774a4d
JS
2075 case MBX_PORT_CAPABILITIES:
2076 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2077 case MBX_SLI4_CONFIG:
2078 case MBX_SLI4_REQ_FTRS:
2079 case MBX_REG_FCFI:
2080 case MBX_UNREG_FCFI:
2081 case MBX_REG_VFI:
2082 case MBX_UNREG_VFI:
2083 case MBX_INIT_VPI:
2084 case MBX_INIT_VFI:
2085 case MBX_RESUME_RPI:
c7495937
JS
2086 case MBX_READ_EVENT_LOG_STATUS:
2087 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2088 case MBX_SECURITY_MGMT:
2089 case MBX_AUTH_PORT:
940eb687 2090 case MBX_ACCESS_VDATA:
dea3101e 2091 ret = mbxCommand;
2092 break;
2093 default:
2094 ret = MBX_SHUTDOWN;
2095 break;
2096 }
2e0fef85 2097 return ret;
dea3101e 2098}
e59058c4
JS
2099
2100/**
3621a710 2101 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2102 * @phba: Pointer to HBA context object.
2103 * @pmboxq: Pointer to mailbox command.
2104 *
2105 * This is completion handler function for mailbox commands issued from
2106 * lpfc_sli_issue_mbox_wait function. This function is called by the
2107 * mailbox event handler function with no lock held. This function
2108 * will wake up thread waiting on the wait queue pointed by context1
2109 * of the mailbox.
2110 **/
04c68496 2111void
2e0fef85 2112lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2113{
2114 wait_queue_head_t *pdone_q;
858c9f6c 2115 unsigned long drvr_flag;
dea3101e 2116
2117 /*
2118 * If pdone_q is empty, the driver thread gave up waiting and
2119 * continued running.
2120 */
7054a606 2121 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2122 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 2123 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2124 if (pdone_q)
2125 wake_up_interruptible(pdone_q);
858c9f6c 2126 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2127 return;
2128}
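/*
 * Illustrative sketch, not lpfc code: the race that
 * lpfc_sli_wake_mbox_wait() above guards against.  The completion
 * side sets a "done" flag before looking for the waiter, and the
 * waiter detaches itself under the same lock when it gives up, so a
 * late completion signals nobody instead of touching freed memory.
 * Modeled with pthreads; all names are invented for the example.
 */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct mbox_wait {
	pthread_mutex_t lock;
	pthread_cond_t *waiter;	/* NULL once the issuer gave up */
	bool done;		/* plays the role of LPFC_MBX_WAKE */
};

/* Completion handler side. */
static void mbox_complete(struct mbox_wait *m)
{
	pthread_mutex_lock(&m->lock);
	m->done = true;
	if (m->waiter)
		pthread_cond_signal(m->waiter);
	pthread_mutex_unlock(&m->lock);
}

/* Issuing thread side: returns true if the command completed. */
static bool mbox_wait(struct mbox_wait *m, pthread_cond_t *cv,
		      const struct timespec *deadline)
{
	bool ok;

	pthread_mutex_lock(&m->lock);
	m->waiter = cv;
	while (!m->done) {
		if (pthread_cond_timedwait(cv, &m->lock, deadline))
			break;			/* timed out */
	}
	ok = m->done;
	m->waiter = NULL;	/* late completions must not signal us */
	pthread_mutex_unlock(&m->lock);
	return ok;
}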
2129
e59058c4
JS
2130
2131/**
3621a710 2132 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2133 * @phba: Pointer to HBA context object.
2134 * @pmb: Pointer to mailbox object.
2135 *
2136 * This function is the default mailbox completion handler. It
2137 * frees the memory resources associated with the completed mailbox
2138 * command. If the completed command is a REG_LOGIN mailbox command,
2139 * this function will issue a UREG_LOGIN to re-claim the RPI.
2140 **/
dea3101e 2141void
2e0fef85 2142lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2143{
d439d286 2144 struct lpfc_vport *vport = pmb->vport;
dea3101e 2145 struct lpfc_dmabuf *mp;
d439d286 2146 struct lpfc_nodelist *ndlp;
5af5eee7 2147 struct Scsi_Host *shost;
04c68496 2148 uint16_t rpi, vpi;
7054a606
JS
2149 int rc;
2150
dea3101e 2151 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 2152
dea3101e 2153 if (mp) {
2154 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2155 kfree(mp);
2156 }
7054a606
JS
2157
2158 /*
2159 * If a REG_LOGIN succeeded after node is destroyed or node
2160 * is in re-discovery driver need to cleanup the RPI.
2161 */
2e0fef85 2162 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2163 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2164 !pmb->u.mb.mbxStatus) {
2165 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2166 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2167 lpfc_unreg_login(phba, vpi, rpi, pmb);
92d7f7b0 2168 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2169 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2170 if (rc != MBX_NOT_FINISHED)
2171 return;
2172 }
2173
695a814e
JS
2174 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2175 !(phba->pport->load_flag & FC_UNLOADING) &&
2176 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2177 shost = lpfc_shost_from_vport(vport);
2178 spin_lock_irq(shost->host_lock);
2179 vport->vpi_state |= LPFC_VPI_REGISTERED;
2180 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2181 spin_unlock_irq(shost->host_lock);
695a814e
JS
2182 }
2183
d439d286
JS
2184 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2185 ndlp = (struct lpfc_nodelist *)pmb->context2;
2186 lpfc_nlp_put(ndlp);
2187 pmb->context2 = NULL;
2188 }
2189
dcf2a4e0
JS
2190 /* Check security permission status on INIT_LINK mailbox command */
2191 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2192 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2193 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2194 "2860 SLI authentication is required "
2195 "for INIT_LINK but has not done yet\n");
2196
04c68496
JS
2197 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2198 lpfc_sli4_mbox_cmd_free(phba, pmb);
2199 else
2200 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2201}
2202
e59058c4 2203/**
3621a710 2204 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2205 * @phba: Pointer to HBA context object.
2206 *
 2207 * This function is called with no lock held. This function processes all
 2208 * the completed mailbox commands and gives them to the upper layers. The
 2209 * interrupt service routine processes the mailbox completion interrupt and
 2210 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 2211 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
 2212 * returns the completed mailbox commands in the mboxq_cmpl queue to the
 2213 * upper layers. This function returns the mailbox commands to the upper
 2214 * layer by calling the completion handler function of each mailbox.
2215 **/
dea3101e 2216int
2e0fef85 2217lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2218{
92d7f7b0 2219 MAILBOX_t *pmbox;
dea3101e 2220 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2221 int rc;
2222 LIST_HEAD(cmplq);
dea3101e 2223
2224 phba->sli.slistat.mbox_event++;
2225
92d7f7b0
JS
 2226 /* Get all completed mailbox buffers into the cmplq */
2227 spin_lock_irq(&phba->hbalock);
2228 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2229 spin_unlock_irq(&phba->hbalock);
dea3101e 2230
92d7f7b0
JS
2231 /* Get a Mailbox buffer to setup mailbox commands for callback */
2232 do {
2233 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2234 if (pmb == NULL)
2235 break;
2e0fef85 2236
04c68496 2237 pmbox = &pmb->u.mb;
dea3101e 2238
858c9f6c
JS
2239 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2240 if (pmb->vport) {
2241 lpfc_debugfs_disc_trc(pmb->vport,
2242 LPFC_DISC_TRC_MBOX_VPORT,
2243 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2244 (uint32_t)pmbox->mbxCommand,
2245 pmbox->un.varWords[0],
2246 pmbox->un.varWords[1]);
2247 }
2248 else {
2249 lpfc_debugfs_disc_trc(phba->pport,
2250 LPFC_DISC_TRC_MBOX,
2251 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2252 (uint32_t)pmbox->mbxCommand,
2253 pmbox->un.varWords[0],
2254 pmbox->un.varWords[1]);
2255 }
2256 }
2257
dea3101e 2258 /*
 2259 * It is a fatal error if an unknown mbox command completes.
2260 */
2261 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2262 MBX_SHUTDOWN) {
af901ca1 2263 /* Unknown mailbox command compl */
92d7f7b0 2264 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2265 "(%d):0323 Unknown Mailbox command "
a183a15f 2266 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2267 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2268 pmbox->mbxCommand,
a183a15f
JS
2269 lpfc_sli_config_mbox_subsys_get(phba,
2270 pmb),
2271 lpfc_sli_config_mbox_opcode_get(phba,
2272 pmb));
2e0fef85 2273 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2274 phba->work_hs = HS_FFER3;
2275 lpfc_handle_eratt(phba);
92d7f7b0 2276 continue;
dea3101e 2277 }
2278
dea3101e 2279 if (pmbox->mbxStatus) {
2280 phba->sli.slistat.mbox_stat_err++;
2281 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2282 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2283 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2284 LOG_MBOX | LOG_SLI,
2285 "(%d):0305 Mbox cmd cmpl "
2286 "error - RETRYing Data: x%x "
2287 "(x%x/x%x) x%x x%x x%x\n",
2288 pmb->vport ? pmb->vport->vpi : 0,
2289 pmbox->mbxCommand,
2290 lpfc_sli_config_mbox_subsys_get(phba,
2291 pmb),
2292 lpfc_sli_config_mbox_opcode_get(phba,
2293 pmb),
2294 pmbox->mbxStatus,
2295 pmbox->un.varWords[0],
2296 pmb->vport->port_state);
dea3101e 2297 pmbox->mbxStatus = 0;
2298 pmbox->mbxOwner = OWN_HOST;
dea3101e 2299 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2300 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2301 continue;
dea3101e 2302 }
2303 }
2304
2305 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2306 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 2307 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
dea3101e 2308 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
92d7f7b0 2309 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2310 pmbox->mbxCommand,
a183a15f
JS
2311 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2312 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2313 pmb->mbox_cmpl,
2314 *((uint32_t *) pmbox),
2315 pmbox->un.varWords[0],
2316 pmbox->un.varWords[1],
2317 pmbox->un.varWords[2],
2318 pmbox->un.varWords[3],
2319 pmbox->un.varWords[4],
2320 pmbox->un.varWords[5],
2321 pmbox->un.varWords[6],
2322 pmbox->un.varWords[7]);
2323
92d7f7b0 2324 if (pmb->mbox_cmpl)
dea3101e 2325 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2326 } while (1);
2327 return 0;
2328}
dea3101e 2329
e59058c4 2330/**
3621a710 2331 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2332 * @phba: Pointer to HBA context object.
2333 * @pring: Pointer to driver SLI ring object.
2334 * @tag: buffer tag.
2335 *
 2336 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 2337 * is set in the tag, the buffer was posted for a particular exchange and
 2338 * the function will return the buffer without replacing it.
2339 * If the buffer is for unsolicited ELS or CT traffic, this function
2340 * returns the buffer and also posts another buffer to the firmware.
2341 **/
76bb24ef
JS
2342static struct lpfc_dmabuf *
2343lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2344 struct lpfc_sli_ring *pring,
2345 uint32_t tag)
76bb24ef 2346{
9f1e1b50
JS
2347 struct hbq_dmabuf *hbq_entry;
2348
76bb24ef
JS
2349 if (tag & QUE_BUFTAG_BIT)
2350 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2351 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2352 if (!hbq_entry)
2353 return NULL;
2354 return &hbq_entry->dbuf;
76bb24ef 2355}
57127f15 2356
3772a991
JS
2357/**
2358 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2359 * @phba: Pointer to HBA context object.
2360 * @pring: Pointer to driver SLI ring object.
2361 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2362 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2363 * @fch_type: the type for the first frame of the sequence.
2364 *
2365 * This function is called with no lock held. This function uses the r_ctl and
2366 * type of the received sequence to find the correct callback function to call
2367 * to process the sequence.
2368 **/
2369static int
2370lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2371 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2372 uint32_t fch_type)
2373{
2374 int i;
2375
2376 /* unSolicited Responses */
2377 if (pring->prt[0].profile) {
2378 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2379 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2380 saveq);
2381 return 1;
2382 }
 2383 /* We must search, based on rctl / type,
 2384 * for the right routine */
2385 for (i = 0; i < pring->num_mask; i++) {
2386 if ((pring->prt[i].rctl == fch_r_ctl) &&
2387 (pring->prt[i].type == fch_type)) {
2388 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2389 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2390 (phba, pring, saveq);
2391 return 1;
2392 }
2393 }
2394 return 0;
2395}
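/*
 * Illustrative sketch, not lpfc code: the two-stage dispatch done by
 * lpfc_complete_unsol_iocb() above.  A ring-wide "profile" handler,
 * when registered, consumes every sequence; otherwise the (rctl,
 * type) pair from the first frame selects a handler from a small
 * per-ring table.  All names are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>

typedef void (*unsol_handler_t)(void *seq);

struct ring_match {
	uint32_t rctl;
	uint32_t type;
	unsol_handler_t handler;
};

static int dispatch_unsol(unsol_handler_t profile_handler,
			  const struct ring_match *tbl, size_t n,
			  uint32_t rctl, uint32_t type, void *seq)
{
	size_t i;

	if (profile_handler) {		/* profile handler takes all */
		profile_handler(seq);
		return 1;
	}
	for (i = 0; i < n; i++) {
		if (tbl[i].rctl == rctl && tbl[i].type == type) {
			if (tbl[i].handler)
				tbl[i].handler(seq);
			return 1;	/* sequence consumed */
		}
	}
	return 0;			/* nobody claimed it */
}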
e59058c4
JS
2396
2397/**
3621a710 2398 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2399 * @phba: Pointer to HBA context object.
2400 * @pring: Pointer to driver SLI ring object.
2401 * @saveq: Pointer to the unsolicited iocb.
2402 *
2403 * This function is called with no lock held by the ring event handler
2404 * when there is an unsolicited iocb posted to the response ring by the
2405 * firmware. This function gets the buffer associated with the iocbs
2406 * and calls the event handler for the ring. This function handles both
2407 * qring buffers and hbq buffers.
2408 * When the function returns 1 the caller can free the iocb object otherwise
2409 * upper layer functions will free the iocb objects.
2410 **/
dea3101e 2411static int
2412lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2413 struct lpfc_iocbq *saveq)
2414{
2415 IOCB_t * irsp;
2416 WORD5 * w5p;
2417 uint32_t Rctl, Type;
3772a991 2418 uint32_t match;
76bb24ef 2419 struct lpfc_iocbq *iocbq;
3163f725 2420 struct lpfc_dmabuf *dmzbuf;
dea3101e 2421
2422 match = 0;
2423 irsp = &(saveq->iocb);
57127f15
JS
2424
2425 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2426 if (pring->lpfc_sli_rcv_async_status)
2427 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2428 else
2429 lpfc_printf_log(phba,
2430 KERN_WARNING,
2431 LOG_SLI,
2432 "0316 Ring %d handler: unexpected "
2433 "ASYNC_STATUS iocb received evt_code "
2434 "0x%x\n",
2435 pring->ringno,
2436 irsp->un.asyncstat.evt_code);
2437 return 1;
2438 }
2439
3163f725
JS
2440 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2441 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2442 if (irsp->ulpBdeCount > 0) {
2443 dmzbuf = lpfc_sli_get_buff(phba, pring,
2444 irsp->un.ulpWord[3]);
2445 lpfc_in_buf_free(phba, dmzbuf);
2446 }
2447
2448 if (irsp->ulpBdeCount > 1) {
2449 dmzbuf = lpfc_sli_get_buff(phba, pring,
2450 irsp->unsli3.sli3Words[3]);
2451 lpfc_in_buf_free(phba, dmzbuf);
2452 }
2453
2454 if (irsp->ulpBdeCount > 2) {
2455 dmzbuf = lpfc_sli_get_buff(phba, pring,
2456 irsp->unsli3.sli3Words[7]);
2457 lpfc_in_buf_free(phba, dmzbuf);
2458 }
2459
2460 return 1;
2461 }
2462
92d7f7b0 2463 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2464 if (irsp->ulpBdeCount != 0) {
2465 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2466 irsp->un.ulpWord[3]);
2467 if (!saveq->context2)
2468 lpfc_printf_log(phba,
2469 KERN_ERR,
2470 LOG_SLI,
2471 "0341 Ring %d Cannot find buffer for "
2472 "an unsolicited iocb. tag 0x%x\n",
2473 pring->ringno,
2474 irsp->un.ulpWord[3]);
76bb24ef
JS
2475 }
2476 if (irsp->ulpBdeCount == 2) {
2477 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2478 irsp->unsli3.sli3Words[7]);
2479 if (!saveq->context3)
2480 lpfc_printf_log(phba,
2481 KERN_ERR,
2482 LOG_SLI,
2483 "0342 Ring %d Cannot find buffer for an"
2484 " unsolicited iocb. tag 0x%x\n",
2485 pring->ringno,
2486 irsp->unsli3.sli3Words[7]);
2487 }
2488 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2489 irsp = &(iocbq->iocb);
76bb24ef
JS
2490 if (irsp->ulpBdeCount != 0) {
2491 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2492 irsp->un.ulpWord[3]);
9c2face6 2493 if (!iocbq->context2)
76bb24ef
JS
2494 lpfc_printf_log(phba,
2495 KERN_ERR,
2496 LOG_SLI,
2497 "0343 Ring %d Cannot find "
2498 "buffer for an unsolicited iocb"
2499 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2500 irsp->un.ulpWord[3]);
76bb24ef
JS
2501 }
2502 if (irsp->ulpBdeCount == 2) {
2503 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2504 irsp->unsli3.sli3Words[7]);
9c2face6 2505 if (!iocbq->context3)
76bb24ef
JS
2506 lpfc_printf_log(phba,
2507 KERN_ERR,
2508 LOG_SLI,
2509 "0344 Ring %d Cannot find "
2510 "buffer for an unsolicited "
2511 "iocb. tag 0x%x\n",
2512 pring->ringno,
2513 irsp->unsli3.sli3Words[7]);
2514 }
2515 }
92d7f7b0 2516 }
9c2face6
JS
2517 if (irsp->ulpBdeCount != 0 &&
2518 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2519 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2520 int found = 0;
2521
2522 /* search continue save q for same XRI */
2523 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2524 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2525 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2526 list_add_tail(&saveq->list, &iocbq->list);
2527 found = 1;
2528 break;
2529 }
2530 }
2531 if (!found)
2532 list_add_tail(&saveq->clist,
2533 &pring->iocb_continue_saveq);
2534 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2535 list_del_init(&iocbq->clist);
2536 saveq = iocbq;
2537 irsp = &(saveq->iocb);
2538 } else
2539 return 0;
2540 }
2541 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2542 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2543 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2544 Rctl = FC_RCTL_ELS_REQ;
2545 Type = FC_TYPE_ELS;
9c2face6
JS
2546 } else {
2547 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2548 Rctl = w5p->hcsw.Rctl;
2549 Type = w5p->hcsw.Type;
2550
2551 /* Firmware Workaround */
2552 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2553 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2554 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2555 Rctl = FC_RCTL_ELS_REQ;
2556 Type = FC_TYPE_ELS;
9c2face6
JS
2557 w5p->hcsw.Rctl = Rctl;
2558 w5p->hcsw.Type = Type;
2559 }
2560 }
92d7f7b0 2561
3772a991 2562 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2563 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2564 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2565 "Type x%x received\n",
e8b62011 2566 pring->ringno, Rctl, Type);
3772a991 2567
92d7f7b0 2568 return 1;
dea3101e 2569}
2570
e59058c4 2571/**
3621a710 2572 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2573 * @phba: Pointer to HBA context object.
2574 * @pring: Pointer to driver SLI ring object.
2575 * @prspiocb: Pointer to response iocb object.
2576 *
2577 * This function looks up the iocb_lookup table to get the command iocb
2578 * corresponding to the given response iocb using the iotag of the
2579 * response iocb. This function is called with the hbalock held.
2580 * This function returns the command iocb object if it finds the command
2581 * iocb else returns NULL.
2582 **/
dea3101e 2583static struct lpfc_iocbq *
2e0fef85
JS
2584lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2585 struct lpfc_sli_ring *pring,
2586 struct lpfc_iocbq *prspiocb)
dea3101e 2587{
dea3101e 2588 struct lpfc_iocbq *cmd_iocb = NULL;
2589 uint16_t iotag;
2590
604a3e30
JB
2591 iotag = prspiocb->iocb.ulpIoTag;
2592
2593 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2594 cmd_iocb = phba->sli.iocbq_lookup[iotag];
92d7f7b0 2595 list_del_init(&cmd_iocb->list);
4f2e66c6 2596 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2a9bf3d0 2597 pring->txcmplq_cnt--;
4f2e66c6 2598 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2a9bf3d0 2599 }
604a3e30 2600 return cmd_iocb;
dea3101e 2601 }
2602
dea3101e 2603 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2604 "0317 iotag x%x is out of "
604a3e30 2605 "range: max iotag x%x wd0 x%x\n",
e8b62011 2606 iotag, phba->sli.last_iotag,
604a3e30 2607 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 2608 return NULL;
2609}
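/*
 * Illustrative sketch, not lpfc code: the O(1) lookup performed by
 * lpfc_sli_iocbq_lookup() above.  The response carries back the iotag
 * assigned at submit time; tag 0 means "no completion expected" and
 * anything past last_iotag is rejected before the table is indexed.
 * All names are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>

struct io_req;				/* opaque outstanding request */

struct io_table {
	struct io_req **slot;		/* slot[tag] -> request */
	uint16_t last_iotag;		/* highest tag handed out */
};

static struct io_req *io_lookup(const struct io_table *t, uint16_t iotag)
{
	if (iotag == 0 || iotag > t->last_iotag)
		return NULL;		/* bogus tag in the response */
	return t->slot[iotag];
}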
2610
3772a991
JS
2611/**
2612 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2613 * @phba: Pointer to HBA context object.
2614 * @pring: Pointer to driver SLI ring object.
2615 * @iotag: IOCB tag.
2616 *
2617 * This function looks up the iocb_lookup table to get the command iocb
2618 * corresponding to the given iotag. This function is called with the
2619 * hbalock held.
2620 * This function returns the command iocb object if it finds the command
2621 * iocb else returns NULL.
2622 **/
2623static struct lpfc_iocbq *
2624lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2625 struct lpfc_sli_ring *pring, uint16_t iotag)
2626{
2627 struct lpfc_iocbq *cmd_iocb;
2628
2629 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2630 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
2631 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2632 /* remove from txcmpl queue list */
2633 list_del_init(&cmd_iocb->list);
2634 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2a9bf3d0 2635 pring->txcmplq_cnt--;
4f2e66c6 2636 return cmd_iocb;
2a9bf3d0 2637 }
3772a991 2638 }
3772a991
JS
2639 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2640 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2641 iotag, phba->sli.last_iotag);
2642 return NULL;
2643}
2644
e59058c4 2645/**
3621a710 2646 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
2647 * @phba: Pointer to HBA context object.
2648 * @pring: Pointer to driver SLI ring object.
2649 * @saveq: Pointer to the response iocb to be processed.
2650 *
2651 * This function is called by the ring event handler for non-fcp
2652 * rings when there is a new response iocb in the response ring.
2653 * The caller is not required to hold any locks. This function
2654 * gets the command iocb associated with the response iocb and
2655 * calls the completion handler for the command iocb. If there
2656 * is no completion handler, the function will free the resources
2657 * associated with command iocb. If the response iocb is for
2658 * an already aborted command iocb, the status of the completion
2659 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2660 * This function always returns 1.
2661 **/
dea3101e 2662static int
2e0fef85 2663lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 2664 struct lpfc_iocbq *saveq)
2665{
2e0fef85 2666 struct lpfc_iocbq *cmdiocbp;
dea3101e 2667 int rc = 1;
2668 unsigned long iflag;
2669
2670 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 2671 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 2672 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2e0fef85
JS
2673 spin_unlock_irqrestore(&phba->hbalock, iflag);
2674
dea3101e 2675 if (cmdiocbp) {
2676 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
2677 /*
2678 * If an ELS command failed send an event to mgmt
2679 * application.
2680 */
2681 if (saveq->iocb.ulpStatus &&
2682 (pring->ringno == LPFC_ELS_RING) &&
2683 (cmdiocbp->iocb.ulpCommand ==
2684 CMD_ELS_REQUEST64_CR))
2685 lpfc_send_els_failure_event(phba,
2686 cmdiocbp, saveq);
2687
dea3101e 2688 /*
2689 * Post all ELS completions to the worker thread.
2690 * All other are passed to the completion callback.
2691 */
2692 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
2693 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2694 (cmdiocbp->iocb_flag &
2695 LPFC_DRIVER_ABORTED)) {
2696 spin_lock_irqsave(&phba->hbalock,
2697 iflag);
07951076
JS
2698 cmdiocbp->iocb_flag &=
2699 ~LPFC_DRIVER_ABORTED;
341af102
JS
2700 spin_unlock_irqrestore(&phba->hbalock,
2701 iflag);
07951076
JS
2702 saveq->iocb.ulpStatus =
2703 IOSTAT_LOCAL_REJECT;
2704 saveq->iocb.un.ulpWord[4] =
2705 IOERR_SLI_ABORTED;
0ff10d46
JS
2706
2707 /* Firmware could still be in progress
2708 * of DMAing payload, so don't free data
2709 * buffer till after a hbeat.
2710 */
341af102
JS
2711 spin_lock_irqsave(&phba->hbalock,
2712 iflag);
0ff10d46 2713 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
2714 spin_unlock_irqrestore(&phba->hbalock,
2715 iflag);
2716 }
0f65ff68
JS
2717 if (phba->sli_rev == LPFC_SLI_REV4) {
2718 if (saveq->iocb_flag &
2719 LPFC_EXCHANGE_BUSY) {
2720 /* Set cmdiocb flag for the
2721 * exchange busy so sgl (xri)
2722 * will not be released until
2723 * the abort xri is received
2724 * from hba.
2725 */
2726 spin_lock_irqsave(
2727 &phba->hbalock, iflag);
2728 cmdiocbp->iocb_flag |=
2729 LPFC_EXCHANGE_BUSY;
2730 spin_unlock_irqrestore(
2731 &phba->hbalock, iflag);
2732 }
2733 if (cmdiocbp->iocb_flag &
2734 LPFC_DRIVER_ABORTED) {
2735 /*
2736 * Clear LPFC_DRIVER_ABORTED
2737 * bit in case it was driver
2738 * initiated abort.
2739 */
2740 spin_lock_irqsave(
2741 &phba->hbalock, iflag);
2742 cmdiocbp->iocb_flag &=
2743 ~LPFC_DRIVER_ABORTED;
2744 spin_unlock_irqrestore(
2745 &phba->hbalock, iflag);
2746 cmdiocbp->iocb.ulpStatus =
2747 IOSTAT_LOCAL_REJECT;
2748 cmdiocbp->iocb.un.ulpWord[4] =
2749 IOERR_ABORT_REQUESTED;
2750 /*
2751 * For SLI4, irsiocb contains
2752 * NO_XRI in sli_xritag, it
2753 * shall not affect releasing
2754 * sgl (xri) process.
2755 */
2756 saveq->iocb.ulpStatus =
2757 IOSTAT_LOCAL_REJECT;
2758 saveq->iocb.un.ulpWord[4] =
2759 IOERR_SLI_ABORTED;
2760 spin_lock_irqsave(
2761 &phba->hbalock, iflag);
2762 saveq->iocb_flag |=
2763 LPFC_DELAY_MEM_FREE;
2764 spin_unlock_irqrestore(
2765 &phba->hbalock, iflag);
2766 }
07951076 2767 }
dea3101e 2768 }
2e0fef85 2769 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
2770 } else
2771 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 2772 } else {
2773 /*
2774 * Unknown initiating command based on the response iotag.
2775 * This could be the case on the ELS ring because of
2776 * lpfc_els_abort().
2777 */
2778 if (pring->ringno != LPFC_ELS_RING) {
2779 /*
2780 * Ring <ringno> handler: unexpected completion IoTag
2781 * <IoTag>
2782 */
a257bf90 2783 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
2784 "0322 Ring %d handler: "
2785 "unexpected completion IoTag x%x "
2786 "Data: x%x x%x x%x x%x\n",
2787 pring->ringno,
2788 saveq->iocb.ulpIoTag,
2789 saveq->iocb.ulpStatus,
2790 saveq->iocb.un.ulpWord[4],
2791 saveq->iocb.ulpCommand,
2792 saveq->iocb.ulpContext);
dea3101e 2793 }
2794 }
68876920 2795
dea3101e 2796 return rc;
2797}
2798
e59058c4 2799/**
3621a710 2800 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
2801 * @phba: Pointer to HBA context object.
2802 * @pring: Pointer to driver SLI ring object.
2803 *
2804 * This function is called from the iocb ring event handlers when
 2805 * the put pointer is ahead of the get pointer for a ring. This function signals
2806 * an error attention condition to the worker thread and the worker
2807 * thread will transition the HBA to offline state.
2808 **/
2e0fef85
JS
2809static void
2810lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 2811{
34b02dcd 2812 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 2813 /*
025dfdaf 2814 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
2815 * rsp ring <portRspMax>
2816 */
2817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2818 "0312 Ring %d handler: portRspPut %d "
025dfdaf 2819 "is bigger than rsp ring %d\n",
e8b62011 2820 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 2821 pring->sli.sli3.numRiocb);
875fbdfe 2822
2e0fef85 2823 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
2824
2825 /*
2826 * All error attention handlers are posted to
2827 * worker thread
2828 */
2829 phba->work_ha |= HA_ERATT;
2830 phba->work_hs = HS_FFER3;
92d7f7b0 2831
5e9d9b82 2832 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
2833
2834 return;
2835}
2836
9399627f 2837/**
3621a710 2838 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
2839 * @ptr: Pointer to address of HBA context object.
2840 *
2841 * This function is invoked by the Error Attention polling timer when the
2842 * timer times out. It will check the SLI Error Attention register for
2843 * possible attention events. If so, it will post an Error Attention event
2844 * and wake up worker thread to process it. Otherwise, it will set up the
2845 * Error Attention polling timer for the next poll.
2846 **/
2847void lpfc_poll_eratt(unsigned long ptr)
2848{
2849 struct lpfc_hba *phba;
aa6fbb75
JS
2850 uint32_t eratt = 0, rem;
2851 uint64_t sli_intr, cnt;
9399627f
JS
2852
2853 phba = (struct lpfc_hba *)ptr;
2854
aa6fbb75
JS
2855 /* Here we will also keep track of interrupts per sec of the hba */
2856 sli_intr = phba->sli.slistat.sli_intr;
2857
2858 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2859 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2860 sli_intr);
2861 else
2862 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2863
 2864 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
2865 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2866 phba->sli.slistat.sli_ips = cnt;
2867
2868 phba->sli.slistat.sli_prev_intr = sli_intr;
2869
9399627f
JS
2870 /* Check chip HA register for error event */
2871 eratt = lpfc_sli_check_eratt(phba);
2872
2873 if (eratt)
2874 /* Tell the worker thread there is work to do */
2875 lpfc_worker_wake_up(phba);
2876 else
2877 /* Restart the timer for next eratt poll */
2878 mod_timer(&phba->eratt_poll, jiffies +
2879 HZ * LPFC_ERATT_POLL_INTERVAL);
2880 return;
2881}
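/*
 * Illustrative sketch, not lpfc code: the wrap-aware delta taken by
 * lpfc_poll_eratt() above before do_div() turns it into a rate.  If
 * the running interrupt counter is now smaller than the saved value,
 * it wrapped, and the distance is the headroom that was left before
 * the wrap plus the new count.
 */
#include <stdint.h>

static uint64_t intr_delta(uint64_t prev, uint64_t now)
{
	if (prev > now)		/* counter wrapped since the last poll */
		return (UINT64_MAX - prev) + now;
	return now - prev;
}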
2882
875fbdfe 2883
e59058c4 2884/**
3621a710 2885 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
2886 * @phba: Pointer to HBA context object.
2887 * @pring: Pointer to driver SLI ring object.
2888 * @mask: Host attention register mask for this ring.
2889 *
2890 * This function is called from the interrupt context when there is a ring
2891 * event for the fcp ring. The caller does not hold any lock.
 2892 * The function processes each response iocb in the response ring until it
25985edc 2893 * finds an iocb with the LE bit set and chains all the iocbs up to the iocb with the
e59058c4
JS
2894 * LE bit set. The function will call the completion handler of the command iocb
2895 * if the response iocb indicates a completion for a command iocb or it is
2896 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2897 * function if this is an unsolicited iocb.
dea3101e 2898 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
2899 * to check it explicitly.
2900 */
2901int
2e0fef85
JS
2902lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2903 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 2904{
34b02dcd 2905 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 2906 IOCB_t *irsp = NULL;
87f6eaff 2907 IOCB_t *entry = NULL;
dea3101e 2908 struct lpfc_iocbq *cmdiocbq = NULL;
2909 struct lpfc_iocbq rspiocbq;
dea3101e 2910 uint32_t status;
2911 uint32_t portRspPut, portRspMax;
2912 int rc = 1;
2913 lpfc_iocb_type type;
2914 unsigned long iflag;
2915 uint32_t rsp_cmpl = 0;
dea3101e 2916
2e0fef85 2917 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 2918 pring->stats.iocb_event++;
2919
dea3101e 2920 /*
2921 * The next available response entry should never exceed the maximum
2922 * entries. If it does, treat it as an adapter hardware error.
2923 */
7e56aa25 2924 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 2925 portRspPut = le32_to_cpu(pgp->rspPutInx);
2926 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 2927 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 2928 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 2929 return 1;
2930 }
45ed1190
JS
2931 if (phba->fcp_ring_in_use) {
2932 spin_unlock_irqrestore(&phba->hbalock, iflag);
2933 return 1;
2934 } else
2935 phba->fcp_ring_in_use = 1;
dea3101e 2936
2937 rmb();
7e56aa25 2938 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
2939 /*
2940 * Fetch an entry off the ring and copy it into a local data
2941 * structure. The copy involves a byte-swap since the
2942 * network byte order and pci byte orders are different.
2943 */
ed957684 2944 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 2945 phba->last_completion_time = jiffies;
875fbdfe 2946
7e56aa25
JS
2947 if (++pring->sli.sli3.rspidx >= portRspMax)
2948 pring->sli.sli3.rspidx = 0;
875fbdfe 2949
87f6eaff
JSEC
2950 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2951 (uint32_t *) &rspiocbq.iocb,
ed957684 2952 phba->iocb_rsp_size);
a4bc3379 2953 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
2954 irsp = &rspiocbq.iocb;
2955
dea3101e 2956 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2957 pring->stats.iocb_rsp++;
2958 rsp_cmpl++;
2959
2960 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
2961 /*
2962 * If resource errors reported from HBA, reduce
2963 * queuedepths of the SCSI device.
2964 */
2965 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
2966 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
2967 IOERR_NO_RESOURCES)) {
92d7f7b0 2968 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 2969 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
2970 spin_lock_irqsave(&phba->hbalock, iflag);
2971 }
2972
dea3101e 2973 /* Rsp ring <ringno> error: IOCB */
2974 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2975 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 2976 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 2977 pring->ringno,
92d7f7b0
JS
2978 irsp->un.ulpWord[0],
2979 irsp->un.ulpWord[1],
2980 irsp->un.ulpWord[2],
2981 irsp->un.ulpWord[3],
2982 irsp->un.ulpWord[4],
2983 irsp->un.ulpWord[5],
d7c255b2
JS
2984 *(uint32_t *)&irsp->un1,
2985 *((uint32_t *)&irsp->un1 + 1));
dea3101e 2986 }
2987
2988 switch (type) {
2989 case LPFC_ABORT_IOCB:
2990 case LPFC_SOL_IOCB:
2991 /*
2992 * Idle exchange closed via ABTS from port. No iocb
2993 * resources need to be recovered.
2994 */
2995 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 2996 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 2997 "0333 IOCB cmd 0x%x"
dca9479b 2998 " processed. Skipping"
92d7f7b0 2999 " completion\n",
dca9479b 3000 irsp->ulpCommand);
dea3101e 3001 break;
3002 }
3003
604a3e30
JB
3004 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3005 &rspiocbq);
0f65ff68
JS
3006 if (unlikely(!cmdiocbq))
3007 break;
3008 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3009 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3010 if (cmdiocbq->iocb_cmpl) {
3011 spin_unlock_irqrestore(&phba->hbalock, iflag);
3012 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3013 &rspiocbq);
3014 spin_lock_irqsave(&phba->hbalock, iflag);
3015 }
dea3101e 3016 break;
a4bc3379 3017 case LPFC_UNSOL_IOCB:
2e0fef85 3018 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3019 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3020 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3021 break;
dea3101e 3022 default:
3023 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3024 char adaptermsg[LPFC_MAX_ADPTMSG];
3025 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3026 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3027 MAX_MSG_DATA);
898eb71c
JP
3028 dev_warn(&((phba->pcidev)->dev),
3029 "lpfc%d: %s\n",
dea3101e 3030 phba->brd_no, adaptermsg);
3031 } else {
3032 /* Unknown IOCB command */
3033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3034 "0334 Unknown IOCB command "
92d7f7b0 3035 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3036 type, irsp->ulpCommand,
92d7f7b0
JS
3037 irsp->ulpStatus,
3038 irsp->ulpIoTag,
3039 irsp->ulpContext);
dea3101e 3040 }
3041 break;
3042 }
3043
3044 /*
3045 * The response IOCB has been processed. Update the ring
3046 * pointer in SLIM. If the port response put pointer has not
3047 * been updated, sync the pgp->rspPutInx and fetch the new port
3048 * response put pointer.
3049 */
7e56aa25
JS
3050 writel(pring->sli.sli3.rspidx,
3051 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3052
7e56aa25 3053 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3054 portRspPut = le32_to_cpu(pgp->rspPutInx);
3055 }
3056
3057 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3058 pring->stats.iocb_rsp_full++;
3059 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3060 writel(status, phba->CAregaddr);
3061 readl(phba->CAregaddr);
3062 }
3063 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3064 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3065 pring->stats.iocb_cmd_empty++;
3066
3067 /* Force update of the local copy of cmdGetInx */
7e56aa25 3068 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3069 lpfc_sli_resume_iocb(phba, pring);
3070
3071 if ((pring->lpfc_sli_cmd_available))
3072 (pring->lpfc_sli_cmd_available) (phba, pring);
3073
3074 }
3075
45ed1190 3076 phba->fcp_ring_in_use = 0;
2e0fef85 3077 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3078 return rc;
3079}
3080
e59058c4 3081/**
3772a991
JS
3082 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3083 * @phba: Pointer to HBA context object.
3084 * @pring: Pointer to driver SLI ring object.
3085 * @rspiocbp: Pointer to driver response IOCB object.
3086 *
3087 * This function is called from the worker thread when there is a slow-path
3088 * response IOCB to process. This function chains all the response iocbs until
3089 * seeing the iocb with the LE bit set. The function will call
3090 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3091 * completion of a command iocb. The function will call the
3092 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3093 * The function frees the resources or calls the completion handler if this
3094 * iocb is an abort completion. The function returns NULL when the response
3095 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3096 * this function shall chain the iocb on to the iocb_continueq and return the
3097 * response iocb passed in.
3098 **/
3099static struct lpfc_iocbq *
3100lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3101 struct lpfc_iocbq *rspiocbp)
3102{
3103 struct lpfc_iocbq *saveq;
3104 struct lpfc_iocbq *cmdiocbp;
3105 struct lpfc_iocbq *next_iocb;
3106 IOCB_t *irsp = NULL;
3107 uint32_t free_saveq;
3108 uint8_t iocb_cmd_type;
3109 lpfc_iocb_type type;
3110 unsigned long iflag;
3111 int rc;
3112
3113 spin_lock_irqsave(&phba->hbalock, iflag);
 3114 /* First add the response iocb to the continueq list */
3115 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3116 pring->iocb_continueq_cnt++;
3117
70f23fd6 3118 /* Now, determine whether the list is completed for processing */
3772a991
JS
3119 irsp = &rspiocbp->iocb;
3120 if (irsp->ulpLe) {
3121 /*
3122 * By default, the driver expects to free all resources
3123 * associated with this iocb completion.
3124 */
3125 free_saveq = 1;
3126 saveq = list_get_first(&pring->iocb_continueq,
3127 struct lpfc_iocbq, list);
3128 irsp = &(saveq->iocb);
3129 list_del_init(&pring->iocb_continueq);
3130 pring->iocb_continueq_cnt = 0;
3131
3132 pring->stats.iocb_rsp++;
3133
3134 /*
3135 * If resource errors reported from HBA, reduce
3136 * queuedepths of the SCSI device.
3137 */
3138 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3139 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3140 IOERR_NO_RESOURCES)) {
3772a991
JS
3141 spin_unlock_irqrestore(&phba->hbalock, iflag);
3142 phba->lpfc_rampdown_queue_depth(phba);
3143 spin_lock_irqsave(&phba->hbalock, iflag);
3144 }
3145
3146 if (irsp->ulpStatus) {
3147 /* Rsp ring <ringno> error: IOCB */
3148 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3149 "0328 Rsp Ring %d error: "
3150 "IOCB Data: "
3151 "x%x x%x x%x x%x "
3152 "x%x x%x x%x x%x "
3153 "x%x x%x x%x x%x "
3154 "x%x x%x x%x x%x\n",
3155 pring->ringno,
3156 irsp->un.ulpWord[0],
3157 irsp->un.ulpWord[1],
3158 irsp->un.ulpWord[2],
3159 irsp->un.ulpWord[3],
3160 irsp->un.ulpWord[4],
3161 irsp->un.ulpWord[5],
3162 *(((uint32_t *) irsp) + 6),
3163 *(((uint32_t *) irsp) + 7),
3164 *(((uint32_t *) irsp) + 8),
3165 *(((uint32_t *) irsp) + 9),
3166 *(((uint32_t *) irsp) + 10),
3167 *(((uint32_t *) irsp) + 11),
3168 *(((uint32_t *) irsp) + 12),
3169 *(((uint32_t *) irsp) + 13),
3170 *(((uint32_t *) irsp) + 14),
3171 *(((uint32_t *) irsp) + 15));
3172 }
3173
3174 /*
3175 * Fetch the IOCB command type and call the correct completion
3176 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3177 * get freed back to the lpfc_iocb_list by the discovery
3178 * kernel thread.
3179 */
3180 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3181 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3182 switch (type) {
3183 case LPFC_SOL_IOCB:
3184 spin_unlock_irqrestore(&phba->hbalock, iflag);
3185 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3186 spin_lock_irqsave(&phba->hbalock, iflag);
3187 break;
3188
3189 case LPFC_UNSOL_IOCB:
3190 spin_unlock_irqrestore(&phba->hbalock, iflag);
3191 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3192 spin_lock_irqsave(&phba->hbalock, iflag);
3193 if (!rc)
3194 free_saveq = 0;
3195 break;
3196
3197 case LPFC_ABORT_IOCB:
3198 cmdiocbp = NULL;
3199 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3200 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3201 saveq);
3202 if (cmdiocbp) {
3203 /* Call the specified completion routine */
3204 if (cmdiocbp->iocb_cmpl) {
3205 spin_unlock_irqrestore(&phba->hbalock,
3206 iflag);
3207 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3208 saveq);
3209 spin_lock_irqsave(&phba->hbalock,
3210 iflag);
3211 } else
3212 __lpfc_sli_release_iocbq(phba,
3213 cmdiocbp);
3214 }
3215 break;
3216
3217 case LPFC_UNKNOWN_IOCB:
3218 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3219 char adaptermsg[LPFC_MAX_ADPTMSG];
3220 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3221 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3222 MAX_MSG_DATA);
3223 dev_warn(&((phba->pcidev)->dev),
3224 "lpfc%d: %s\n",
3225 phba->brd_no, adaptermsg);
3226 } else {
3227 /* Unknown IOCB command */
3228 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3229 "0335 Unknown IOCB "
3230 "command Data: x%x "
3231 "x%x x%x x%x\n",
3232 irsp->ulpCommand,
3233 irsp->ulpStatus,
3234 irsp->ulpIoTag,
3235 irsp->ulpContext);
3236 }
3237 break;
3238 }
3239
3240 if (free_saveq) {
3241 list_for_each_entry_safe(rspiocbp, next_iocb,
3242 &saveq->list, list) {
3243 list_del(&rspiocbp->list);
3244 __lpfc_sli_release_iocbq(phba, rspiocbp);
3245 }
3246 __lpfc_sli_release_iocbq(phba, saveq);
3247 }
3248 rspiocbp = NULL;
3249 }
3250 spin_unlock_irqrestore(&phba->hbalock, iflag);
3251 return rspiocbp;
3252}
3253
3254/**
3255 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3256 * @phba: Pointer to HBA context object.
3257 * @pring: Pointer to driver SLI ring object.
3258 * @mask: Host attention register mask for this ring.
3259 *
3772a991
JS
3260 * This routine wraps the actual slow_ring event process routine through the
3261 * API jump table function pointer in the lpfc_hba struct.
e59058c4 3262 **/
3772a991 3263void
2e0fef85
JS
3264lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3265 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3266{
3267 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3268}
3269
3270/**
3271 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3272 * @phba: Pointer to HBA context object.
3273 * @pring: Pointer to driver SLI ring object.
3274 * @mask: Host attention register mask for this ring.
3275 *
3276 * This function is called from the worker thread when there is a ring event
3277 * for non-fcp rings. The caller does not hold any lock. The function
3278 * removes each response iocb from the response ring and calls the handle
3279 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3280 **/
3281static void
3282lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3283 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3284{
34b02dcd 3285 struct lpfc_pgp *pgp;
dea3101e 3286 IOCB_t *entry;
3287 IOCB_t *irsp = NULL;
3288 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3289 uint32_t portRspPut, portRspMax;
dea3101e 3290 unsigned long iflag;
3772a991 3291 uint32_t status;
dea3101e 3292
34b02dcd 3293 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3294 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3295 pring->stats.iocb_event++;
3296
dea3101e 3297 /*
3298 * The next available response entry should never exceed the maximum
3299 * entries. If it does, treat it as an adapter hardware error.
3300 */
7e56aa25 3301 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3302 portRspPut = le32_to_cpu(pgp->rspPutInx);
3303 if (portRspPut >= portRspMax) {
3304 /*
025dfdaf 3305 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3306 * rsp ring <portRspMax>
3307 */
ed957684 3308 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3309 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3310 "is bigger than rsp ring %d\n",
e8b62011 3311 pring->ringno, portRspPut, portRspMax);
dea3101e 3312
2e0fef85
JS
3313 phba->link_state = LPFC_HBA_ERROR;
3314 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3315
3316 phba->work_hs = HS_FFER3;
3317 lpfc_handle_eratt(phba);
3318
3772a991 3319 return;
dea3101e 3320 }
3321
3322 rmb();
7e56aa25 3323 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3324 /*
3325 * Build a completion list and call the appropriate handler.
3326 * The process is to get the next available response iocb, get
3327 * a free iocb from the list, copy the response data into the
3328		 * free iocb, insert it into the continuation list, and update the
3329		 * next response index to slim. This process makes response
3330		 * iocbs in the ring available to DMA as fast as possible but
3331		 * pays a penalty for a copy operation. Since the iocb is
3332		 * only 32 bytes, this penalty is considered small relative to
3333		 * the PCI reads for register values and a slim write. When
3334		 * the ulpLe field is set, the entire command has been
3335 * received.
3336 */
ed957684
JS
3337 entry = lpfc_resp_iocb(phba, pring);
3338
858c9f6c 3339 phba->last_completion_time = jiffies;
2e0fef85 3340 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3341 if (rspiocbp == NULL) {
3342 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3343 "completion.\n", __func__);
dea3101e 3344 break;
3345 }
3346
ed957684
JS
3347 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3348 phba->iocb_rsp_size);
dea3101e 3349 irsp = &rspiocbp->iocb;
3350
7e56aa25
JS
3351 if (++pring->sli.sli3.rspidx >= portRspMax)
3352 pring->sli.sli3.rspidx = 0;
dea3101e 3353
a58cbd52
JS
3354 if (pring->ringno == LPFC_ELS_RING) {
3355 lpfc_debugfs_slow_ring_trc(phba,
3356 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3357 *(((uint32_t *) irsp) + 4),
3358 *(((uint32_t *) irsp) + 6),
3359 *(((uint32_t *) irsp) + 7));
3360 }
3361
7e56aa25
JS
3362 writel(pring->sli.sli3.rspidx,
3363 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3364
3772a991
JS
3365 spin_unlock_irqrestore(&phba->hbalock, iflag);
3366 /* Handle the response IOCB */
3367 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3368 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3369
3370 /*
3371 * If the port response put pointer has not been updated, sync
3372		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3373 * response put pointer.
3374 */
7e56aa25 3375 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3376 portRspPut = le32_to_cpu(pgp->rspPutInx);
3377 }
7e56aa25 3378 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3379
92d7f7b0 3380 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3381 /* At least one response entry has been freed */
3382 pring->stats.iocb_rsp_full++;
3383 /* SET RxRE_RSP in Chip Att register */
3384 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3385 writel(status, phba->CAregaddr);
3386 readl(phba->CAregaddr); /* flush */
3387 }
3388 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3389 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3390 pring->stats.iocb_cmd_empty++;
3391
3392 /* Force update of the local copy of cmdGetInx */
7e56aa25 3393 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3394 lpfc_sli_resume_iocb(phba, pring);
3395
3396 if ((pring->lpfc_sli_cmd_available))
3397 (pring->lpfc_sli_cmd_available) (phba, pring);
3398
3399 }
3400
2e0fef85 3401 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3402 return;
dea3101e 3403}
3404
4f774513
JS
3405/**
3406 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3407 * @phba: Pointer to HBA context object.
3408 * @pring: Pointer to driver SLI ring object.
3409 * @mask: Host attention register mask for this ring.
3410 *
3411 * This function is called from the worker thread when there is a pending
3412 * ELS response iocb on the driver internal slow-path response iocb worker
3413 * queue. The caller does not hold any lock. The function removes each
3414 * response iocb from the response worker queue and calls the handle
3415 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3416 **/
3417static void
3418lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3419 struct lpfc_sli_ring *pring, uint32_t mask)
3420{
3421 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3422 struct hbq_dmabuf *dmabuf;
3423 struct lpfc_cq_event *cq_event;
4f774513
JS
3424 unsigned long iflag;
3425
45ed1190
JS
3426 spin_lock_irqsave(&phba->hbalock, iflag);
3427 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3428 spin_unlock_irqrestore(&phba->hbalock, iflag);
3429 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3430 /* Get the response iocb from the head of work queue */
3431 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3432 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3433 cq_event, struct lpfc_cq_event, list);
4f774513 3434 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3435
3436 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3437 case CQE_CODE_COMPL_WQE:
3438 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3439 cq_event);
45ed1190
JS
3440 /* Translate ELS WCQE to response IOCBQ */
3441 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3442 irspiocbq);
3443 if (irspiocbq)
3444 lpfc_sli_sp_handle_rspiocb(phba, pring,
3445 irspiocbq);
4d9ab994
JS
3446 break;
3447 case CQE_CODE_RECEIVE:
7851fe2c 3448 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3449 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3450 cq_event);
3451 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3452 break;
3453 default:
3454 break;
3455 }
4f774513
JS
3456 }
3457}
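/*
 * Illustrative note (not driver code): the dispatch above works because
 * struct lpfc_cq_event is embedded in both struct lpfc_iocbq and
 * struct hbq_dmabuf, so container_of() recovers whichever outer object
 * the completion code in the CQE indicates:
 *
 *	switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
 *	case CQE_CODE_COMPL_WQE:	// recover the enclosing iocbq
 *		iocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
 *		break;
 *	case CQE_CODE_RECEIVE:		// recover the enclosing dmabuf
 *		buf = container_of(cq_event, struct hbq_dmabuf, cq_event);
 *		break;
 *	}
 */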
3458
e59058c4 3459/**
3621a710 3460 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3461 * @phba: Pointer to HBA context object.
3462 * @pring: Pointer to driver SLI ring object.
3463 *
3464 * This function aborts all iocbs in the given ring and frees all the iocb
3465 * objects in txq. This function issues an abort iocb for all the iocb commands
3466 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3467 * this function returns. The caller is not required to hold any locks.
3468 **/
2e0fef85 3469void
dea3101e 3470lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3471{
2534ba75 3472 LIST_HEAD(completions);
dea3101e 3473 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3474
92d7f7b0
JS
3475 if (pring->ringno == LPFC_ELS_RING) {
3476 lpfc_fabric_abort_hba(phba);
3477 }
3478
dea3101e 3479 /* Error everything on txq and txcmplq
3480 * First do the txq.
3481 */
2e0fef85 3482 spin_lock_irq(&phba->hbalock);
2534ba75 3483 list_splice_init(&pring->txq, &completions);
dea3101e 3484 pring->txq_cnt = 0;
dea3101e 3485
3486 /* Next issue ABTS for everything on the txcmplq */
2534ba75
JS
3487 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3488 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 3489
2e0fef85 3490 spin_unlock_irq(&phba->hbalock);
dea3101e 3491
a257bf90
JS
3492 /* Cancel all the IOCBs from the completions list */
3493 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3494 IOERR_SLI_ABORTED);
dea3101e 3495}
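/*
 * Illustrative sketch (not part of the driver): the splice-then-cancel
 * idiom used above. Pending iocbs are detached from the queue while
 * hbalock is held, then completed with an error status after the lock is
 * dropped, since completion handlers may themselves take locks. The
 * helper name example_fail_txq is hypothetical.
 */
static void example_fail_txq(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);

	/* Detach every queued iocb under the lock */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Fail the detached iocbs back to their owners, lock dropped */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}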
3496
a8e497d5 3497/**
3621a710 3498 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
3499 * @phba: Pointer to HBA context object.
3500 *
3501 * This function flushes all iocbs in the fcp ring and frees all the iocb
3502 * objects in txq and txcmplq. This function will not issue abort iocbs
3503 * for all the iocb commands in txcmplq, they will just be returned with
3504 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3505 * slot has been permanently disabled.
3506 **/
3507void
3508lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3509{
3510 LIST_HEAD(txq);
3511 LIST_HEAD(txcmplq);
a8e497d5
JS
3512 struct lpfc_sli *psli = &phba->sli;
3513 struct lpfc_sli_ring *pring;
3514
3515 /* Currently, only one fcp ring */
3516 pring = &psli->ring[psli->fcp_ring];
3517
3518 spin_lock_irq(&phba->hbalock);
3519 /* Retrieve everything on txq */
3520 list_splice_init(&pring->txq, &txq);
3521 pring->txq_cnt = 0;
3522
3523 /* Retrieve everything on the txcmplq */
3524 list_splice_init(&pring->txcmplq, &txcmplq);
3525 pring->txcmplq_cnt = 0;
4f2e66c6
JS
3526
3527 /* Indicate the I/O queues are flushed */
3528 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
3529 spin_unlock_irq(&phba->hbalock);
3530
3531 /* Flush the txq */
a257bf90
JS
3532 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3533 IOERR_SLI_DOWN);
a8e497d5
JS
3534
3535 /* Flush the txcmpq */
a257bf90
JS
3536 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3537 IOERR_SLI_DOWN);
a8e497d5
JS
3538}
3539
e59058c4 3540/**
3772a991 3541 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
3542 * @phba: Pointer to HBA context object.
3543 * @mask: Bit mask to be checked.
3544 *
3545 * This function reads the host status register and compares
3546 * with the provided bit mask to check if HBA completed
3547 * the restart. This function will wait in a loop for the
3548 * HBA to complete restart. If the HBA does not restart within
3549 * 15 iterations, the function will reset the HBA again. The
3550 * function returns 1 when the HBA fails to restart, otherwise returns
3551 * zero.
3552 **/
3772a991
JS
3553static int
3554lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 3555{
41415862
JW
3556 uint32_t status;
3557 int i = 0;
3558 int retval = 0;
dea3101e 3559
41415862 3560 /* Read the HBA Host Status Register */
9940b97b
JS
3561 if (lpfc_readl(phba->HSregaddr, &status))
3562 return 1;
dea3101e 3563
41415862
JW
3564 /*
3565	 * Check status register every 10ms for 5 retries, then every
3566	 * 500ms for 5, then every 2.5 sec for 10, resetting the board
3567	 * again at the 15th iteration.
3568	 * Break out of the loop if errors occurred during init.
3569 */
3570 while (((status & mask) != mask) &&
3571 !(status & HS_FFERM) &&
3572 i++ < 20) {
dea3101e 3573
41415862
JW
3574 if (i <= 5)
3575 msleep(10);
3576 else if (i <= 10)
3577 msleep(500);
3578 else
3579 msleep(2500);
dea3101e 3580
41415862 3581 if (i == 15) {
2e0fef85 3582 /* Do post */
92d7f7b0 3583 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
3584 lpfc_sli_brdrestart(phba);
3585 }
3586 /* Read the HBA Host Status Register */
9940b97b
JS
3587 if (lpfc_readl(phba->HSregaddr, &status)) {
3588 retval = 1;
3589 break;
3590 }
41415862 3591 }
dea3101e 3592
41415862
JW
3593 /* Check to see if any errors occurred during init */
3594 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
3595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3596 "2751 Adapter failed to restart, "
3597 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3598 status,
3599 readl(phba->MBslimaddr + 0xa8),
3600 readl(phba->MBslimaddr + 0xac));
2e0fef85 3601 phba->link_state = LPFC_HBA_ERROR;
41415862 3602 retval = 1;
dea3101e 3603 }
dea3101e 3604
41415862
JW
3605 return retval;
3606}
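/*
 * Polling schedule implemented by the loop above, derived from the code as
 * written: iterations 1-5 sleep 10ms each, 6-10 sleep 500ms, and 11-20
 * sleep 2.5s, with a board restart attempted at iteration 15. The
 * worst-case wait before declaring failure is therefore roughly
 *
 *	5 * 10ms + 5 * 500ms + 10 * 2500ms ~= 27.5 seconds
 */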
dea3101e 3607
da0436e9
JS
3608/**
3609 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3610 * @phba: Pointer to HBA context object.
3611 * @mask: Bit mask to be checked.
3612 *
3613 * This function checks the host status register to check if the HBA is
3614 * ready. This function will wait in a loop for the HBA to be ready.
3615 * If the HBA is not ready, the function will reset the HBA PCI
3616 * function again. The function returns 1 when the HBA fails to be ready,
3617 * otherwise returns zero.
3618 **/
3619static int
3620lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3621{
3622 uint32_t status;
3623 int retval = 0;
3624
3625 /* Read the HBA Host Status Register */
3626 status = lpfc_sli4_post_status_check(phba);
3627
3628 if (status) {
3629 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3630 lpfc_sli_brdrestart(phba);
3631 status = lpfc_sli4_post_status_check(phba);
3632 }
3633
3634 /* Check to see if any errors occurred during init */
3635 if (status) {
3636 phba->link_state = LPFC_HBA_ERROR;
3637 retval = 1;
3638 } else
3639 phba->sli4_hba.intr_enable = 0;
3640
3641 return retval;
3642}
3643
3644/**
3645 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3646 * @phba: Pointer to HBA context object.
3647 * @mask: Bit mask to be checked.
3648 *
3649 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3650 * through the API jump table function pointer in the lpfc_hba struct.
3651 **/
3652int
3653lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3654{
3655 return phba->lpfc_sli_brdready(phba, mask);
3656}
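/*
 * Illustrative sketch (not driver code): a caller polling for readiness
 * through the jump-table wrapper above. HS_MBRDY is one of the host-status
 * bits this file waits on; treat the helper name and the mask choice as a
 * hypothetical usage example, not the driver's actual call site.
 */
static int example_check_mbox_ready(struct lpfc_hba *phba)
{
	/* returns 0 when ready, 1 on failure, per the kernel-doc above */
	return lpfc_sli_brdready(phba, HS_MBRDY);
}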
3657
9290831f
JS
3658#define BARRIER_TEST_PATTERN (0xdeadbeef)
3659
e59058c4 3660/**
3621a710 3661 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
3662 * @phba: Pointer to HBA context object.
3663 *
1b51197d
JS
3664 * This function is called before resetting an HBA, with hbalock held, and
3665 * requests the HBA to quiesce DMAs before the reset.
e59058c4 3666 **/
2e0fef85 3667void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 3668{
65a29c16
JS
3669 uint32_t __iomem *resp_buf;
3670 uint32_t __iomem *mbox_buf;
9290831f 3671 volatile uint32_t mbox;
9940b97b 3672 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
3673 int i;
3674 uint8_t hdrtype;
3675
3676 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3677 if (hdrtype != 0x80 ||
3678 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3679 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3680 return;
3681
3682 /*
3683 * Tell the other part of the chip to suspend temporarily all
3684 * its DMA activity.
3685 */
65a29c16 3686 resp_buf = phba->MBslimaddr;
9290831f
JS
3687
3688 /* Disable the error attention */
9940b97b
JS
3689 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3690 return;
9290831f
JS
3691 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3692 readl(phba->HCregaddr); /* flush */
2e0fef85 3693 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 3694
9940b97b
JS
3695 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3696 return;
3697 if (ha_copy & HA_ERATT) {
9290831f
JS
3698 /* Clear Chip error bit */
3699 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3700 phba->pport->stopped = 1;
9290831f
JS
3701 }
3702
3703 mbox = 0;
3704 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3705 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3706
3707 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 3708 mbox_buf = phba->MBslimaddr;
9290831f
JS
3709 writel(mbox, mbox_buf);
3710
9940b97b
JS
3711 for (i = 0; i < 50; i++) {
3712 if (lpfc_readl((resp_buf + 1), &resp_data))
3713 return;
3714 if (resp_data != ~(BARRIER_TEST_PATTERN))
3715 mdelay(1);
3716 else
3717 break;
3718 }
3719 resp_data = 0;
3720 if (lpfc_readl((resp_buf + 1), &resp_data))
3721 return;
3722 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 3723 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 3724 phba->pport->stopped)
9290831f
JS
3725 goto restore_hc;
3726 else
3727 goto clear_errat;
3728 }
3729
3730 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
3731 resp_data = 0;
3732 for (i = 0; i < 500; i++) {
3733 if (lpfc_readl(resp_buf, &resp_data))
3734 return;
3735 if (resp_data != mbox)
3736 mdelay(1);
3737 else
3738 break;
3739 }
9290831f
JS
3740
3741clear_errat:
3742
9940b97b
JS
3743 while (++i < 500) {
3744 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3745 return;
3746 if (!(ha_copy & HA_ERATT))
3747 mdelay(1);
3748 else
3749 break;
3750 }
9290831f
JS
3751
3752 if (readl(phba->HAregaddr) & HA_ERATT) {
3753 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3754 phba->pport->stopped = 1;
9290831f
JS
3755 }
3756
3757restore_hc:
2e0fef85 3758 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
3759 writel(hc_copy, phba->HCregaddr);
3760 readl(phba->HCregaddr); /* flush */
3761}
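/*
 * Descriptive note on the barrier protocol above: a KILL_BOARD mailbox
 * owned by the chip is written to SLIM together with BARRIER_TEST_PATTERN;
 * the port acknowledges quiescing DMA by overwriting the pattern word with
 * its complement, which the first loop polls for (up to ~50ms). Mailbox
 * ownership is then handed back to the host, and any latched ERATT is
 * cleared before the error attention enable is restored.
 */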
3762
e59058c4 3763/**
3621a710 3764 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
3765 * @phba: Pointer to HBA context object.
3766 *
3767 * This function issues a kill_board mailbox command and waits for
3768 * the error attention interrupt. This function is called for stopping
3769 * the firmware processing. The caller is not required to hold any
3770 * locks. This function calls lpfc_hba_down_post function to free
3771 * any pending commands after the kill. The function will return 1 when it
3772 * fails to kill the board, otherwise it will return 0.
3773 **/
41415862 3774int
2e0fef85 3775lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
3776{
3777 struct lpfc_sli *psli;
3778 LPFC_MBOXQ_t *pmb;
3779 uint32_t status;
3780 uint32_t ha_copy;
3781 int retval;
3782 int i = 0;
dea3101e 3783
41415862 3784 psli = &phba->sli;
dea3101e 3785
41415862 3786 /* Kill HBA */
ed957684 3787 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
3788 "0329 Kill HBA Data: x%x x%x\n",
3789 phba->pport->port_state, psli->sli_flag);
41415862 3790
98c9ea5c
JS
3791 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3792 if (!pmb)
41415862 3793 return 1;
41415862
JW
3794
3795 /* Disable the error attention */
2e0fef85 3796 spin_lock_irq(&phba->hbalock);
9940b97b
JS
3797 if (lpfc_readl(phba->HCregaddr, &status)) {
3798 spin_unlock_irq(&phba->hbalock);
3799 mempool_free(pmb, phba->mbox_mem_pool);
3800 return 1;
3801 }
41415862
JW
3802 status &= ~HC_ERINT_ENA;
3803 writel(status, phba->HCregaddr);
3804 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
3805 phba->link_flag |= LS_IGNORE_ERATT;
3806 spin_unlock_irq(&phba->hbalock);
41415862
JW
3807
3808 lpfc_kill_board(phba, pmb);
3809 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3810 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3811
3812 if (retval != MBX_SUCCESS) {
3813 if (retval != MBX_BUSY)
3814 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3816 "2752 KILL_BOARD command failed retval %d\n",
3817 retval);
2e0fef85
JS
3818 spin_lock_irq(&phba->hbalock);
3819 phba->link_flag &= ~LS_IGNORE_ERATT;
3820 spin_unlock_irq(&phba->hbalock);
41415862
JW
3821 return 1;
3822 }
3823
f4b4c68f
JS
3824 spin_lock_irq(&phba->hbalock);
3825 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3826 spin_unlock_irq(&phba->hbalock);
9290831f 3827
41415862
JW
3828 mempool_free(pmb, phba->mbox_mem_pool);
3829
3830 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3831 * attention every 100ms for 3 seconds. If we don't get ERATT after
3832 * 3 seconds we still set HBA_ERROR state because the status of the
3833 * board is now undefined.
3834 */
9940b97b
JS
3835 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3836 return 1;
41415862
JW
3837 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3838 mdelay(100);
9940b97b
JS
3839 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3840 return 1;
41415862
JW
3841 }
3842
3843 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
3844 if (ha_copy & HA_ERATT) {
3845 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3846 phba->pport->stopped = 1;
9290831f 3847 }
2e0fef85 3848 spin_lock_irq(&phba->hbalock);
41415862 3849 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 3850 psli->mbox_active = NULL;
2e0fef85
JS
3851 phba->link_flag &= ~LS_IGNORE_ERATT;
3852 spin_unlock_irq(&phba->hbalock);
41415862 3853
41415862 3854 lpfc_hba_down_post(phba);
2e0fef85 3855 phba->link_state = LPFC_HBA_ERROR;
41415862 3856
2e0fef85 3857 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 3858}
3859
e59058c4 3860/**
3772a991 3861 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
3862 * @phba: Pointer to HBA context object.
3863 *
3864 * This function resets the HBA by writing HC_INITFF to the control
3865 * register. After the HBA resets, this function resets all the iocb ring
3866 * indices. This function disables PCI layer parity checking during
3867 * the reset.
3868 * This function returns 0 always.
3869 * The caller is not required to hold any locks.
3870 **/
41415862 3871int
2e0fef85 3872lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 3873{
41415862 3874 struct lpfc_sli *psli;
dea3101e 3875 struct lpfc_sli_ring *pring;
41415862 3876 uint16_t cfg_value;
dea3101e 3877 int i;
dea3101e 3878
41415862 3879 psli = &phba->sli;
dea3101e 3880
41415862
JW
3881 /* Reset HBA */
3882 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3883 "0325 Reset HBA Data: x%x x%x\n",
2e0fef85 3884 phba->pport->port_state, psli->sli_flag);
dea3101e 3885
3886 /* perform board reset */
3887 phba->fc_eventTag = 0;
4d9ab994 3888 phba->link_events = 0;
2e0fef85
JS
3889 phba->pport->fc_myDID = 0;
3890 phba->pport->fc_prevDID = 0;
dea3101e 3891
41415862
JW
3892 /* Turn off parity checking and serr during the physical reset */
3893 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3894 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3895 (cfg_value &
3896 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3897
3772a991
JS
3898 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3899
41415862
JW
3900 /* Now toggle INITFF bit in the Host Control Register */
3901 writel(HC_INITFF, phba->HCregaddr);
3902 mdelay(1);
3903 readl(phba->HCregaddr); /* flush */
3904 writel(0, phba->HCregaddr);
3905 readl(phba->HCregaddr); /* flush */
3906
3907 /* Restore PCI cmd register */
3908 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 3909
3910 /* Initialize relevant SLI info */
41415862
JW
3911 for (i = 0; i < psli->num_rings; i++) {
3912 pring = &psli->ring[i];
dea3101e 3913 pring->flag = 0;
7e56aa25
JS
3914 pring->sli.sli3.rspidx = 0;
3915 pring->sli.sli3.next_cmdidx = 0;
3916 pring->sli.sli3.local_getidx = 0;
3917 pring->sli.sli3.cmdidx = 0;
dea3101e 3918 pring->missbufcnt = 0;
3919 }
dea3101e 3920
2e0fef85 3921 phba->link_state = LPFC_WARM_START;
41415862
JW
3922 return 0;
3923}
3924
e59058c4 3925/**
da0436e9
JS
3926 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3927 * @phba: Pointer to HBA context object.
3928 *
3929 * This function resets a SLI4 HBA. This function disables PCI layer parity
3930 * checking while it resets the device. The caller is not required to hold
3931 * any locks.
3932 *
3933 * This function returns 0 always.
3934 **/
3935int
3936lpfc_sli4_brdreset(struct lpfc_hba *phba)
3937{
3938 struct lpfc_sli *psli = &phba->sli;
3939 uint16_t cfg_value;
27b01b82 3940 int rc;
da0436e9
JS
3941
3942 /* Reset HBA */
3943 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3944 "0295 Reset HBA Data: x%x x%x\n",
3945 phba->pport->port_state, psli->sli_flag);
3946
3947 /* perform board reset */
3948 phba->fc_eventTag = 0;
4d9ab994 3949 phba->link_events = 0;
da0436e9
JS
3950 phba->pport->fc_myDID = 0;
3951 phba->pport->fc_prevDID = 0;
3952
da0436e9
JS
3953 spin_lock_irq(&phba->hbalock);
3954 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3955 phba->fcf.fcf_flag = 0;
da0436e9
JS
3956 spin_unlock_irq(&phba->hbalock);
3957
3958 /* Now physically reset the device */
3959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3960 "0389 Performing PCI function reset!\n");
be858b65
JS
3961
3962 /* Turn off parity checking and serr during the physical reset */
3963 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3964 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3965 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3966
da0436e9 3967 /* Perform FCoE PCI function reset */
2e90f4b5 3968 lpfc_sli4_queue_destroy(phba);
27b01b82 3969 rc = lpfc_pci_function_reset(phba);
da0436e9 3970
be858b65
JS
3971 /* Restore PCI cmd register */
3972 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3973
27b01b82 3974 return rc;
da0436e9
JS
3975}
3976
3977/**
3978 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
3979 * @phba: Pointer to HBA context object.
3980 *
3981 * This function is called in the SLI initialization code path to
3982 * restart the HBA. The caller is not required to hold any lock.
3983 * This function writes MBX_RESTART mailbox command to the SLIM and
3984 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3985 * function to free any pending commands. The function enables
3986 * POST only during the first initialization. The function returns zero.
3988 * The function does not guarantee that the MBX_RESTART mailbox
3989 * command completes before this function returns.
3989 **/
da0436e9
JS
3990static int
3991lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
3992{
3993 MAILBOX_t *mb;
3994 struct lpfc_sli *psli;
41415862
JW
3995 volatile uint32_t word0;
3996 void __iomem *to_slim;
0d878419 3997 uint32_t hba_aer_enabled;
41415862 3998
2e0fef85 3999 spin_lock_irq(&phba->hbalock);
41415862 4000
0d878419
JS
4001 /* Take PCIe device Advanced Error Reporting (AER) state */
4002 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4003
41415862
JW
4004 psli = &phba->sli;
4005
4006 /* Restart HBA */
4007 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4008 "0337 Restart HBA Data: x%x x%x\n",
2e0fef85 4009 phba->pport->port_state, psli->sli_flag);
41415862
JW
4010
4011 word0 = 0;
4012 mb = (MAILBOX_t *) &word0;
4013 mb->mbxCommand = MBX_RESTART;
4014 mb->mbxHc = 1;
4015
9290831f
JS
4016 lpfc_reset_barrier(phba);
4017
41415862
JW
4018 to_slim = phba->MBslimaddr;
4019 writel(*(uint32_t *) mb, to_slim);
4020 readl(to_slim); /* flush */
4021
4022 /* Only skip post after fc_ffinit is completed */
eaf15d5b 4023 if (phba->pport->port_state)
41415862 4024 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4025 else
41415862 4026 word0 = 0; /* This is really setting up word1 */
65a29c16 4027 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4028 writel(*(uint32_t *) mb, to_slim);
4029 readl(to_slim); /* flush */
dea3101e 4030
41415862 4031 lpfc_sli_brdreset(phba);
2e0fef85
JS
4032 phba->pport->stopped = 0;
4033 phba->link_state = LPFC_INIT_START;
da0436e9 4034 phba->hba_flag = 0;
2e0fef85 4035 spin_unlock_irq(&phba->hbalock);
41415862 4036
64ba8818
JS
4037 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4038 psli->stats_start = get_seconds();
4039
eaf15d5b
JS
4040 /* Give the INITFF and Post time to settle. */
4041 mdelay(100);
41415862 4042
0d878419
JS
4043 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4044 if (hba_aer_enabled)
4045 pci_disable_pcie_error_reporting(phba->pcidev);
4046
41415862 4047 lpfc_hba_down_post(phba);
dea3101e 4048
4049 return 0;
4050}
4051
da0436e9
JS
4052/**
4053 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4054 * @phba: Pointer to HBA context object.
4055 *
4056 * This function is called in the SLI initialization code path to restart
4057 * a SLI4 HBA. The caller is not required to hold any lock.
4058 * At the end of the function, it calls lpfc_hba_down_post function to
4059 * free any pending commands.
4060 **/
4061static int
4062lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4063{
4064 struct lpfc_sli *psli = &phba->sli;
75baf696 4065 uint32_t hba_aer_enabled;
27b01b82 4066 int rc;
da0436e9
JS
4067
4068 /* Restart HBA */
4069 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4070 "0296 Restart HBA Data: x%x x%x\n",
4071 phba->pport->port_state, psli->sli_flag);
4072
75baf696
JS
4073 /* Take PCIe device Advanced Error Reporting (AER) state */
4074 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4075
27b01b82 4076 rc = lpfc_sli4_brdreset(phba);
da0436e9
JS
4077
4078 spin_lock_irq(&phba->hbalock);
4079 phba->pport->stopped = 0;
4080 phba->link_state = LPFC_INIT_START;
4081 phba->hba_flag = 0;
4082 spin_unlock_irq(&phba->hbalock);
4083
4084 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4085 psli->stats_start = get_seconds();
4086
75baf696
JS
4087 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4088 if (hba_aer_enabled)
4089 pci_disable_pcie_error_reporting(phba->pcidev);
4090
da0436e9
JS
4091 lpfc_hba_down_post(phba);
4092
27b01b82 4093 return rc;
da0436e9
JS
4094}
4095
4096/**
4097 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4098 * @phba: Pointer to HBA context object.
4099 *
4100 * This routine wraps the actual SLI3 or SLI4 hba restart routine through the
4101 * API jump table function pointer in the lpfc_hba struct.
4102**/
4103int
4104lpfc_sli_brdrestart(struct lpfc_hba *phba)
4105{
4106 return phba->lpfc_sli_brdrestart(phba);
4107}
4108
e59058c4 4109/**
3621a710 4110 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
4111 * @phba: Pointer to HBA context object.
4112 *
4113 * This function is called after an HBA restart to wait for successful
4114 * restart of the HBA. Successful restart of the HBA is indicated by
4115 * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4116 * iterations, the function will restart the HBA again. The function returns
4117 * zero if HBA successfully restarted else returns negative error code.
4118 **/
dea3101e 4119static int
4120lpfc_sli_chipset_init(struct lpfc_hba *phba)
4121{
4122 uint32_t status, i = 0;
4123
4124 /* Read the HBA Host Status Register */
9940b97b
JS
4125 if (lpfc_readl(phba->HSregaddr, &status))
4126 return -EIO;
dea3101e 4127
4128 /* Check status register to see what current state is */
4129 i = 0;
4130 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4131
dcf2a4e0
JS
4132		/* Check every 10ms for 10 retries, then every 100ms for 90
4133		 * retries, then every 1 sec for 50 retries, for a total of
4134		 * ~60 seconds before resetting the board again and checking
4135		 * every 1 sec for 50 more retries. Up to 60 seconds before
4136		 * board ready is required for the Falcon FIPS zeroization to
4137		 * complete, and any board reset in between restarts the
4138		 * zeroization, further delaying board readiness.
dea3101e 4139 */
dcf2a4e0 4140 if (i++ >= 200) {
dea3101e 4141 /* Adapter failed to init, timeout, status reg
4142 <status> */
ed957684 4143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4144 "0436 Adapter failed to init, "
09372820
JS
4145 "timeout, status reg x%x, "
4146 "FW Data: A8 x%x AC x%x\n", status,
4147 readl(phba->MBslimaddr + 0xa8),
4148 readl(phba->MBslimaddr + 0xac));
2e0fef85 4149 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4150 return -ETIMEDOUT;
4151 }
4152
4153 /* Check to see if any errors occurred during init */
4154 if (status & HS_FFERM) {
4155 /* ERROR: During chipset initialization */
4156 /* Adapter failed to init, chipset, status reg
4157 <status> */
ed957684 4158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4159 "0437 Adapter failed to init, "
09372820
JS
4160 "chipset, status reg x%x, "
4161 "FW Data: A8 x%x AC x%x\n", status,
4162 readl(phba->MBslimaddr + 0xa8),
4163 readl(phba->MBslimaddr + 0xac));
2e0fef85 4164 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4165 return -EIO;
4166 }
4167
dcf2a4e0 4168 if (i <= 10)
dea3101e 4169 msleep(10);
dcf2a4e0
JS
4170 else if (i <= 100)
4171 msleep(100);
4172 else
4173 msleep(1000);
dea3101e 4174
dcf2a4e0
JS
4175 if (i == 150) {
4176 /* Do post */
92d7f7b0 4177 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4178 lpfc_sli_brdrestart(phba);
dea3101e 4179 }
4180 /* Read the HBA Host Status Register */
9940b97b
JS
4181 if (lpfc_readl(phba->HSregaddr, &status))
4182 return -EIO;
dea3101e 4183 }
4184
4185 /* Check to see if any errors occurred during init */
4186 if (status & HS_FFERM) {
4187 /* ERROR: During chipset initialization */
4188 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4190 "0438 Adapter failed to init, chipset, "
09372820
JS
4191 "status reg x%x, "
4192 "FW Data: A8 x%x AC x%x\n", status,
4193 readl(phba->MBslimaddr + 0xa8),
4194 readl(phba->MBslimaddr + 0xac));
2e0fef85 4195 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4196 return -EIO;
4197 }
4198
4199 /* Clear all interrupt enable conditions */
4200 writel(0, phba->HCregaddr);
4201 readl(phba->HCregaddr); /* flush */
4202
4203 /* setup host attn register */
4204 writel(0xffffffff, phba->HAregaddr);
4205 readl(phba->HAregaddr); /* flush */
4206 return 0;
4207}
4208
e59058c4 4209/**
3621a710 4210 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
4211 *
4212 * This function calculates and returns the number of HBQs required to be
4213 * configured.
4214 **/
78b2d852 4215int
ed957684
JS
4216lpfc_sli_hbq_count(void)
4217{
92d7f7b0 4218 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
4219}
4220
e59058c4 4221/**
3621a710 4222 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
4223 *
4224 * This function adds the number of hbq entries in every HBQ to get
4225 * the total number of hbq entries required for the HBA and returns
4226 * the total count.
4227 **/
ed957684
JS
4228static int
4229lpfc_sli_hbq_entry_count(void)
4230{
4231 int hbq_count = lpfc_sli_hbq_count();
4232 int count = 0;
4233 int i;
4234
4235 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4236 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
4237 return count;
4238}
4239
e59058c4 4240/**
3621a710 4241 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
4242 *
4243 * This function calculates amount of memory required for all hbq entries
4244 * to be configured and returns the total memory required.
4245 **/
dea3101e 4246int
ed957684
JS
4247lpfc_sli_hbq_size(void)
4248{
4249 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4250}
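/*
 * Descriptive note: the three sizing helpers above combine as
 *
 *	lpfc_sli_hbq_size() = (sum over all hbqno of
 *				lpfc_hbq_defs[hbqno]->entry_count)
 *			      * sizeof(struct lpfc_hbq_entry)
 *
 * For example, a (hypothetical) single HBQ with an entry_count of 256
 * would need 256 * sizeof(struct lpfc_hbq_entry) bytes.
 */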
4251
e59058c4 4252/**
3621a710 4253 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
4254 * @phba: Pointer to HBA context object.
4255 *
4256 * This function is called during the SLI initialization to configure
4257 * all the HBQs and post buffers to the HBQ. The caller is not
4258 * required to hold any locks. This function will return zero if successful
4259 * else it will return negative error code.
4260 **/
ed957684
JS
4261static int
4262lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4263{
4264 int hbq_count = lpfc_sli_hbq_count();
4265 LPFC_MBOXQ_t *pmb;
4266 MAILBOX_t *pmbox;
4267 uint32_t hbqno;
4268 uint32_t hbq_entry_index;
ed957684 4269
92d7f7b0
JS
4270 /* Get a Mailbox buffer to setup mailbox
4271 * commands for HBA initialization
4272 */
ed957684
JS
4273 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4274
4275 if (!pmb)
4276 return -ENOMEM;
4277
04c68496 4278 pmbox = &pmb->u.mb;
ed957684
JS
4279
4280 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4281 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4282 phba->hbq_in_use = 1;
ed957684
JS
4283
4284 hbq_entry_index = 0;
4285 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4286 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4287 phba->hbqs[hbqno].hbqPutIdx = 0;
4288 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4289 phba->hbqs[hbqno].entry_count =
92d7f7b0 4290 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
4291 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4292 hbq_entry_index, pmb);
ed957684
JS
4293 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4294
4295 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4296 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4297 mbxStatus <status>, ring <num> */
4298
4299 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4300 LOG_SLI | LOG_VPORT,
e8b62011 4301 "1805 Adapter failed to init. "
ed957684 4302 "Data: x%x x%x x%x\n",
e8b62011 4303 pmbox->mbxCommand,
ed957684
JS
4304 pmbox->mbxStatus, hbqno);
4305
4306 phba->link_state = LPFC_HBA_ERROR;
4307 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 4308 return -ENXIO;
ed957684
JS
4309 }
4310 }
4311 phba->hbq_count = hbq_count;
4312
ed957684
JS
4313 mempool_free(pmb, phba->mbox_mem_pool);
4314
92d7f7b0 4315 /* Initially populate or replenish the HBQs */
d7c255b2
JS
4316 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4317 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
4318 return 0;
4319}
4320
4f774513
JS
4321/**
4322 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4323 * @phba: Pointer to HBA context object.
4324 *
4325 * This function is called during SLI4 initialization to configure
4326 * the single receive-buffer HBQ and post buffers to it. The caller is not
4327 * required to hold any locks. This function will return zero if successful
4328 * else it will return negative error code.
4329 **/
4330static int
4331lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4332{
4333 phba->hbq_in_use = 1;
4334 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4335 phba->hbq_count = 1;
4336 /* Initially populate or replenish the HBQs */
4337 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4338 return 0;
4339}
4340
e59058c4 4341/**
3621a710 4342 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
4343 * @phba: Pointer to HBA context object.
4344 * @sli_mode: sli mode - 2/3
4345 *
4346 * This function is called by the sli initialization code path
4347 * to issue config_port mailbox command. This function restarts the
4348 * HBA firmware and issues a config_port mailbox command to configure
4349 * the SLI interface in the sli mode specified by sli_mode
4350 * variable. The caller is not required to hold any locks.
4351 * The function returns 0 if successful, else returns negative error
4352 * code.
4353 **/
9399627f
JS
4354int
4355lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e 4356{
4357 LPFC_MBOXQ_t *pmb;
4358 uint32_t resetcount = 0, rc = 0, done = 0;
4359
4360 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4361 if (!pmb) {
2e0fef85 4362 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4363 return -ENOMEM;
4364 }
4365
ed957684 4366 phba->sli_rev = sli_mode;
dea3101e 4367 while (resetcount < 2 && !done) {
2e0fef85 4368 spin_lock_irq(&phba->hbalock);
1c067a42 4369 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4370 spin_unlock_irq(&phba->hbalock);
92d7f7b0 4371 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4372 lpfc_sli_brdrestart(phba);
dea3101e 4373 rc = lpfc_sli_chipset_init(phba);
4374 if (rc)
4375 break;
4376
2e0fef85 4377 spin_lock_irq(&phba->hbalock);
1c067a42 4378 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4379 spin_unlock_irq(&phba->hbalock);
dea3101e 4380 resetcount++;
4381
ed957684
JS
4382 /* Call pre CONFIG_PORT mailbox command initialization. A
4383 * value of 0 means the call was successful. Any other
4384 * nonzero value is a failure, but if ERESTART is returned,
4385 * the driver may reset the HBA and try again.
4386 */
dea3101e 4387 rc = lpfc_config_port_prep(phba);
4388 if (rc == -ERESTART) {
ed957684 4389 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 4390 continue;
34b02dcd 4391 } else if (rc)
dea3101e 4392 break;
6d368e53 4393
2e0fef85 4394 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 4395 lpfc_config_port(phba, pmb);
4396 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
34b02dcd
JS
4397 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4398 LPFC_SLI3_HBQ_ENABLED |
4399 LPFC_SLI3_CRP_ENABLED |
bc73905a
JS
4400 LPFC_SLI3_BG_ENABLED |
4401 LPFC_SLI3_DSS_ENABLED);
ed957684 4402 if (rc != MBX_SUCCESS) {
dea3101e 4403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4404 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 4405 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 4406 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 4407 spin_lock_irq(&phba->hbalock);
04c68496 4408 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
2e0fef85
JS
4409 spin_unlock_irq(&phba->hbalock);
4410 rc = -ENXIO;
04c68496
JS
4411 } else {
4412 /* Allow asynchronous mailbox command to go through */
4413 spin_lock_irq(&phba->hbalock);
4414 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4415 spin_unlock_irq(&phba->hbalock);
ed957684 4416 done = 1;
cb69f7de
JS
4417
4418 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4419 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4420 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4421 "3110 Port did not grant ASABT\n");
04c68496 4422 }
dea3101e 4423 }
ed957684
JS
4424 if (!done) {
4425 rc = -EINVAL;
4426 goto do_prep_failed;
4427 }
04c68496
JS
4428 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4429 if (!pmb->u.mb.un.varCfgPort.cMA) {
34b02dcd
JS
4430 rc = -ENXIO;
4431 goto do_prep_failed;
4432 }
04c68496 4433 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 4434 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
04c68496
JS
4435 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4436 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4437 phba->max_vpi : phba->max_vports;
4438
34b02dcd
JS
4439 } else
4440 phba->max_vpi = 0;
bc73905a
JS
4441 phba->fips_level = 0;
4442 phba->fips_spec_rev = 0;
4443 if (pmb->u.mb.un.varCfgPort.gdss) {
04c68496 4444 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
bc73905a
JS
4445 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4446 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4447 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4448 "2850 Security Crypto Active. FIPS x%d "
4449 "(Spec Rev: x%d)",
4450 phba->fips_level, phba->fips_spec_rev);
4451 }
4452 if (pmb->u.mb.un.varCfgPort.sec_err) {
4453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4454 "2856 Config Port Security Crypto "
4455 "Error: x%x ",
4456 pmb->u.mb.un.varCfgPort.sec_err);
4457 }
04c68496 4458 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 4459 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 4460 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 4461 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
6e7288d9
JS
4462
4463 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4464 phba->port_gp = phba->mbox->us.s3_pgp.port;
e2a0a9d6
JS
4465
4466 if (phba->cfg_enable_bg) {
04c68496 4467 if (pmb->u.mb.un.varCfgPort.gbg)
e2a0a9d6
JS
4468 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4469 else
4470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4471 "0443 Adapter did not grant "
4472 "BlockGuard\n");
4473 }
34b02dcd 4474 } else {
8f34f4ce 4475 phba->hbq_get = NULL;
34b02dcd 4476 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 4477 phba->max_vpi = 0;
ed957684 4478 }
92d7f7b0 4479do_prep_failed:
ed957684
JS
4480 mempool_free(pmb, phba->mbox_mem_pool);
4481 return rc;
4482}
4483
e59058c4
JS
4484
4485/**
3621a710 4486 * lpfc_sli_hba_setup - SLI initialization function
e59058c4
JS
4487 * @phba: Pointer to HBA context object.
4488 *
4489 * This function is the main SLI initialization function. This function
4490 * is called by the HBA initialization code, HBA reset code and HBA
4491 * error attention handler code. Caller is not required to hold any
4492 * locks. This function issues config_port mailbox command to configure
4493 * the SLI, setup iocb rings and HBQ rings. In the end the function
4494 * calls the config_port_post function to issue init_link mailbox
4495 * command and to start the discovery. The function will return zero
4496 * if successful, else it will return negative error code.
4497 **/
ed957684
JS
4498int
4499lpfc_sli_hba_setup(struct lpfc_hba *phba)
4500{
4501 uint32_t rc;
6d368e53
JS
4502 int mode = 3, i;
4503 int longs;
ed957684
JS
4504
4505 switch (lpfc_sli_mode) {
4506 case 2:
78b2d852 4507 if (phba->cfg_enable_npiv) {
92d7f7b0 4508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011 4509 "1824 NPIV enabled: Override lpfc_sli_mode "
92d7f7b0 4510 "parameter (%d) to auto (0).\n",
e8b62011 4511 lpfc_sli_mode);
92d7f7b0
JS
4512 break;
4513 }
ed957684
JS
4514 mode = 2;
4515 break;
4516 case 0:
4517 case 3:
4518 break;
4519 default:
92d7f7b0 4520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4521 "1819 Unrecognized lpfc_sli_mode "
4522 "parameter: %d.\n", lpfc_sli_mode);
ed957684
JS
4523
4524 break;
4525 }
4526
9399627f
JS
4527 rc = lpfc_sli_config_port(phba, mode);
4528
ed957684 4529 if (rc && lpfc_sli_mode == 3)
92d7f7b0 4530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4531 "1820 Unable to select SLI-3. "
4532 "Not supported by adapter.\n");
ed957684 4533 if (rc && mode != 2)
9399627f 4534 rc = lpfc_sli_config_port(phba, 2);
ed957684 4535 if (rc)
dea3101e 4536 goto lpfc_sli_hba_setup_error;
4537
0d878419
JS
4538 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4539 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4540 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4541 if (!rc) {
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2709 This device supports "
4544 "Advanced Error Reporting (AER)\n");
4545 spin_lock_irq(&phba->hbalock);
4546 phba->hba_flag |= HBA_AER_ENABLED;
4547 spin_unlock_irq(&phba->hbalock);
4548 } else {
4549 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4550 "2708 This device does not support "
4551 "Advanced Error Reporting (AER)\n");
4552 phba->cfg_aer_support = 0;
4553 }
4554 }
4555
ed957684
JS
4556 if (phba->sli_rev == 3) {
4557 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4558 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
ed957684
JS
4559 } else {
4560 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4561 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 4562 phba->sli3_options = 0;
ed957684
JS
4563 }
4564
4565 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4566 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4567 phba->sli_rev, phba->max_vpi);
ed957684 4568 rc = lpfc_sli_ring_map(phba);
dea3101e 4569
4570 if (rc)
4571 goto lpfc_sli_hba_setup_error;
4572
6d368e53
JS
4573 /* Initialize VPIs. */
4574 if (phba->sli_rev == LPFC_SLI_REV3) {
4575 /*
4576 * The VPI bitmask and physical ID array are allocated
4577 * and initialized once only - at driver load. A port
4578 * reset doesn't need to reinitialize this memory.
4579 */
4580 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4581 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4582 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4583 GFP_KERNEL);
4584 if (!phba->vpi_bmask) {
4585 rc = -ENOMEM;
4586 goto lpfc_sli_hba_setup_error;
4587 }
4588
4589 phba->vpi_ids = kzalloc(
4590 (phba->max_vpi+1) * sizeof(uint16_t),
4591 GFP_KERNEL);
4592 if (!phba->vpi_ids) {
4593 kfree(phba->vpi_bmask);
4594 rc = -ENOMEM;
4595 goto lpfc_sli_hba_setup_error;
4596 }
4597 for (i = 0; i < phba->max_vpi; i++)
4598 phba->vpi_ids[i] = i;
4599 }
4600 }
4601
9399627f 4602 /* Init HBQs */
ed957684
JS
4603 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4604 rc = lpfc_sli_hbq_setup(phba);
4605 if (rc)
4606 goto lpfc_sli_hba_setup_error;
4607 }
04c68496 4608 spin_lock_irq(&phba->hbalock);
dea3101e 4609 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 4610 spin_unlock_irq(&phba->hbalock);
dea3101e 4611
4612 rc = lpfc_config_port_post(phba);
4613 if (rc)
4614 goto lpfc_sli_hba_setup_error;
4615
ed957684
JS
4616 return rc;
4617
92d7f7b0 4618lpfc_sli_hba_setup_error:
2e0fef85 4619 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 4620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4621 "0445 Firmware initialization failed\n");
dea3101e 4622 return rc;
4623}
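/*
 * Descriptive note: the overall SLI3 bring-up sequence implemented above is
 *
 *	lpfc_sli_config_port()   - restart the HBA, issue CONFIG_PORT
 *	lpfc_sli_ring_map()      - set up the iocb rings
 *	VPI bmask/ids allocation - once per driver load (SLI rev 3 only)
 *	lpfc_sli_hbq_setup()     - configure HBQs when granted
 *	lpfc_config_port_post()  - issue init_link, start discovery
 *
 * with any failure funneled to lpfc_sli_hba_setup_error.
 */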
4624
e59058c4 4625/**
da0436e9
JS
4626 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4627 * @phba: Pointer to HBA context object.
4628 * @mboxq: mailbox pointer.
4629 * This function issues a dump mailbox command to read config region
4630 * 23, parses the records in the region, and populates the driver
4631 * data structure.
e59058c4 4632 **/
da0436e9 4633static int
ff78d8f9 4634lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 4635{
ff78d8f9 4636 LPFC_MBOXQ_t *mboxq;
da0436e9
JS
4637 struct lpfc_dmabuf *mp;
4638 struct lpfc_mqe *mqe;
4639 uint32_t data_length;
4640 int rc;
dea3101e 4641
da0436e9
JS
4642 /* Program the default value of vlan_id and fc_map */
4643 phba->valid_vlan = 0;
4644 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4645 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4646 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 4647
ff78d8f9
JS
4648 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4649 if (!mboxq)
da0436e9
JS
4650 return -ENOMEM;
4651
ff78d8f9
JS
4652 mqe = &mboxq->u.mqe;
4653 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4654 rc = -ENOMEM;
4655 goto out_free_mboxq;
4656 }
4657
da0436e9
JS
4658 mp = (struct lpfc_dmabuf *) mboxq->context1;
4659 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4660
4661 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4662 "(%d):2571 Mailbox cmd x%x Status x%x "
4663 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4664 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4665 "CQ: x%x x%x x%x x%x\n",
4666 mboxq->vport ? mboxq->vport->vpi : 0,
4667 bf_get(lpfc_mqe_command, mqe),
4668 bf_get(lpfc_mqe_status, mqe),
4669 mqe->un.mb_words[0], mqe->un.mb_words[1],
4670 mqe->un.mb_words[2], mqe->un.mb_words[3],
4671 mqe->un.mb_words[4], mqe->un.mb_words[5],
4672 mqe->un.mb_words[6], mqe->un.mb_words[7],
4673 mqe->un.mb_words[8], mqe->un.mb_words[9],
4674 mqe->un.mb_words[10], mqe->un.mb_words[11],
4675 mqe->un.mb_words[12], mqe->un.mb_words[13],
4676 mqe->un.mb_words[14], mqe->un.mb_words[15],
4677 mqe->un.mb_words[16], mqe->un.mb_words[50],
4678 mboxq->mcqe.word0,
4679 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4680 mboxq->mcqe.trailer);
4681
4682 if (rc) {
4683 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4684 kfree(mp);
ff78d8f9
JS
4685 rc = -EIO;
4686 goto out_free_mboxq;
da0436e9
JS
4687 }
4688 data_length = mqe->un.mb_words[5];
a0c87cbd 4689 if (data_length > DMP_RGN23_SIZE) {
d11e31dd
JS
4690 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4691 kfree(mp);
ff78d8f9
JS
4692 rc = -EIO;
4693 goto out_free_mboxq;
d11e31dd 4694 }
dea3101e 4695
da0436e9
JS
4696 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4697 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4698 kfree(mp);
ff78d8f9
JS
4699 rc = 0;
4700
4701out_free_mboxq:
4702 mempool_free(mboxq, phba->mbox_mem_pool);
4703 return rc;
da0436e9 4704}
e59058c4
JS
4705
4706/**
da0436e9
JS
4707 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4708 * @phba: pointer to lpfc hba data structure.
4709 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4710 * @vpd: pointer to the memory to hold resulting port vpd data.
4711 * @vpd_size: On input, the number of bytes allocated to @vpd.
4712 * On output, the number of data bytes in @vpd.
e59058c4 4713 *
da0436e9
JS
4714 * This routine executes a READ_REV SLI4 mailbox command. In
4715 * addition, this routine gets the port vpd data.
4716 *
4717 * Return codes
af901ca1 4718 * 0 - successful
d439d286 4719 * -ENOMEM - could not allocate memory.
e59058c4 4720 **/
da0436e9
JS
4721static int
4722lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4723 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 4724{
da0436e9
JS
4725 int rc = 0;
4726 uint32_t dma_size;
4727 struct lpfc_dmabuf *dmabuf;
4728 struct lpfc_mqe *mqe;
dea3101e 4729
da0436e9
JS
4730 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4731 if (!dmabuf)
4732 return -ENOMEM;
4733
4734 /*
4735 * Get a DMA buffer for the vpd data resulting from the READ_REV
4736 * mailbox command.
a257bf90 4737 */
da0436e9
JS
4738 dma_size = *vpd_size;
4739 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4740 dma_size,
4741 &dmabuf->phys,
4742 GFP_KERNEL);
4743 if (!dmabuf->virt) {
4744 kfree(dmabuf);
4745 return -ENOMEM;
a257bf90 4746 }
da0436e9 4747 memset(dmabuf->virt, 0, dma_size);
a257bf90 4748
da0436e9
JS
4749 /*
4750 * The SLI4 implementation of READ_REV conflicts at word1,
4751 * bits 31:16 and SLI4 adds vpd functionality not present
4752 * in SLI3. This code corrects the conflicts.
1dcb58e5 4753 */
da0436e9
JS
4754 lpfc_read_rev(phba, mboxq);
4755 mqe = &mboxq->u.mqe;
4756 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4757 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4758 mqe->un.read_rev.word1 &= 0x0000FFFF;
4759 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4760 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4761
4762 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4763 if (rc) {
4764 dma_free_coherent(&phba->pcidev->dev, dma_size,
4765 dmabuf->virt, dmabuf->phys);
def9c7a9 4766 kfree(dmabuf);
da0436e9
JS
4767 return -EIO;
4768 }
1dcb58e5 4769
da0436e9
JS
4770 /*
4771 * The available vpd length cannot be bigger than the
4772 * DMA buffer passed to the port. Catch the less-than
4773 * case and update the caller's size.
4774 */
4775 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4776 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 4777
d7c47992
JS
4778 memcpy(vpd, dmabuf->virt, *vpd_size);
4779
da0436e9
JS
4780 dma_free_coherent(&phba->pcidev->dev, dma_size,
4781 dmabuf->virt, dmabuf->phys);
4782 kfree(dmabuf);
4783 return 0;
dea3101e 4784}
4785
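/*
 * Illustrative caller sketch (not part of the driver): lpfc_sli4_read_rev()
 * expects @vpd_size to hold the buffer size on entry and trims it to the
 * number of bytes actually returned. A minimal call, mirroring the usage
 * in lpfc_sli4_hba_setup() later in this file, might look like:
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		lpfc_parse_vpd(phba, vpd, vpd_size);
 *	kfree(vpd);
 */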
cd1c8301
JS
4786/**
4787 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4788 * @phba: pointer to lpfc hba data structure.
4789 *
4790 * This routine retrieves the SLI4 device physical port name that this
4791 * PCI function is attached to.
4792 *
4793 * Return codes
4907cb7b 4794 * 0 - successful
cd1c8301
JS
4795 * otherwise - failed to retrieve physical port name
4796 **/
4797static int
4798lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4799{
4800 LPFC_MBOXQ_t *mboxq;
cd1c8301
JS
4801 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4802 struct lpfc_controller_attribute *cntl_attr;
4803 struct lpfc_mbx_get_port_name *get_port_name;
4804 void *virtaddr = NULL;
4805 uint32_t alloclen, reqlen;
4806 uint32_t shdr_status, shdr_add_status;
4807 union lpfc_sli4_cfg_shdr *shdr;
4808 char cport_name = 0;
4809 int rc;
4810
4811 /* We assume nothing at this point */
4812 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4813 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4814
4815 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4816 if (!mboxq)
4817 return -ENOMEM;
cd1c8301 4818 /* obtain link type and link number via READ_CONFIG */
ff78d8f9
JS
4819 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4820 lpfc_sli4_read_config(phba);
4821 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4822 goto retrieve_ppname;
cd1c8301
JS
4823
4824 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4825 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4826 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4827 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4828 LPFC_SLI4_MBX_NEMBED);
4829 if (alloclen < reqlen) {
4830 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4831 "3084 Allocated DMA memory size (%d) is "
4832 "less than the requested DMA memory size "
4833 "(%d)\n", alloclen, reqlen);
4834 rc = -ENOMEM;
4835 goto out_free_mboxq;
4836 }
4837 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4838 virtaddr = mboxq->sge_array->addr[0];
4839 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4840 shdr = &mbx_cntl_attr->cfg_shdr;
4841 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4842 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4843 if (shdr_status || shdr_add_status || rc) {
4844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4845 "3085 Mailbox x%x (x%x/x%x) failed, "
4846 "rc:x%x, status:x%x, add_status:x%x\n",
4847 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4848 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4849 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4850 rc, shdr_status, shdr_add_status);
4851 rc = -ENXIO;
4852 goto out_free_mboxq;
4853 }
4854 cntl_attr = &mbx_cntl_attr->cntl_attr;
4855 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4856 phba->sli4_hba.lnk_info.lnk_tp =
4857 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4858 phba->sli4_hba.lnk_info.lnk_no =
4859 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4860 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4861 "3086 lnk_type:%d, lnk_numb:%d\n",
4862 phba->sli4_hba.lnk_info.lnk_tp,
4863 phba->sli4_hba.lnk_info.lnk_no);
4864
4865retrieve_ppname:
4866 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4867 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4868 sizeof(struct lpfc_mbx_get_port_name) -
4869 sizeof(struct lpfc_sli4_cfg_mhdr),
4870 LPFC_SLI4_MBX_EMBED);
4871 get_port_name = &mboxq->u.mqe.un.get_port_name;
4872 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4873 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4874 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4875 phba->sli4_hba.lnk_info.lnk_tp);
4876 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4877 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4878 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4879 if (shdr_status || shdr_add_status || rc) {
4880 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4881 "3087 Mailbox x%x (x%x/x%x) failed: "
4882 "rc:x%x, status:x%x, add_status:x%x\n",
4883 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4884 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4885 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4886 rc, shdr_status, shdr_add_status);
4887 rc = -ENXIO;
4888 goto out_free_mboxq;
4889 }
4890 switch (phba->sli4_hba.lnk_info.lnk_no) {
4891 case LPFC_LINK_NUMBER_0:
4892 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4893 &get_port_name->u.response);
4894 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4895 break;
4896 case LPFC_LINK_NUMBER_1:
4897 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4898 &get_port_name->u.response);
4899 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4900 break;
4901 case LPFC_LINK_NUMBER_2:
4902 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4903 &get_port_name->u.response);
4904 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4905 break;
4906 case LPFC_LINK_NUMBER_3:
4907 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4908 &get_port_name->u.response);
4909 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4910 break;
4911 default:
4912 break;
4913 }
4914
4915 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4916 phba->Port[0] = cport_name;
4917 phba->Port[1] = '\0';
4918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4919 "3091 SLI get port name: %s\n", phba->Port);
4920 }
4921
4922out_free_mboxq:
4923 if (rc != MBX_TIMEOUT) {
4924 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4925 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4926 else
4927 mempool_free(mboxq, phba->mbox_mem_pool);
4928 }
4929 return rc;
4930}
4931
e59058c4 4932/**
da0436e9
JS
4933 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4934 * @phba: pointer to lpfc hba data structure.
e59058c4 4935 *
da0436e9
JS
4936 * This routine is called to explicitly arm the SLI4 device's completion and
4937 * event queues.
4938 **/
4939static void
4940lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4941{
4942 uint8_t fcp_eqidx;
4943
4944 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4945 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
0558056c 4946 fcp_eqidx = 0;
2e90f4b5 4947 if (phba->sli4_hba.fcp_cq) {
67d12733 4948 do {
2e90f4b5
JS
4949 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4950 LPFC_QUEUE_REARM);
67d12733 4951 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
2e90f4b5 4952 }
67d12733
JS
4953 if (phba->sli4_hba.hba_eq) {
4954 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
2e90f4b5 4955 fcp_eqidx++)
67d12733 4956 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
2e90f4b5
JS
4957 LPFC_QUEUE_REARM);
4958 }
da0436e9
JS
4959}
4960
6d368e53
JS
4961/**
4962 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4963 * @phba: Pointer to HBA context object.
4964 * @type: The resource extent type.
b76f2dc9
JS
4965 * @extnt_count: buffer to hold port available extent count.
4966 * @extnt_size: buffer to hold element count per extent.
6d368e53 4967 *
b76f2dc9
JS
4968 * This function calls the port and retrieves the number of available
4969 * extents and their size for a particular extent type.
4970 *
4971 * Returns: 0 if successful. Nonzero otherwise.
6d368e53 4972 **/
b76f2dc9 4973int
6d368e53
JS
4974lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4975 uint16_t *extnt_count, uint16_t *extnt_size)
4976{
4977 int rc = 0;
4978 uint32_t length;
4979 uint32_t mbox_tmo;
4980 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4981 LPFC_MBOXQ_t *mbox;
4982
4983 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4984 if (!mbox)
4985 return -ENOMEM;
4986
4987 /* Find out how many extents are available for this resource type */
4988 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4989 sizeof(struct lpfc_sli4_cfg_mhdr));
4990 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4991 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4992 length, LPFC_SLI4_MBX_EMBED);
4993
4994 /* Send an extents count of 0 - the GET doesn't use it. */
4995 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4996 LPFC_SLI4_MBX_EMBED);
4997 if (unlikely(rc)) {
4998 rc = -EIO;
4999 goto err_exit;
5000 }
5001
5002 if (!phba->sli4_hba.intr_enable)
5003 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5004 else {
a183a15f 5005 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5006 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5007 }
5008 if (unlikely(rc)) {
5009 rc = -EIO;
5010 goto err_exit;
5011 }
5012
5013 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5014 if (bf_get(lpfc_mbox_hdr_status,
5015 &rsrc_info->header.cfg_shdr.response)) {
5016 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5017 "2930 Failed to get resource extents "
5018 "Status 0x%x Add'l Status 0x%x\n",
5019 bf_get(lpfc_mbox_hdr_status,
5020 &rsrc_info->header.cfg_shdr.response),
5021 bf_get(lpfc_mbox_hdr_add_status,
5022 &rsrc_info->header.cfg_shdr.response));
5023 rc = -EIO;
5024 goto err_exit;
5025 }
5026
5027 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5028 &rsrc_info->u.rsp);
5029 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5030 &rsrc_info->u.rsp);
8a9d2e80
JS
5031
5032 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5033 "3162 Retrieved extents type-%d from port: count:%d, "
5034 "size:%d\n", type, *extnt_count, *extnt_size);
5035
5036err_exit:
6d368e53
JS
5037 mempool_free(mbox, phba->mbox_mem_pool);
5038 return rc;
5039}
5040
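/*
 * Illustrative usage sketch (not part of the driver): callers pass two
 * uint16_t out-parameters and treat any nonzero return as failure:
 *
 *	uint16_t cnt, size;
 *
 *	if (lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					   &cnt, &size))
 *		return -EIO;
 *	(on success, cnt extents of size XRIs each are available)
 */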
5041/**
5042 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5043 * @phba: Pointer to HBA context object.
5044 * @type: The extent type to check.
5045 *
5046 * This function reads the current available extents from the port and checks
5047 * if the extent count or extent size has changed since the last access.
5048 * Callers use this routine after a port reset to determine if there is an
5049 * extent reprovisioning requirement.
5050 *
5051 * Returns:
5052 * -Error: error indicates problem.
5053 * 1: Extent count or size has changed.
5054 * 0: No changes.
5055 **/
5056static int
5057lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5058{
5059 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5060 uint16_t size_diff, rsrc_ext_size;
5061 int rc = 0;
5062 struct lpfc_rsrc_blks *rsrc_entry;
5063 struct list_head *rsrc_blk_list = NULL;
5064
5065 size_diff = 0;
5066 curr_ext_cnt = 0;
5067 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5068 &rsrc_ext_cnt,
5069 &rsrc_ext_size);
5070 if (unlikely(rc))
5071 return -EIO;
5072
5073 switch (type) {
5074 case LPFC_RSC_TYPE_FCOE_RPI:
5075 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5076 break;
5077 case LPFC_RSC_TYPE_FCOE_VPI:
5078 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5079 break;
5080 case LPFC_RSC_TYPE_FCOE_XRI:
5081 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5082 break;
5083 case LPFC_RSC_TYPE_FCOE_VFI:
5084 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5085 break;
5086 default:
5087 break;
5088 }
5089
5090 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5091 curr_ext_cnt++;
5092 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5093 size_diff++;
5094 }
5095
5096 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5097 rc = 1;
5098
5099 return rc;
5100}
5101
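/*
 * Illustrative post-reset pattern (not part of the driver): the caller
 * lpfc_sli4_alloc_resource_identifiers() below invokes this check once per
 * resource type and reprovisions all extents if any type reports a change;
 * note that a negative (error) return is also counted as a change there:
 *
 *	if (lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI) != 0)
 *		error++;
 */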
5102/**
5103 * lpfc_sli4_cfg_post_extnts - Post the resource extent allocation request.
5104 * @phba: Pointer to HBA context object.
5105 * @extnt_cnt: number of available extents.
5106 * @type: the extent type (rpi, xri, vfi, vpi).
5107 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5108 * @mbox: pointer to the caller's allocated mailbox structure.
5109 *
5110 * This function executes the extent allocation request. It also
5111 * sizes the mailbox (embedded or non-embedded) to hold the
5112 * allocated extents. It is the caller's responsibility to evaluate
5113 * the response.
5114 *
5115 * Returns:
5116 * -Error: Error value describes the condition found.
5117 * 0: if successful
5118 **/
5119static int
8a9d2e80 5120lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6d368e53
JS
5121 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5122{
5123 int rc = 0;
5124 uint32_t req_len;
5125 uint32_t emb_len;
5126 uint32_t alloc_len, mbox_tmo;
5127
5128 /* Calculate the total requested length of the dma memory */
8a9d2e80 5129 req_len = extnt_cnt * sizeof(uint16_t);
6d368e53
JS
5130
5131 /*
5132 * Calculate the size of an embedded mailbox. The uint32_t
5133 * accounts for the extents-specific word.
5134 */
5135 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5136 sizeof(uint32_t);
5137
5138 /*
5139 * Presume the allocation and response will fit into an embedded
5140 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5141 */
5142 *emb = LPFC_SLI4_MBX_EMBED;
5143 if (req_len > emb_len) {
8a9d2e80 5144 req_len = extnt_cnt * sizeof(uint16_t) +
6d368e53
JS
5145 sizeof(union lpfc_sli4_cfg_shdr) +
5146 sizeof(uint32_t);
5147 *emb = LPFC_SLI4_MBX_NEMBED;
5148 }
5149
5150 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5151 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5152 req_len, *emb);
5153 if (alloc_len < req_len) {
5154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
b76f2dc9 5155 "2982 Allocated DMA memory size (x%x) is "
6d368e53
JS
5156 "less than the requested DMA memory "
5157 "size (x%x)\n", alloc_len, req_len);
5158 return -ENOMEM;
5159 }
8a9d2e80 5160 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6d368e53
JS
5161 if (unlikely(rc))
5162 return -EIO;
5163
5164 if (!phba->sli4_hba.intr_enable)
5165 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5166 else {
a183a15f 5167 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5168 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5169 }
5170
5171 if (unlikely(rc))
5172 rc = -EIO;
5173 return rc;
5174}
5175
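/*
 * Worked sizing example (illustrative figures, not taken from the headers):
 * if sizeof(MAILBOX_t) were 256 bytes and sizeof(struct mbox_header) were
 * 24 bytes, then
 *
 *	emb_len = 256 - 24 - 4 = 228 bytes
 *
 * so an embedded mailbox could carry 228 / sizeof(uint16_t) = 114 extent
 * ids, and any larger request is reconfigured to the non-embedded form.
 */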
5176/**
5177 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5178 * @phba: Pointer to HBA context object.
5179 * @type: The resource extent type to allocate.
5180 *
5181 * This function allocates the number of elements for the specified
5182 * resource type.
5183 **/
5184static int
5185lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5186{
5187 bool emb = false;
5188 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5189 uint16_t rsrc_id, rsrc_start, j, k;
5190 uint16_t *ids;
5191 int i, rc;
5192 unsigned long longs;
5193 unsigned long *bmask;
5194 struct lpfc_rsrc_blks *rsrc_blks;
5195 LPFC_MBOXQ_t *mbox;
5196 uint32_t length;
5197 struct lpfc_id_range *id_array = NULL;
5198 void *virtaddr = NULL;
5199 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5200 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5201 struct list_head *ext_blk_list;
5202
5203 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5204 &rsrc_cnt,
5205 &rsrc_size);
5206 if (unlikely(rc))
5207 return -EIO;
5208
5209 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5210 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5211 "3009 No available Resource Extents "
5212 "for resource type 0x%x: Count: 0x%x, "
5213 "Size 0x%x\n", type, rsrc_cnt,
5214 rsrc_size);
5215 return -ENOMEM;
5216 }
5217
8a9d2e80
JS
5218 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5219 "2903 Post resource extents type-0x%x: "
5220 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6d368e53
JS
5221
5222 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5223 if (!mbox)
5224 return -ENOMEM;
5225
8a9d2e80 5226 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6d368e53
JS
5227 if (unlikely(rc)) {
5228 rc = -EIO;
5229 goto err_exit;
5230 }
5231
5232 /*
5233 * Figure out where the response is located. Then get local pointers
5234 * to the response data. The port is not guaranteed to honor the
5235 * full requested extent count, so update the local variable with the
5236 * allocated count from the port.
5237 */
5238 if (emb == LPFC_SLI4_MBX_EMBED) {
5239 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5240 id_array = &rsrc_ext->u.rsp.id[0];
5241 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5242 } else {
5243 virtaddr = mbox->sge_array->addr[0];
5244 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5245 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5246 id_array = &n_rsrc->id;
5247 }
5248
5249 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5250 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5251
5252 /*
5253 * Based on the resource size and count, correct the base and max
5254 * resource values.
5255 */
5256 length = sizeof(struct lpfc_rsrc_blks);
5257 switch (type) {
5258 case LPFC_RSC_TYPE_FCOE_RPI:
5259 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5260 sizeof(unsigned long),
5261 GFP_KERNEL);
5262 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5263 rc = -ENOMEM;
5264 goto err_exit;
5265 }
5266 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5267 sizeof(uint16_t),
5268 GFP_KERNEL);
5269 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5270 kfree(phba->sli4_hba.rpi_bmask);
5271 rc = -ENOMEM;
5272 goto err_exit;
5273 }
5274
5275 /*
5276 * The next_rpi was initialized with the maximum available
5277 * count but the port may allocate a smaller number. Catch
5278 * that case and update the next_rpi.
5279 */
5280 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5281
5282 /* Initialize local ptrs for common extent processing later. */
5283 bmask = phba->sli4_hba.rpi_bmask;
5284 ids = phba->sli4_hba.rpi_ids;
5285 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5286 break;
5287 case LPFC_RSC_TYPE_FCOE_VPI:
5288 phba->vpi_bmask = kzalloc(longs *
5289 sizeof(unsigned long),
5290 GFP_KERNEL);
5291 if (unlikely(!phba->vpi_bmask)) {
5292 rc = -ENOMEM;
5293 goto err_exit;
5294 }
5295 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5296 sizeof(uint16_t),
5297 GFP_KERNEL);
5298 if (unlikely(!phba->vpi_ids)) {
5299 kfree(phba->vpi_bmask);
5300 rc = -ENOMEM;
5301 goto err_exit;
5302 }
5303
5304 /* Initialize local ptrs for common extent processing later. */
5305 bmask = phba->vpi_bmask;
5306 ids = phba->vpi_ids;
5307 ext_blk_list = &phba->lpfc_vpi_blk_list;
5308 break;
5309 case LPFC_RSC_TYPE_FCOE_XRI:
5310 phba->sli4_hba.xri_bmask = kzalloc(longs *
5311 sizeof(unsigned long),
5312 GFP_KERNEL);
5313 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5314 rc = -ENOMEM;
5315 goto err_exit;
5316 }
8a9d2e80 5317 phba->sli4_hba.max_cfg_param.xri_used = 0;
6d368e53
JS
5318 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5319 sizeof(uint16_t),
5320 GFP_KERNEL);
5321 if (unlikely(!phba->sli4_hba.xri_ids)) {
5322 kfree(phba->sli4_hba.xri_bmask);
5323 rc = -ENOMEM;
5324 goto err_exit;
5325 }
5326
5327 /* Initialize local ptrs for common extent processing later. */
5328 bmask = phba->sli4_hba.xri_bmask;
5329 ids = phba->sli4_hba.xri_ids;
5330 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5331 break;
5332 case LPFC_RSC_TYPE_FCOE_VFI:
5333 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5334 sizeof(unsigned long),
5335 GFP_KERNEL);
5336 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5337 rc = -ENOMEM;
5338 goto err_exit;
5339 }
5340 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5341 sizeof(uint16_t),
5342 GFP_KERNEL);
5343 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5344 kfree(phba->sli4_hba.vfi_bmask);
5345 rc = -ENOMEM;
5346 goto err_exit;
5347 }
5348
5349 /* Initialize local ptrs for common extent processing later. */
5350 bmask = phba->sli4_hba.vfi_bmask;
5351 ids = phba->sli4_hba.vfi_ids;
5352 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5353 break;
5354 default:
5355 /* Unsupported Opcode. Fail call. */
5356 id_array = NULL;
5357 bmask = NULL;
5358 ids = NULL;
5359 ext_blk_list = NULL;
5360 goto err_exit;
5361 }
5362
5363 /*
5364 * Complete initializing the extent configuration with the
5365 * allocated ids assigned to this function. The bitmask serves
5366 * as an index into the array and manages the available ids. The
5367 * array just stores the ids communicated to the port via the wqes.
5368 */
5369 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5370 if ((i % 2) == 0)
5371 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5372 &id_array[k]);
5373 else
5374 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5375 &id_array[k]);
5376
5377 rsrc_blks = kzalloc(length, GFP_KERNEL);
5378 if (unlikely(!rsrc_blks)) {
5379 rc = -ENOMEM;
5380 kfree(bmask);
5381 kfree(ids);
5382 goto err_exit;
5383 }
5384 rsrc_blks->rsrc_start = rsrc_id;
5385 rsrc_blks->rsrc_size = rsrc_size;
5386 list_add_tail(&rsrc_blks->list, ext_blk_list);
5387 rsrc_start = rsrc_id;
5388 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5389 phba->sli4_hba.scsi_xri_start = rsrc_start +
5390 lpfc_sli4_get_els_iocb_cnt(phba);
5391
5392 while (rsrc_id < (rsrc_start + rsrc_size)) {
5393 ids[j] = rsrc_id;
5394 rsrc_id++;
5395 j++;
5396 }
5397 /* Entire word processed. Get next word.*/
5398 if ((i % 2) == 1)
5399 k++;
5400 }
5401 err_exit:
5402 lpfc_sli4_mbox_cmd_free(phba, mbox);
5403 return rc;
5404}
5405
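/*
 * Illustrative decode of the response (not part of the driver): each
 * lpfc_id_range word carries two 16-bit extent base ids, which is why the
 * loop above reads word4_0 for even i, word4_1 for odd i, and advances k
 * only after both halves of a word are consumed. With rsrc_cnt = 3,
 * rsrc_size = 4, and base ids 0, 16 and 64, the ids array would become:
 *
 *	ids[] = { 0, 1, 2, 3, 16, 17, 18, 19, 64, 65, 66, 67 }
 */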
5406/**
5407 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5408 * @phba: Pointer to HBA context object.
5409 * @type: the extent's type.
5410 *
5411 * This function deallocates all extents of a particular resource type.
5412 * SLI4 does not allow for deallocating a particular extent range. It
5413 * is the caller's responsibility to release all kernel memory resources.
5414 **/
5415static int
5416lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5417{
5418 int rc;
5419 uint32_t length, mbox_tmo = 0;
5420 LPFC_MBOXQ_t *mbox;
5421 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5422 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5423
5424 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5425 if (!mbox)
5426 return -ENOMEM;
5427
5428 /*
5429 * This function sends an embedded mailbox because it only sends
5430 * the resource type. All extents of this type are released by the
5431 * port.
5432 */
5433 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5434 sizeof(struct lpfc_sli4_cfg_mhdr));
5435 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5436 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5437 length, LPFC_SLI4_MBX_EMBED);
5438
5439 /* Send an extents count of 0 - the dealloc doesn't use it. */
5440 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5441 LPFC_SLI4_MBX_EMBED);
5442 if (unlikely(rc)) {
5443 rc = -EIO;
5444 goto out_free_mbox;
5445 }
5446 if (!phba->sli4_hba.intr_enable)
5447 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5448 else {
a183a15f 5449 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5450 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5451 }
5452 if (unlikely(rc)) {
5453 rc = -EIO;
5454 goto out_free_mbox;
5455 }
5456
5457 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5458 if (bf_get(lpfc_mbox_hdr_status,
5459 &dealloc_rsrc->header.cfg_shdr.response)) {
5460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5461 "2919 Failed to release resource extents "
5462 "for type %d - Status 0x%x Add'l Status 0x%x. "
5463 "Resource memory not released.\n",
5464 type,
5465 bf_get(lpfc_mbox_hdr_status,
5466 &dealloc_rsrc->header.cfg_shdr.response),
5467 bf_get(lpfc_mbox_hdr_add_status,
5468 &dealloc_rsrc->header.cfg_shdr.response));
5469 rc = -EIO;
5470 goto out_free_mbox;
5471 }
5472
5473 /* Release kernel memory resources for the specific type. */
5474 switch (type) {
5475 case LPFC_RSC_TYPE_FCOE_VPI:
5476 kfree(phba->vpi_bmask);
5477 kfree(phba->vpi_ids);
5478 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5479 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5480 &phba->lpfc_vpi_blk_list, list) {
5481 list_del_init(&rsrc_blk->list);
5482 kfree(rsrc_blk);
5483 }
5484 break;
5485 case LPFC_RSC_TYPE_FCOE_XRI:
5486 kfree(phba->sli4_hba.xri_bmask);
5487 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
5488 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5489 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5490 list_del_init(&rsrc_blk->list);
5491 kfree(rsrc_blk);
5492 }
5493 break;
5494 case LPFC_RSC_TYPE_FCOE_VFI:
5495 kfree(phba->sli4_hba.vfi_bmask);
5496 kfree(phba->sli4_hba.vfi_ids);
5497 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5498 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5499 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5500 list_del_init(&rsrc_blk->list);
5501 kfree(rsrc_blk);
5502 }
5503 break;
5504 case LPFC_RSC_TYPE_FCOE_RPI:
5505 /* RPI bitmask and physical id array are cleaned up earlier. */
5506 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5507 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5508 list_del_init(&rsrc_blk->list);
5509 kfree(rsrc_blk);
5510 }
5511 break;
5512 default:
5513 break;
5514 }
5515
5516 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5517
5518 out_free_mbox:
5519 mempool_free(mbox, phba->mbox_mem_pool);
5520 return rc;
5521}
5522
5523/**
5524 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5525 * @phba: Pointer to HBA context object.
5526 *
5527 * This function allocates all SLI4 resource identifiers.
5528 **/
5529int
5530lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5531{
5532 int i, rc, error = 0;
5533 uint16_t count, base;
5534 unsigned long longs;
5535
ff78d8f9
JS
5536 if (!phba->sli4_hba.rpi_hdrs_in_use)
5537 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6d368e53
JS
5538 if (phba->sli4_hba.extents_in_use) {
5539 /*
5540 * The port supports resource extents. The XRI, VPI, VFI, RPI
5541 * resource extent count must be read and allocated before
5542 * provisioning the resource id arrays.
5543 */
5544 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5545 LPFC_IDX_RSRC_RDY) {
5546 /*
5547 * Extent-based resources are set - the driver could
5548 * be in a port reset. Figure out if any corrective
5549 * actions need to be taken.
5550 */
5551 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5552 LPFC_RSC_TYPE_FCOE_VFI);
5553 if (rc != 0)
5554 error++;
5555 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5556 LPFC_RSC_TYPE_FCOE_VPI);
5557 if (rc != 0)
5558 error++;
5559 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5560 LPFC_RSC_TYPE_FCOE_XRI);
5561 if (rc != 0)
5562 error++;
5563 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5564 LPFC_RSC_TYPE_FCOE_RPI);
5565 if (rc != 0)
5566 error++;
5567
5568 /*
5569 * It's possible that the number of resources
5570 * provided to this port instance changed between
5571 * resets. Detect this condition and reallocate
5572 * resources. Otherwise, there is no action.
5573 */
5574 if (error) {
5575 lpfc_printf_log(phba, KERN_INFO,
5576 LOG_MBOX | LOG_INIT,
5577 "2931 Detected extent resource "
5578 "change. Reallocating all "
5579 "extents.\n");
5580 rc = lpfc_sli4_dealloc_extent(phba,
5581 LPFC_RSC_TYPE_FCOE_VFI);
5582 rc = lpfc_sli4_dealloc_extent(phba,
5583 LPFC_RSC_TYPE_FCOE_VPI);
5584 rc = lpfc_sli4_dealloc_extent(phba,
5585 LPFC_RSC_TYPE_FCOE_XRI);
5586 rc = lpfc_sli4_dealloc_extent(phba,
5587 LPFC_RSC_TYPE_FCOE_RPI);
5588 } else
5589 return 0;
5590 }
5591
5592 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5593 if (unlikely(rc))
5594 goto err_exit;
5595
5596 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5597 if (unlikely(rc))
5598 goto err_exit;
5599
5600 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5601 if (unlikely(rc))
5602 goto err_exit;
5603
5604 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5605 if (unlikely(rc))
5606 goto err_exit;
5607 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5608 LPFC_IDX_RSRC_RDY);
5609 return rc;
5610 } else {
5611 /*
5612 * The port does not support resource extents. The XRI, VPI,
5613 * VFI, RPI resource ids were determined from READ_CONFIG.
5614 * Just allocate the bitmasks and provision the resource id
5615 * arrays. If a port reset is active, the resources don't
5616 * need any action - just exit.
5617 */
5618 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
ff78d8f9
JS
5619 LPFC_IDX_RSRC_RDY) {
5620 lpfc_sli4_dealloc_resource_identifiers(phba);
5621 lpfc_sli4_remove_rpis(phba);
5622 }
6d368e53
JS
5623 /* RPIs. */
5624 count = phba->sli4_hba.max_cfg_param.max_rpi;
5625 base = phba->sli4_hba.max_cfg_param.rpi_base;
5626 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5627 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5628 sizeof(unsigned long),
5629 GFP_KERNEL);
5630 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5631 rc = -ENOMEM;
5632 goto err_exit;
5633 }
5634 phba->sli4_hba.rpi_ids = kzalloc(count *
5635 sizeof(uint16_t),
5636 GFP_KERNEL);
5637 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5638 rc = -ENOMEM;
5639 goto free_rpi_bmask;
5640 }
5641
5642 for (i = 0; i < count; i++)
5643 phba->sli4_hba.rpi_ids[i] = base + i;
5644
5645 /* VPIs. */
5646 count = phba->sli4_hba.max_cfg_param.max_vpi;
5647 base = phba->sli4_hba.max_cfg_param.vpi_base;
5648 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5649 phba->vpi_bmask = kzalloc(longs *
5650 sizeof(unsigned long),
5651 GFP_KERNEL);
5652 if (unlikely(!phba->vpi_bmask)) {
5653 rc = -ENOMEM;
5654 goto free_rpi_ids;
5655 }
5656 phba->vpi_ids = kzalloc(count *
5657 sizeof(uint16_t),
5658 GFP_KERNEL);
5659 if (unlikely(!phba->vpi_ids)) {
5660 rc = -ENOMEM;
5661 goto free_vpi_bmask;
5662 }
5663
5664 for (i = 0; i < count; i++)
5665 phba->vpi_ids[i] = base + i;
5666
5667 /* XRIs. */
5668 count = phba->sli4_hba.max_cfg_param.max_xri;
5669 base = phba->sli4_hba.max_cfg_param.xri_base;
5670 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5671 phba->sli4_hba.xri_bmask = kzalloc(longs *
5672 sizeof(unsigned long),
5673 GFP_KERNEL);
5674 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5675 rc = -ENOMEM;
5676 goto free_vpi_ids;
5677 }
41899be7 5678 phba->sli4_hba.max_cfg_param.xri_used = 0;
6d368e53
JS
5679 phba->sli4_hba.xri_ids = kzalloc(count *
5680 sizeof(uint16_t),
5681 GFP_KERNEL);
5682 if (unlikely(!phba->sli4_hba.xri_ids)) {
5683 rc = -ENOMEM;
5684 goto free_xri_bmask;
5685 }
5686
5687 for (i = 0; i < count; i++)
5688 phba->sli4_hba.xri_ids[i] = base + i;
5689
5690 /* VFIs. */
5691 count = phba->sli4_hba.max_cfg_param.max_vfi;
5692 base = phba->sli4_hba.max_cfg_param.vfi_base;
5693 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5694 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5695 sizeof(unsigned long),
5696 GFP_KERNEL);
5697 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5698 rc = -ENOMEM;
5699 goto free_xri_ids;
5700 }
5701 phba->sli4_hba.vfi_ids = kzalloc(count *
5702 sizeof(uint16_t),
5703 GFP_KERNEL);
5704 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5705 rc = -ENOMEM;
5706 goto free_vfi_bmask;
5707 }
5708
5709 for (i = 0; i < count; i++)
5710 phba->sli4_hba.vfi_ids[i] = base + i;
5711
5712 /*
5713 * Mark all resources ready. An HBA reset doesn't need
5714 * to reset the initialization.
5715 */
5716 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5717 LPFC_IDX_RSRC_RDY);
5718 return 0;
5719 }
5720
5721 free_vfi_bmask:
5722 kfree(phba->sli4_hba.vfi_bmask);
5723 free_xri_ids:
5724 kfree(phba->sli4_hba.xri_ids);
5725 free_xri_bmask:
5726 kfree(phba->sli4_hba.xri_bmask);
5727 free_vpi_ids:
5728 kfree(phba->vpi_ids);
5729 free_vpi_bmask:
5730 kfree(phba->vpi_bmask);
5731 free_rpi_ids:
5732 kfree(phba->sli4_hba.rpi_ids);
5733 free_rpi_bmask:
5734 kfree(phba->sli4_hba.rpi_bmask);
5735 err_exit:
5736 return rc;
5737}
5738
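/*
 * Worked example of the bitmask sizing above (hypothetical count): with
 * max_rpi = 4096 on a 64-bit host,
 *
 *	longs = (4096 + BITS_PER_LONG - 1) / BITS_PER_LONG = 64
 *
 * so rpi_bmask occupies 64 * sizeof(unsigned long) = 512 bytes, while
 * rpi_ids holds 4096 uint16_t entries mapping bit positions to port ids.
 */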
5739/**
5740 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5741 * @phba: Pointer to HBA context object.
5742 *
5743 * This function releases all SLI4 resource identifiers allocated by
5744 * the driver.
5745 **/
5746int
5747lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5748{
5749 if (phba->sli4_hba.extents_in_use) {
5750 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5751 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5752 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5753 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5754 } else {
5755 kfree(phba->vpi_bmask);
5756 kfree(phba->vpi_ids);
5757 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5758 kfree(phba->sli4_hba.xri_bmask);
5759 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
5760 kfree(phba->sli4_hba.vfi_bmask);
5761 kfree(phba->sli4_hba.vfi_ids);
5762 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5763 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5764 }
5765
5766 return 0;
5767}
5768
b76f2dc9
JS
5769/**
5770 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5771 * @phba: Pointer to HBA context object.
5772 * @type: The resource extent type.
5773 * @extnt_cnt: buffer to hold port extent count response.
5774 * @extnt_size: buffer to hold port extent size response.
5775 *
5776 * This function calls the port to read the host allocated extents
5777 * for a particular type.
5778 **/
5779int
5780lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5781 uint16_t *extnt_cnt, uint16_t *extnt_size)
5782{
5783 bool emb;
5784 int rc = 0;
5785 uint16_t curr_blks = 0;
5786 uint32_t req_len, emb_len;
5787 uint32_t alloc_len, mbox_tmo;
5788 struct list_head *blk_list_head;
5789 struct lpfc_rsrc_blks *rsrc_blk;
5790 LPFC_MBOXQ_t *mbox;
5791 void *virtaddr = NULL;
5792 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5793 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5794 union lpfc_sli4_cfg_shdr *shdr;
5795
5796 switch (type) {
5797 case LPFC_RSC_TYPE_FCOE_VPI:
5798 blk_list_head = &phba->lpfc_vpi_blk_list;
5799 break;
5800 case LPFC_RSC_TYPE_FCOE_XRI:
5801 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5802 break;
5803 case LPFC_RSC_TYPE_FCOE_VFI:
5804 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5805 break;
5806 case LPFC_RSC_TYPE_FCOE_RPI:
5807 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5808 break;
5809 default:
5810 return -EIO;
5811 }
5812
5813 /* Count the number of extents currently allocated for this type. */
5814 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5815 if (curr_blks == 0) {
5816 /*
5817 * The GET_ALLOCATED mailbox does not return the size,
5818 * just the count. All extents of a given type have the
5819 * same size, so take the size from the first allocated
5820 * block and set the return value now.
5822 */
5823 *extnt_size = rsrc_blk->rsrc_size;
5824 }
5825 curr_blks++;
5826 }
5827
5828 /* Calculate the total requested length of the dma memory. */
5829 req_len = curr_blks * sizeof(uint16_t);
5830
5831 /*
5832 * Calculate the size of an embedded mailbox. The uint32_t
5833 * accounts for the extents-specific word.
5834 */
5835 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5836 sizeof(uint32_t);
5837
5838 /*
5839 * Presume the allocation and response will fit into an embedded
5840 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5841 */
5842 emb = LPFC_SLI4_MBX_EMBED;
5843 req_len = emb_len;
5844 if (req_len > emb_len) {
5845 req_len = curr_blks * sizeof(uint16_t) +
5846 sizeof(union lpfc_sli4_cfg_shdr) +
5847 sizeof(uint32_t);
5848 emb = LPFC_SLI4_MBX_NEMBED;
5849 }
5850
5851 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (!mbox)
5853 return -ENOMEM;
5854 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5855
5856 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5857 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5858 req_len, emb);
5859 if (alloc_len < req_len) {
5860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5861 "2983 Allocated DMA memory size (x%x) is "
5862 "less than the requested DMA memory "
5863 "size (x%x)\n", alloc_len, req_len);
5864 rc = -ENOMEM;
5865 goto err_exit;
5866 }
5867 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5868 if (unlikely(rc)) {
5869 rc = -EIO;
5870 goto err_exit;
5871 }
5872
5873 if (!phba->sli4_hba.intr_enable)
5874 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5875 else {
a183a15f 5876 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
b76f2dc9
JS
5877 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5878 }
5879
5880 if (unlikely(rc)) {
5881 rc = -EIO;
5882 goto err_exit;
5883 }
5884
5885 /*
5886 * Figure out where the response is located. Then get local pointers
5887 * to the response data. The port is not guaranteed to honor the
5888 * full requested extent count, so update the local variable with the
5889 * allocated count from the port.
5890 */
5891 if (emb == LPFC_SLI4_MBX_EMBED) {
5892 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5893 shdr = &rsrc_ext->header.cfg_shdr;
5894 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5895 } else {
5896 virtaddr = mbox->sge_array->addr[0];
5897 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5898 shdr = &n_rsrc->cfg_shdr;
5899 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5900 }
5901
5902 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5903 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5904 "2984 Failed to read allocated resources "
5905 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5906 type,
5907 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5908 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5909 rc = -EIO;
5910 goto err_exit;
5911 }
5912 err_exit:
5913 lpfc_sli4_mbox_cmd_free(phba, mbox);
5914 return rc;
5915}
5916
8a9d2e80
JS
5917/**
5918 * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
5919 * @phba: pointer to lpfc hba data structure.
5920 *
5921 * This routine walks the list of els buffers that have been allocated and
5922 * reposts them to the port using SGL block post. This is needed after a
5923 * pci_function_reset/warm_start or start. It attempts to construct blocks
5924 * of els buffer sgls which contain contiguous xris and uses the non-embedded
5925 * SGL block post mailbox commands to post them to the port. For any single
5926 * els buffer sgl with a non-contiguous xri, it uses the embedded SGL post
5927 * mailbox command for posting.
5928 *
5929 * Returns: 0 = success, non-zero failure.
5930 **/
5931static int
5932lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5933{
5934 struct lpfc_sglq *sglq_entry = NULL;
5935 struct lpfc_sglq *sglq_entry_next = NULL;
5936 struct lpfc_sglq *sglq_entry_first = NULL;
5937 int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5938 int last_xritag = NO_XRI;
5939 LIST_HEAD(prep_sgl_list);
5940 LIST_HEAD(blck_sgl_list);
5941 LIST_HEAD(allc_sgl_list);
5942 LIST_HEAD(post_sgl_list);
5943 LIST_HEAD(free_sgl_list);
5944
5945 spin_lock(&phba->hbalock);
5946 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5947 spin_unlock(&phba->hbalock);
5948
5949 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5950 &allc_sgl_list, list) {
5951 list_del_init(&sglq_entry->list);
5952 block_cnt++;
5953 if ((last_xritag != NO_XRI) &&
5954 (sglq_entry->sli4_xritag != last_xritag + 1)) {
5955 /* a hole in xri block, form a sgl posting block */
5956 list_splice_init(&prep_sgl_list, &blck_sgl_list);
5957 post_cnt = block_cnt - 1;
5958 /* prepare list for next posting block */
5959 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5960 block_cnt = 1;
5961 } else {
5962 /* prepare list for next posting block */
5963 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5964 /* enough sgls for non-embed sgl mbox command */
5965 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
5966 list_splice_init(&prep_sgl_list,
5967 &blck_sgl_list);
5968 post_cnt = block_cnt;
5969 block_cnt = 0;
5970 }
5971 }
5972 num_posted++;
5973
5974 /* keep track of last sgl's xritag */
5975 last_xritag = sglq_entry->sli4_xritag;
5976
5977 /* end of repost sgl list condition for els buffers */
5978 if (num_posted == phba->sli4_hba.els_xri_cnt) {
5979 if (post_cnt == 0) {
5980 list_splice_init(&prep_sgl_list,
5981 &blck_sgl_list);
5982 post_cnt = block_cnt;
5983 } else if (block_cnt == 1) {
5984 status = lpfc_sli4_post_sgl(phba,
5985 sglq_entry->phys, 0,
5986 sglq_entry->sli4_xritag);
5987 if (!status) {
5988 /* successful, put sgl to posted list */
5989 list_add_tail(&sglq_entry->list,
5990 &post_sgl_list);
5991 } else {
5992 /* Failure, put sgl to free list */
5993 lpfc_printf_log(phba, KERN_WARNING,
5994 LOG_SLI,
5995 "3159 Failed to post els "
5996 "sgl, xritag:x%x\n",
5997 sglq_entry->sli4_xritag);
5998 list_add_tail(&sglq_entry->list,
5999 &free_sgl_list);
6000 spin_lock_irq(&phba->hbalock);
6001 phba->sli4_hba.els_xri_cnt--;
6002 spin_unlock_irq(&phba->hbalock);
6003 }
6004 }
6005 }
6006
6007 /* continue until a nembed page worth of sgls */
6008 if (post_cnt == 0)
6009 continue;
6010
6011 /* post the els buffer list sgls as a block */
6012 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6013 post_cnt);
6014
6015 if (!status) {
6016 /* success, put sgl list to posted sgl list */
6017 list_splice_init(&blck_sgl_list, &post_sgl_list);
6018 } else {
6019 /* Failure, put sgl list to free sgl list */
6020 sglq_entry_first = list_first_entry(&blck_sgl_list,
6021 struct lpfc_sglq,
6022 list);
6023 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6024 "3160 Failed to post els sgl-list, "
6025 "xritag:x%x-x%x\n",
6026 sglq_entry_first->sli4_xritag,
6027 (sglq_entry_first->sli4_xritag +
6028 post_cnt - 1));
6029 list_splice_init(&blck_sgl_list, &free_sgl_list);
6030 spin_lock_irq(&phba->hbalock);
6031 phba->sli4_hba.els_xri_cnt -= post_cnt;
6032 spin_unlock_irq(&phba->hbalock);
6033 }
6034
6035 /* don't reset xritag due to hole in xri block */
6036 if (block_cnt == 0)
6037 last_xritag = NO_XRI;
6038
6039 /* reset els sgl post count for next round of posting */
6040 post_cnt = 0;
6041 }
6042
6043 /* free the els sgls that failed to post */
6044 lpfc_free_sgl_list(phba, &free_sgl_list);
6045
6046 /* push posted els sgls to the available list */
6047 if (!list_empty(&post_sgl_list)) {
6048 spin_lock(&phba->hbalock);
6049 list_splice_init(&post_sgl_list,
6050 &phba->sli4_hba.lpfc_sgl_list);
6051 spin_unlock(&phba->hbalock);
6052 } else {
6053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6054 "3161 Failure to post els sgl to port.\n");
6055 return -EIO;
6056 }
6057 return 0;
6058}
6059
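/*
 * Illustrative trace of the block-forming logic above (made-up xritags):
 * given sgls with xritags 10, 11, 12, 20, 21, the hole between 12 and 20
 * closes the first block, so {10, 11, 12} is posted with the non-embedded
 * block command and a new block starts at 20. A final single-entry block
 * left by such a hole is posted via the embedded lpfc_sli4_post_sgl()
 * path instead.
 */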
da0436e9
JS
6060/**
6061 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6062 * @phba: Pointer to HBA context object.
6063 *
6064 * This function is the main SLI4 device initialization PCI function. This
6065 * function is called by the HBA initialization code, HBA reset code and
6066 * HBA error attention handler code. Caller is not required to hold any
6067 * locks.
6068 **/
6069int
6070lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6071{
6072 int rc;
6073 LPFC_MBOXQ_t *mboxq;
6074 struct lpfc_mqe *mqe;
6075 uint8_t *vpd;
6076 uint32_t vpd_size;
6077 uint32_t ftr_rsp = 0;
6078 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6079 struct lpfc_vport *vport = phba->pport;
6080 struct lpfc_dmabuf *mp;
6081
6082 /* Perform a PCI function reset to start from clean */
6083 rc = lpfc_pci_function_reset(phba);
6084 if (unlikely(rc))
6085 return -ENODEV;
6086
6087 /* Check the HBA Host Status Register for readiness */
6088 rc = lpfc_sli4_post_status_check(phba);
6089 if (unlikely(rc))
6090 return -ENODEV;
6091 else {
6092 spin_lock_irq(&phba->hbalock);
6093 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6094 spin_unlock_irq(&phba->hbalock);
6095 }
6096
6097 /*
6098 * Allocate a single mailbox container for initializing the
6099 * port.
6100 */
6101 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6102 if (!mboxq)
6103 return -ENOMEM;
6104
da0436e9 6105 /* Issue READ_REV to collect vpd and FW information. */
49198b37 6106 vpd_size = SLI4_PAGE_SIZE;
da0436e9
JS
6107 vpd = kzalloc(vpd_size, GFP_KERNEL);
6108 if (!vpd) {
6109 rc = -ENOMEM;
6110 goto out_free_mbox;
6111 }
6112
6113 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
76a95d75
JS
6114 if (unlikely(rc)) {
6115 kfree(vpd);
6116 goto out_free_mbox;
6117 }
da0436e9 6118 mqe = &mboxq->u.mqe;
f1126688
JS
6119 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6120 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
76a95d75
JS
6121 phba->hba_flag |= HBA_FCOE_MODE;
6122 else
6123 phba->hba_flag &= ~HBA_FCOE_MODE;
45ed1190
JS
6124
6125 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6126 LPFC_DCBX_CEE_MODE)
6127 phba->hba_flag |= HBA_FIP_SUPPORT;
6128 else
6129 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6130
4f2e66c6
JS
6131 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6132
c31098ce 6133 if (phba->sli_rev != LPFC_SLI_REV4) {
da0436e9
JS
6134 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6135 "0376 READ_REV Error. SLI Level %d "
6136 "FCoE enabled %d\n",
76a95d75 6137 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 6138 rc = -EIO;
76a95d75
JS
6139 kfree(vpd);
6140 goto out_free_mbox;
da0436e9 6141 }
cd1c8301 6142
ff78d8f9
JS
6143 /*
6144 * Continue initialization with default values even if the driver fails
6145 * to read the FCoE param config regions; only read the parameters if
6146 * the board is FCoE.
6147 */
6148 if (phba->hba_flag & HBA_FCOE_MODE &&
6149 lpfc_sli4_read_fcoe_params(phba))
6150 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6151 "2570 Failed to read FCoE parameters\n");
6152
cd1c8301
JS
6153 /*
6154 * Retrieve the sli4 device physical port name; a failure here
6155 * is considered non-fatal.
6156 */
6157 rc = lpfc_sli4_retrieve_pport_name(phba);
6158 if (!rc)
6159 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6160 "3080 Successful retrieving SLI4 device "
6161 "physical port name: %s.\n", phba->Port);
6162
da0436e9
JS
6163 /*
6164 * Evaluate the read rev and vpd data. Populate the driver
6165 * state with the results. If this routine fails, the failure
6166 * is not fatal as the driver will use generic values.
6167 */
6168 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6169 if (unlikely(!rc)) {
6170 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6171 "0377 Error %d parsing vpd. "
6172 "Using defaults.\n", rc);
6173 rc = 0;
6174 }
76a95d75 6175 kfree(vpd);
da0436e9 6176
f1126688
JS
6177 /* Save information as VPD data */
6178 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6179 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6180 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6181 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6182 &mqe->un.read_rev);
6183 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6184 &mqe->un.read_rev);
6185 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6186 &mqe->un.read_rev);
6187 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6188 &mqe->un.read_rev);
6189 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6190 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6191 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6192 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6193 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6194 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6195 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6196 "(%d):0380 READ_REV Status x%x "
6197 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6198 mboxq->vport ? mboxq->vport->vpi : 0,
6199 bf_get(lpfc_mqe_status, mqe),
6200 phba->vpd.rev.opFwName,
6201 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6202 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9
JS
6203
6204 /*
6205 * Discover the port's supported feature set and match it against the
6206 * host's requests.
6207 */
6208 lpfc_request_features(phba, mboxq);
6209 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6210 if (unlikely(rc)) {
6211 rc = -EIO;
76a95d75 6212 goto out_free_mbox;
da0436e9
JS
6213 }
6214
6215 /*
6216 * The port must support FCP initiator mode as this is the
6217 * only mode running in the host.
6218 */
6219 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6220 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6221 "0378 No support for fcpi mode.\n");
6222 ftr_rsp++;
6223 }
fedd3b7b
JS
6224 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6225 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6226 else
6227 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
da0436e9
JS
6228 /*
6229 * If the port cannot support the host's requested features
6230 * then turn off the global config parameters to disable the
6231 * feature in the driver. This is not a fatal error.
6232 */
bf08611b
JS
6233 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6234 if (phba->cfg_enable_bg) {
6235 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6236 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6237 else
6238 ftr_rsp++;
6239 }
da0436e9
JS
6240
6241 if (phba->max_vpi && phba->cfg_enable_npiv &&
6242 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6243 ftr_rsp++;
6244
6245 if (ftr_rsp) {
6246 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6247 "0379 Feature Mismatch Data: x%08x %08x "
6248 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6249 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6250 phba->cfg_enable_npiv, phba->max_vpi);
6251 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6252 phba->cfg_enable_bg = 0;
6253 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6254 phba->cfg_enable_npiv = 0;
6255 }
6256
6257 /* These SLI3 features are assumed in SLI4 */
6258 spin_lock_irq(&phba->hbalock);
6259 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6260 spin_unlock_irq(&phba->hbalock);
6261
6d368e53
JS
6262 /*
6263 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6264 * calls depend on these resources to complete port setup.
6265 */
6266 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6267 if (rc) {
6268 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6269 "2920 Failed to alloc Resource IDs "
6270 "rc = x%x\n", rc);
6271 goto out_free_mbox;
6272 }
6273
da0436e9 6274 /* Read the port's service parameters. */
9f1177a3
JS
6275 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6276 if (rc) {
6277 phba->link_state = LPFC_HBA_ERROR;
6278 rc = -ENOMEM;
76a95d75 6279 goto out_free_mbox;
9f1177a3
JS
6280 }
6281
da0436e9
JS
6282 mboxq->vport = vport;
6283 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6284 mp = (struct lpfc_dmabuf *) mboxq->context1;
6285 if (rc == MBX_SUCCESS) {
6286 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6287 rc = 0;
6288 }
6289
6290 /*
6291 * This memory was allocated by the lpfc_read_sparam routine. Release
6292 * it to the mbuf pool.
6293 */
6294 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6295 kfree(mp);
6296 mboxq->context1 = NULL;
6297 if (unlikely(rc)) {
6298 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6299 "0382 READ_SPARAM command failed "
6300 "status %d, mbxStatus x%x\n",
6301 rc, bf_get(lpfc_mqe_status, mqe));
6302 phba->link_state = LPFC_HBA_ERROR;
6303 rc = -EIO;
76a95d75 6304 goto out_free_mbox;
da0436e9
JS
6305 }
6306
0558056c 6307 lpfc_update_vport_wwn(vport);
da0436e9
JS
6308
6309 /* Update the fc_host data structures with new wwn. */
6310 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6311 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6312
8a9d2e80
JS
6313 /* update host els and scsi xri-sgl sizes and mappings */
6314 rc = lpfc_sli4_xri_sgl_update(phba);
6315 if (unlikely(rc)) {
6316 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6317 "1400 Failed to update xri-sgl size and "
6318 "mapping: %d\n", rc);
6319 goto out_free_mbox;
da0436e9
JS
6320 }
6321
8a9d2e80
JS
6322 /* register the els sgl pool to the port */
6323 rc = lpfc_sli4_repost_els_sgl_list(phba);
6324 if (unlikely(rc)) {
6325 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6326 "0582 Error %d during els sgl post "
6327 "operation\n", rc);
6328 rc = -ENODEV;
6329 goto out_free_mbox;
6330 }
6331
6332 /* register the allocated scsi sgl pool to the port */
da0436e9
JS
6333 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6334 if (unlikely(rc)) {
6d368e53 6335 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6a9c52cf
JS
6336 "0383 Error %d during scsi sgl post "
6337 "operation\n", rc);
da0436e9
JS
6338 /* Some Scsi buffers were moved to the abort scsi list */
6339 /* A pci function reset will repost them */
6340 rc = -ENODEV;
76a95d75 6341 goto out_free_mbox;
da0436e9
JS
6342 }
6343
6344 /* Post the rpi header region to the device. */
6345 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6346 if (unlikely(rc)) {
6347 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6348 "0393 Error %d during rpi post operation\n",
6349 rc);
6350 rc = -ENODEV;
76a95d75 6351 goto out_free_mbox;
da0436e9 6352 }
97f2ecf1 6353 lpfc_sli4_node_prep(phba);
da0436e9 6354
5350d872
JS
6355 /* Create all the SLI4 queues */
6356 rc = lpfc_sli4_queue_create(phba);
6357 if (rc) {
6358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6359 "3089 Failed to allocate queues\n");
6360 rc = -ENODEV;
6361 goto out_stop_timers;
6362 }
da0436e9
JS
6363 /* Set up all the queues to the device */
6364 rc = lpfc_sli4_queue_setup(phba);
6365 if (unlikely(rc)) {
6366 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6367 "0381 Error %d during queue setup.\n ", rc);
5350d872 6368 goto out_destroy_queue;
da0436e9
JS
6369 }
6370
6371 /* Arm the CQs and then EQs on device */
6372 lpfc_sli4_arm_cqeq_intr(phba);
6373
6374 /* Indicate device interrupt mode */
6375 phba->sli4_hba.intr_enable = 1;
6376
6377 /* Allow asynchronous mailbox command to go through */
6378 spin_lock_irq(&phba->hbalock);
6379 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6380 spin_unlock_irq(&phba->hbalock);
6381
6382 /* Post receive buffers to the device */
6383 lpfc_sli4_rb_setup(phba);
6384
fc2b989b
JS
6385 /* Reset HBA FCF states after HBA reset */
6386 phba->fcf.fcf_flag = 0;
6387 phba->fcf.current_rec.flag = 0;
6388
da0436e9 6389 /* Start the ELS watchdog timer */
8fa38513
JS
6390 mod_timer(&vport->els_tmofunc,
6391 jiffies + HZ * (phba->fc_ratov * 2));
da0436e9
JS
6392
6393 /* Start heart beat timer */
6394 mod_timer(&phba->hb_tmofunc,
6395 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6396 phba->hb_outstanding = 0;
6397 phba->last_completion_time = jiffies;
6398
6399 /* Start error attention (ERATT) polling timer */
6400 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6401
75baf696
JS
6402 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6403 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6404 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6405 if (!rc) {
6406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6407 "2829 This device supports "
6408 "Advanced Error Reporting (AER)\n");
6409 spin_lock_irq(&phba->hbalock);
6410 phba->hba_flag |= HBA_AER_ENABLED;
6411 spin_unlock_irq(&phba->hbalock);
6412 } else {
6413 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6414 "2830 This device does not support "
6415 "Advanced Error Reporting (AER)\n");
6416 phba->cfg_aer_support = 0;
6417 }
0a96e975 6418 rc = 0;
75baf696
JS
6419 }
6420
76a95d75
JS
6421 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6422 /*
6423 * The FC Port needs to register FCFI (index 0)
6424 */
6425 lpfc_reg_fcfi(phba, mboxq);
6426 mboxq->vport = phba->pport;
6427 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9589b062 6428 if (rc != MBX_SUCCESS)
76a95d75 6429 goto out_unset_queue;
9589b062
JS
6430 rc = 0;
6431 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6432 &mboxq->u.mqe.un.reg_fcfi);
026abb87
JS
6433
6434 /* Check if the port is configured to be disabled */
6435 lpfc_sli_read_link_ste(phba);
76a95d75 6436 }
026abb87 6437
da0436e9
JS
6438 /*
6439 * The port is ready, set the host's link state to LINK_DOWN
6440 * in preparation for link interrupts.
6441 */
da0436e9
JS
6442 spin_lock_irq(&phba->hbalock);
6443 phba->link_state = LPFC_LINK_DOWN;
6444 spin_unlock_irq(&phba->hbalock);
026abb87
JS
6445 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6446 (phba->hba_flag & LINK_DISABLED)) {
6447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6448 "3103 Adapter Link is disabled.\n");
6449 lpfc_down_link(phba, mboxq);
6450 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6451 if (rc != MBX_SUCCESS) {
6452 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6453 "3104 Adapter failed to issue "
6454 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6455 goto out_unset_queue;
6456 }
6457 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
1b51197d
JS
6458 /* don't perform init_link on SLI4 FC port loopback test */
6459 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6460 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6461 if (rc)
6462 goto out_unset_queue;
6463 }
6464 }
6465 mempool_free(mboxq, phba->mbox_mem_pool);
6466 return rc;
76a95d75 6467out_unset_queue:
da0436e9 6468 /* Unset all the queues set up in this routine when error out */
6469 lpfc_sli4_queue_unset(phba);
6470out_destroy_queue:
6471 lpfc_sli4_queue_destroy(phba);
da0436e9 6472out_stop_timers:
5350d872 6473 lpfc_stop_hba_timers(phba);
6474out_free_mbox:
6475 mempool_free(mboxq, phba->mbox_mem_pool);
6476 return rc;
6477}
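
/*
 * Note: the error unwind above runs in reverse order of setup - the
 * queues are unset, then destroyed, the HBA timers are stopped, and
 * finally the mailbox is returned to the mempool.
 */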
6478
6479/**
6480 * lpfc_mbox_timeout - Timeout call back function for mbox timer
6481 * @ptr: context object - pointer to hba structure.
6482 *
6483 * This is the callback function for mailbox timer. The mailbox
6484 * timer is armed when a new mailbox command is issued and the timer
6485 * is deleted when the mailbox completes. The function is called by
6486 * the kernel timer code when a mailbox does not complete within
6487 * expected time. This function wakes up the worker thread to
6488 * process the mailbox timeout and returns. All the processing is
6489 * done by the worker thread function lpfc_mbox_timeout_handler.
6490 **/
6491void
6492lpfc_mbox_timeout(unsigned long ptr)
6493{
6494 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6495 unsigned long iflag;
6496 uint32_t tmo_posted;
6497
6498 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6499 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6500 if (!tmo_posted)
6501 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6502 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6503
6504 if (!tmo_posted)
6505 lpfc_worker_wake_up(phba);
6506 return;
6507}
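
/*
 * Illustrative pairing (a sketch, not code lifted from this file):
 * lpfc_mbox_timeout() only fires when the arm/disarm pair around a
 * mailbox command does not complete in time, roughly:
 *
 *	mod_timer(&psli->mbox_tmo,
 *		  jiffies + (HZ * lpfc_mbox_tmo_val(phba, pmbox)));
 *	... issue the command; on completion the timer is deleted ...
 */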
6508
6509
6510/**
6511 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6512 * @phba: Pointer to HBA context object.
6513 *
6514 * This function is called from worker thread when a mailbox command times out.
6515 * The caller is not required to hold any locks. This function will reset the
6516 * HBA and recover all the pending commands.
6517 **/
6518void
6519lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6520{
6521 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
04c68496 6522 MAILBOX_t *mb = &pmbox->u.mb;
6523 struct lpfc_sli *psli = &phba->sli;
6524 struct lpfc_sli_ring *pring;
6525
6526 /* Check the pmbox pointer first. There is a race condition
6527 * between the mbox timeout handler getting executed in the
6528 * worklist and the mailbox actually completing. When this
6529 * race condition occurs, the mbox_active will be NULL.
6530 */
6531 spin_lock_irq(&phba->hbalock);
6532 if (pmbox == NULL) {
6533 lpfc_printf_log(phba, KERN_WARNING,
6534 LOG_MBOX | LOG_SLI,
6535 "0353 Active Mailbox cleared - mailbox timeout "
6536 "exiting\n");
6537 spin_unlock_irq(&phba->hbalock);
6538 return;
6539 }
6540
6541 /* Mbox cmd <mbxCommand> timeout */
6542 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6543 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6544 mb->mbxCommand,
6545 phba->pport->port_state,
6546 phba->sli.sli_flag,
6547 phba->sli.mbox_active);
6548 spin_unlock_irq(&phba->hbalock);
6549
6550 /* Setting state unknown so lpfc_sli_abort_iocb_ring
6551 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 6552 * it to fail all outstanding SCSI IO.
6553 */
6554 spin_lock_irq(&phba->pport->work_port_lock);
6555 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6556 spin_unlock_irq(&phba->pport->work_port_lock);
6557 spin_lock_irq(&phba->hbalock);
6558 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 6559 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6560 spin_unlock_irq(&phba->hbalock);
6561
6562 pring = &psli->ring[psli->fcp_ring];
6563 lpfc_sli_abort_iocb_ring(phba, pring);
6564
6565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6566 "0345 Resetting board due to mailbox timeout\n");
6567
6568 /* Reset the HBA device */
6569 lpfc_reset_hba(phba);
6570}
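
/*
 * For context (a sketch of the worker-thread side, assuming the usual
 * lpfc_work_done() dispatch): the event posted by lpfc_mbox_timeout()
 * is consumed roughly as:
 *
 *	if (phba->pport->work_port_events & WORKER_MBOX_TMO)
 *		lpfc_mbox_timeout_handler(phba);
 */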
6571
6572/**
6573 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6574 * @phba: Pointer to HBA context object.
6575 * @pmbox: Pointer to mailbox object.
6576 * @flag: Flag indicating how the mailbox need to be processed.
6577 *
6578 * This function is called by discovery code and HBA management code
6579 * to submit a mailbox command to firmware with SLI-3 interface spec. This
6580 * function gets the hbalock to protect the data structures.
6581 * The mailbox command can be submitted in polling mode, in which case
6582 * this function will wait in a polling loop for the completion of the
6583 * mailbox.
6584 * If the mailbox is submitted in no_wait mode (not polling) the
6585 * function will submit the command and return immediately without waiting
6586 * for the mailbox completion. The no_wait is supported only when HBA
6587 * is in SLI2/SLI3 mode - interrupts are enabled.
6588 * The SLI interface allows only one mailbox pending at a time. If the
6589 * mailbox is issued in polling mode and there is already a mailbox
6590 * pending, then the function will return an error. If the mailbox is issued
6591 * in NO_WAIT mode and there is a mailbox pending already, the function
6592 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
6593 * The sli layer owns the mailbox object until the completion of mailbox
6594 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6595 * return codes the caller owns the mailbox command after the return of
6596 * the function.
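 *
 * A minimal caller-side sketch (illustrative only): with MBX_NOWAIT,
 * ownership transfers to the SLI layer only on MBX_BUSY or MBX_SUCCESS,
 * so the caller frees the command on any other return:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(pmb, phba->mbox_mem_pool);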
e59058c4 6597 **/
6598static int
6599lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6600 uint32_t flag)
dea3101e 6601{
dea3101e 6602 MAILBOX_t *mb;
2e0fef85 6603 struct lpfc_sli *psli = &phba->sli;
dea3101e 6604 uint32_t status, evtctr;
9940b97b 6605 uint32_t ha_copy, hc_copy;
dea3101e 6606 int i;
09372820 6607 unsigned long timeout;
dea3101e 6608 unsigned long drvr_flag = 0;
34b02dcd 6609 uint32_t word0, ldata;
dea3101e 6610 void __iomem *to_slim;
6611 int processing_queue = 0;
6612
6613 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6614 if (!pmbox) {
8568a4d2 6615 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 6616 /* processing mbox queue from intr_handler */
6617 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6618 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6619 return MBX_SUCCESS;
6620 }
58da1ffb 6621 processing_queue = 1;
6622 pmbox = lpfc_mbox_get(phba);
6623 if (!pmbox) {
6624 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6625 return MBX_SUCCESS;
6626 }
6627 }
dea3101e 6628
ed957684 6629 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 6630 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 6631 if(!pmbox->vport) {
58da1ffb 6632 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 6633 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 6634 LOG_MBOX | LOG_VPORT,
e8b62011 6635 "1806 Mbox x%x failed. No vport\n",
3772a991 6636 pmbox->u.mb.mbxCommand);
ed957684 6637 dump_stack();
58da1ffb 6638 goto out_not_finished;
6639 }
6640 }
6641
8d63f375 6642 /* If the PCI channel is in offline state, do not post mbox. */
6643 if (unlikely(pci_channel_offline(phba->pcidev))) {
6644 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6645 goto out_not_finished;
6646 }
8d63f375 6647
6648 /* If HBA has a deferred error attention, fail the mailbox command. */
6649 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6650 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6651 goto out_not_finished;
6652 }
6653
dea3101e 6654 psli = &phba->sli;
92d7f7b0 6655
3772a991 6656 mb = &pmbox->u.mb;
dea3101e 6657 status = MBX_SUCCESS;
6658
6659 if (phba->link_state == LPFC_HBA_ERROR) {
6660 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6661
6662 /* Mbox command <mbxCommand> cannot issue */
6663 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6664 "(%d):0311 Mailbox command x%x cannot "
6665 "issue Data: x%x x%x\n",
6666 pmbox->vport ? pmbox->vport->vpi : 0,
6667 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 6668 goto out_not_finished;
6669 }
6670
6671 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6672 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6673 !(hc_copy & HC_MBINT_ENA)) {
6674 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6675 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6676 "(%d):2528 Mailbox command x%x cannot "
6677 "issue Data: x%x x%x\n",
6678 pmbox->vport ? pmbox->vport->vpi : 0,
6679 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6680 goto out_not_finished;
6681 }
6682 }
6683
dea3101e 6684 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6685 /* Polling for a mbox command when another one is already active
6686 * is not allowed in SLI. Also, the driver must have established
6687 * SLI2 mode to queue and process multiple mbox commands.
6688 */
6689
6690 if (flag & MBX_POLL) {
2e0fef85 6691 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6692
6693 /* Mbox command <mbxCommand> cannot issue */
6694 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6695 "(%d):2529 Mailbox command x%x "
6696 "cannot issue Data: x%x x%x\n",
6697 pmbox->vport ? pmbox->vport->vpi : 0,
6698 pmbox->u.mb.mbxCommand,
6699 psli->sli_flag, flag);
58da1ffb 6700 goto out_not_finished;
dea3101e 6701 }
6702
3772a991 6703 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 6704 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6705 /* Mbox command <mbxCommand> cannot issue */
6706 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6707 "(%d):2530 Mailbox command x%x "
6708 "cannot issue Data: x%x x%x\n",
6709 pmbox->vport ? pmbox->vport->vpi : 0,
6710 pmbox->u.mb.mbxCommand,
6711 psli->sli_flag, flag);
58da1ffb 6712 goto out_not_finished;
dea3101e 6713 }
6714
dea3101e 6715 /* Another mailbox command is still being processed, queue this
6716 * command to be processed later.
6717 */
6718 lpfc_mbox_put(phba, pmbox);
6719
6720 /* Mbox cmd issue - BUSY */
ed957684 6721 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 6722 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 6723 "x%x x%x x%x x%x\n",
6724 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6725 mb->mbxCommand, phba->pport->port_state,
6726 psli->sli_flag, flag);
dea3101e 6727
6728 psli->slistat.mbox_busy++;
2e0fef85 6729 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6730
6731 if (pmbox->vport) {
6732 lpfc_debugfs_disc_trc(pmbox->vport,
6733 LPFC_DISC_TRC_MBOX_VPORT,
6734 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
6735 (uint32_t)mb->mbxCommand,
6736 mb->un.varWords[0], mb->un.varWords[1]);
6737 }
6738 else {
6739 lpfc_debugfs_disc_trc(phba->pport,
6740 LPFC_DISC_TRC_MBOX,
6741 "MBOX Bsy: cmd:x%x mb:x%x x%x",
6742 (uint32_t)mb->mbxCommand,
6743 mb->un.varWords[0], mb->un.varWords[1]);
6744 }
6745
2e0fef85 6746 return MBX_BUSY;
dea3101e 6747 }
6748
dea3101e 6749 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6750
6751 /* If we are not polling, we MUST be in SLI2 mode */
6752 if (flag != MBX_POLL) {
3772a991 6753 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
41415862 6754 (mb->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 6755 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 6756 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6757 /* Mbox command <mbxCommand> cannot issue */
6758 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6759 "(%d):2531 Mailbox command x%x "
6760 "cannot issue Data: x%x x%x\n",
6761 pmbox->vport ? pmbox->vport->vpi : 0,
6762 pmbox->u.mb.mbxCommand,
6763 psli->sli_flag, flag);
58da1ffb 6764 goto out_not_finished;
dea3101e 6765 }
6766 /* timeout active mbox command */
a309a6b6 6767 mod_timer(&psli->mbox_tmo, (jiffies +
a183a15f 6768 (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
dea3101e 6769 }
6770
6771 /* Mailbox cmd <cmd> issue */
ed957684 6772 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 6773 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 6774 "x%x\n",
e8b62011 6775 pmbox->vport ? pmbox->vport->vpi : 0,
6776 mb->mbxCommand, phba->pport->port_state,
6777 psli->sli_flag, flag);
dea3101e 6778
6779 if (mb->mbxCommand != MBX_HEARTBEAT) {
6780 if (pmbox->vport) {
6781 lpfc_debugfs_disc_trc(pmbox->vport,
6782 LPFC_DISC_TRC_MBOX_VPORT,
6783 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6784 (uint32_t)mb->mbxCommand,
6785 mb->un.varWords[0], mb->un.varWords[1]);
6786 }
6787 else {
6788 lpfc_debugfs_disc_trc(phba->pport,
6789 LPFC_DISC_TRC_MBOX,
6790 "MBOX Send: cmd:x%x mb:x%x x%x",
6791 (uint32_t)mb->mbxCommand,
6792 mb->un.varWords[0], mb->un.varWords[1]);
6793 }
6794 }
6795
dea3101e 6796 psli->slistat.mbox_cmd++;
6797 evtctr = psli->slistat.mbox_event;
6798
6799 /* next set own bit for the adapter and copy over command word */
6800 mb->mbxOwner = OWN_CHIP;
6801
3772a991 6802 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6803 /* Populate mbox extension offset word. */
6804 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6805 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6806 = (uint8_t *)phba->mbox_ext
6807 - (uint8_t *)phba->mbox;
6808 }
6809
6810 /* Copy the mailbox extension data */
6811 if (pmbox->in_ext_byte_len && pmbox->context2) {
6812 lpfc_sli_pcimem_bcopy(pmbox->context2,
6813 (uint8_t *)phba->mbox_ext,
6814 pmbox->in_ext_byte_len);
6815 }
6816 /* Copy command data to host SLIM area */
34b02dcd 6817 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 6818 } else {
6819 /* Populate mbox extension offset word. */
6820 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6821 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6822 = MAILBOX_HBA_EXT_OFFSET;
6823
6824 /* Copy the mailbox extension data */
6825 if (pmbox->in_ext_byte_len && pmbox->context2) {
6826 lpfc_memcpy_to_slim(phba->MBslimaddr +
6827 MAILBOX_HBA_EXT_OFFSET,
6828 pmbox->context2, pmbox->in_ext_byte_len);
6829
6830 }
9290831f 6831 if (mb->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 6832 /* copy command data into host mbox for cmpl */
34b02dcd 6833 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 6834 }
6835
6836 /* First copy mbox command data to HBA SLIM, skip past first
6837 word */
6838 to_slim = phba->MBslimaddr + sizeof (uint32_t);
6839 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
6840 MAILBOX_CMD_SIZE - sizeof (uint32_t));
6841
6842 /* Next copy over first word, with mbxOwner set */
34b02dcd 6843 ldata = *((uint32_t *)mb);
dea3101e 6844 to_slim = phba->MBslimaddr;
6845 writel(ldata, to_slim);
6846 readl(to_slim); /* flush */
6847
6848 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6849 /* switch over to host mailbox */
3772a991 6850 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 6851 }
6852 }
6853
6854 wmb();
dea3101e 6855
6856 switch (flag) {
6857 case MBX_NOWAIT:
09372820 6858 /* Set up reference to mailbox command */
dea3101e 6859 psli->mbox_active = pmbox;
6860 /* Interrupt board to do it */
6861 writel(CA_MBATT, phba->CAregaddr);
6862 readl(phba->CAregaddr); /* flush */
6863 /* Don't wait for it to finish, just return */
dea3101e 6864 break;
6865
6866 case MBX_POLL:
09372820 6867 /* Set up null reference to mailbox command */
dea3101e 6868 psli->mbox_active = NULL;
6869 /* Interrupt board to do it */
6870 writel(CA_MBATT, phba->CAregaddr);
6871 readl(phba->CAregaddr); /* flush */
6872
3772a991 6873 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6874 /* First read mbox status word */
34b02dcd 6875 word0 = *((uint32_t *)phba->mbox);
dea3101e 6876 word0 = le32_to_cpu(word0);
6877 } else {
6878 /* First read mbox status word */
6879 if (lpfc_readl(phba->MBslimaddr, &word0)) {
6880 spin_unlock_irqrestore(&phba->hbalock,
6881 drvr_flag);
6882 goto out_not_finished;
6883 }
dea3101e 6884 }
6885
6886 /* Read the HBA Host Attention Register */
6887 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6888 spin_unlock_irqrestore(&phba->hbalock,
6889 drvr_flag);
6890 goto out_not_finished;
6891 }
6892 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6893 1000) + jiffies;
09372820 6894 i = 0;
dea3101e 6895 /* Wait for command to complete */
6896 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6897 (!(ha_copy & HA_MBATT) &&
2e0fef85 6898 (phba->link_state > LPFC_WARM_START))) {
09372820 6899 if (time_after(jiffies, timeout)) {
dea3101e 6900 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 6901 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 6902 drvr_flag);
58da1ffb 6903 goto out_not_finished;
dea3101e 6904 }
6905
6906 /* Check if we took a mbox interrupt while we were
6907 polling */
6908 if (((word0 & OWN_CHIP) != OWN_CHIP)
6909 && (evtctr != psli->slistat.mbox_event))
6910 break;
6911
6912 if (i++ > 10) {
6913 spin_unlock_irqrestore(&phba->hbalock,
6914 drvr_flag);
6915 msleep(1);
6916 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6917 }
dea3101e 6918
3772a991 6919 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6920 /* First copy command data */
34b02dcd 6921 word0 = *((uint32_t *)phba->mbox);
dea3101e 6922 word0 = le32_to_cpu(word0);
6923 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6924 MAILBOX_t *slimmb;
34b02dcd 6925 uint32_t slimword0;
dea3101e 6926 /* Check real SLIM for any errors */
6927 slimword0 = readl(phba->MBslimaddr);
6928 slimmb = (MAILBOX_t *) & slimword0;
6929 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6930 && slimmb->mbxStatus) {
6931 psli->sli_flag &=
3772a991 6932 ~LPFC_SLI_ACTIVE;
dea3101e 6933 word0 = slimword0;
6934 }
6935 }
6936 } else {
6937 /* First copy command data */
6938 word0 = readl(phba->MBslimaddr);
6939 }
6940 /* Read the HBA Host Attention Register */
6941 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6942 spin_unlock_irqrestore(&phba->hbalock,
6943 drvr_flag);
6944 goto out_not_finished;
6945 }
dea3101e 6946 }
6947
3772a991 6948 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6949 /* copy results back to user */
34b02dcd 6950 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
6951 /* Copy the mailbox extension data */
6952 if (pmbox->out_ext_byte_len && pmbox->context2) {
6953 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
6954 pmbox->context2,
6955 pmbox->out_ext_byte_len);
6956 }
dea3101e 6957 } else {
6958 /* First copy command data */
6959 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
6960 MAILBOX_CMD_SIZE);
6961 /* Copy the mailbox extension data */
6962 if (pmbox->out_ext_byte_len && pmbox->context2) {
6963 lpfc_memcpy_from_slim(pmbox->context2,
6964 phba->MBslimaddr +
6965 MAILBOX_HBA_EXT_OFFSET,
6966 pmbox->out_ext_byte_len);
dea3101e 6967 }
6968 }
6969
6970 writel(HA_MBATT, phba->HAregaddr);
6971 readl(phba->HAregaddr); /* flush */
6972
6973 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6974 status = mb->mbxStatus;
6975 }
6976
6977 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6978 return status;
6979
6980out_not_finished:
6981 if (processing_queue) {
da0436e9 6982 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
6983 lpfc_mbox_cmpl_put(phba, pmbox);
6984 }
6985 return MBX_NOT_FINISHED;
dea3101e 6986}
6987
6988/**
6989 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
6990 * @phba: Pointer to HBA context object.
6991 *
6992 * The function blocks the posting of SLI4 asynchronous mailbox commands from
6993 * the driver internal pending mailbox queue. It will then try to wait out the
6994 * possible outstanding mailbox command before returning.
6995 *
6996 * Returns:
6997 * 0 - the outstanding mailbox command completed; 1 - the wait for the
6998 * outstanding mailbox command timed out.
6999 **/
7000static int
7001lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7002{
7003 struct lpfc_sli *psli = &phba->sli;
f1126688 7004 int rc = 0;
a183a15f 7005 unsigned long timeout = 0;
7006
7007 /* Mark the asynchronous mailbox command posting as blocked */
7008 spin_lock_irq(&phba->hbalock);
7009 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7010 /* Determine how long we might wait for the active mailbox
7011 * command to be gracefully completed by firmware.
7012 */
7013 if (phba->sli.mbox_active)
7014 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7015 phba->sli.mbox_active) *
7016 1000) + jiffies;
7017 spin_unlock_irq(&phba->hbalock);
7018
7019 /* Wait for the outstanding mailbox command to complete */
7020 while (phba->sli.mbox_active) {
7021 /* Check active mailbox complete status every 2ms */
7022 msleep(2);
7023 if (time_after(jiffies, timeout)) {
7024 /* Timed out, mark the outstanding cmd as not complete */
7025 rc = 1;
7026 break;
7027 }
7028 }
7029
7030 /* Cannot cleanly block async mailbox commands, fail it */
7031 if (rc) {
7032 spin_lock_irq(&phba->hbalock);
7033 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7034 spin_unlock_irq(&phba->hbalock);
7035 }
7036 return rc;
7037}
7038
7039/**
7040 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
7041 * @phba: Pointer to HBA context object.
7042 *
7043 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7044 * commands from the driver internal pending mailbox queue. It makes sure
7045 * that there is no outstanding mailbox command before resuming posting
7046 * asynchronous mailbox commands. If, for any reason, there is an outstanding
7047 * mailbox command, it will try to wait it out before resuming asynchronous
7048 * mailbox command posting.
7049 **/
7050static void
7051lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7052{
7053 struct lpfc_sli *psli = &phba->sli;
7054
7055 spin_lock_irq(&phba->hbalock);
7056 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7057 /* Asynchronous mailbox posting is not blocked, do nothing */
7058 spin_unlock_irq(&phba->hbalock);
7059 return;
7060 }
7061
7062 /* An outstanding synchronous mailbox command is guaranteed to be
7063 * done, whether successful or timed out; on a timeout the outstanding
7064 * command is always removed. So just unblock the posting of async
7065 * mailbox commands and resume.
7066 */
7067 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7068 spin_unlock_irq(&phba->hbalock);
7069
7070 /* wake up worker thread to post asynchronous mailbox command */
7071 lpfc_worker_wake_up(phba);
7072}
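
/*
 * The two helpers above are intended to bracket a synchronous post while
 * interrupts are enabled; lpfc_sli_issue_mbox_s4() below follows this
 * shape (sketch):
 *
 *	if (!lpfc_sli4_async_mbox_block(phba)) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */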
7073
7074/**
7075 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7076 * @phba: Pointer to HBA context object.
7077 * @mboxq: Pointer to mailbox object.
7078 *
7079 * The function waits for the bootstrap mailbox register ready bit from
7080 * the port for twice the regular mailbox command timeout value.
7081 *
7082 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7083 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7084 **/
7085static int
7086lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7087{
7088 uint32_t db_ready;
7089 unsigned long timeout;
7090 struct lpfc_register bmbx_reg;
7091
7092 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7093 * 1000) + jiffies;
7094
7095 do {
7096 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7097 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7098 if (!db_ready)
7099 msleep(2);
7100
7101 if (time_after(jiffies, timeout))
7102 return MBXERR_ERROR;
7103 } while (!db_ready);
7104
7105 return 0;
7106}
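
/*
 * Usage note: lpfc_sli4_post_sync_mbox() below calls this helper before
 * each of its bootstrap doorbell writes, so a single synchronous command
 * can pay up to three such waits in the worst case.
 */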
7107
7108/**
7109 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7110 * @phba: Pointer to HBA context object.
7111 * @mboxq: Pointer to mailbox object.
7112 *
7113 * The function posts a mailbox to the port. The mailbox is expected
7114 * to be completely filled in and ready for the port to operate on it.
7115 * This routine executes a synchronous completion operation on the
7116 * mailbox by polling for its completion.
7117 *
7118 * The caller must not be holding any locks when calling this routine.
7119 *
7120 * Returns:
7121 * MBX_SUCCESS - mailbox posted successfully
7122 * Any of the MBX error values.
7123 **/
7124static int
7125lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7126{
7127 int rc = MBX_SUCCESS;
7128 unsigned long iflag;
7129 uint32_t mcqe_status;
7130 uint32_t mbx_cmnd;
7131 struct lpfc_sli *psli = &phba->sli;
7132 struct lpfc_mqe *mb = &mboxq->u.mqe;
7133 struct lpfc_bmbx_create *mbox_rgn;
7134 struct dma_address *dma_address;
7135
7136 /*
7137 * Only one mailbox can be active to the bootstrap mailbox region
7138 * at a time and there is no queueing provided.
7139 */
7140 spin_lock_irqsave(&phba->hbalock, iflag);
7141 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7142 spin_unlock_irqrestore(&phba->hbalock, iflag);
7143 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7144 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7145 "cannot issue Data: x%x x%x\n",
7146 mboxq->vport ? mboxq->vport->vpi : 0,
7147 mboxq->u.mb.mbxCommand,
7148 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7149 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7150 psli->sli_flag, MBX_POLL);
7151 return MBXERR_ERROR;
7152 }
7153 /* The server grabs the token and owns it until release */
7154 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7155 phba->sli.mbox_active = mboxq;
7156 spin_unlock_irqrestore(&phba->hbalock, iflag);
7157
7158 /* wait for the bootstrap mbox register to become ready */
7159 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7160 if (rc)
7161 goto exit;
7162
7163 /*
7164 * Initialize the bootstrap memory region to avoid stale data areas
7165 * in the mailbox post. Then copy the caller's mailbox contents to
7166 * the bmbx mailbox region.
7167 */
7168 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7169 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7170 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7171 sizeof(struct lpfc_mqe));
7172
7173 /* Post the high mailbox dma address to the port and wait for ready. */
7174 dma_address = &phba->sli4_hba.bmbx.dma_address;
7175 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7176
7177 /* wait for bootstrap mbox register for hi-address write done */
7178 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7179 if (rc)
7180 goto exit;
7181
7182 /* Post the low mailbox dma address to the port. */
7183 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 7184
7185 /* wait for bootstrap mbox register for low address write done */
7186 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7187 if (rc)
7188 goto exit;
7189
7190 /*
7191 * Read the CQ to ensure the mailbox has completed.
7192 * If so, update the mailbox status so that the upper layers
7193 * can complete the request normally.
7194 */
7195 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7196 sizeof(struct lpfc_mqe));
7197 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7198 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7199 sizeof(struct lpfc_mcqe));
7200 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7201 /*
7202 * When the CQE status indicates a failure and the mailbox status
7203 * indicates success then copy the CQE status into the mailbox status
7204 * (and prefix it with x4000).
7205 */
da0436e9 7206 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7207 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7208 bf_set(lpfc_mqe_status, mb,
7209 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 7210 rc = MBXERR_ERROR;
7211 } else
7212 lpfc_sli4_swap_str(phba, mboxq);
7213
7214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7215 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7217 " x%x x%x CQ: x%x x%x x%x x%x\n",
7218 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7219 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7220 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7221 bf_get(lpfc_mqe_status, mb),
7222 mb->un.mb_words[0], mb->un.mb_words[1],
7223 mb->un.mb_words[2], mb->un.mb_words[3],
7224 mb->un.mb_words[4], mb->un.mb_words[5],
7225 mb->un.mb_words[6], mb->un.mb_words[7],
7226 mb->un.mb_words[8], mb->un.mb_words[9],
7227 mb->un.mb_words[10], mb->un.mb_words[11],
7228 mb->un.mb_words[12], mboxq->mcqe.word0,
7229 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7230 mboxq->mcqe.trailer);
7231exit:
7232 /* We are holding the token; no lock is needed to release it */
7233 spin_lock_irqsave(&phba->hbalock, iflag);
7234 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7235 phba->sli.mbox_active = NULL;
7236 spin_unlock_irqrestore(&phba->hbalock, iflag);
7237 return rc;
7238}
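
/*
 * In short, the bootstrap handshake above is three paced steps, each
 * gated by lpfc_sli4_wait_bmbx_ready(): wait for the ready bit, post the
 * high half of the bmbx DMA address, post the low half, then read the
 * MQE and MCQE back from bmbx.avirt to harvest the completion status.
 */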
7239
7240/**
7241 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7242 * @phba: Pointer to HBA context object.
7243 * @mboxq: Pointer to mailbox object.
7244 * @flag: Flag indicating how the mailbox need to be processed.
7245 *
7246 * This function is called by discovery code and HBA management code to submit
7247 * a mailbox command to firmware with SLI-4 interface spec.
7248 *
7249 * Return codes: the caller owns the mailbox command after the return of the
7250 * function.
7251 **/
7252static int
7253lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7254 uint32_t flag)
7255{
7256 struct lpfc_sli *psli = &phba->sli;
7257 unsigned long iflags;
7258 int rc;
7259
7260 /* dump from issue mailbox command if setup */
7261 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7262
7263 rc = lpfc_mbox_dev_check(phba);
7264 if (unlikely(rc)) {
7265 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7266 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7267 "cannot issue Data: x%x x%x\n",
7268 mboxq->vport ? mboxq->vport->vpi : 0,
7269 mboxq->u.mb.mbxCommand,
7270 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7271 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7272 psli->sli_flag, flag);
7273 goto out_not_finished;
7274 }
7275
7276 /* Detect polling mode and jump to a handler */
7277 if (!phba->sli4_hba.intr_enable) {
7278 if (flag == MBX_POLL)
7279 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7280 else
7281 rc = -EIO;
7282 if (rc != MBX_SUCCESS)
0558056c 7283 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 7284 "(%d):2541 Mailbox command x%x "
7285 "(x%x/x%x) failure: "
7286 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7287 "Data: x%x x%x\n,",
7288 mboxq->vport ? mboxq->vport->vpi : 0,
7289 mboxq->u.mb.mbxCommand,
7290 lpfc_sli_config_mbox_subsys_get(phba,
7291 mboxq),
7292 lpfc_sli_config_mbox_opcode_get(phba,
7293 mboxq),
7294 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7295 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7296 bf_get(lpfc_mcqe_ext_status,
7297 &mboxq->mcqe),
7298 psli->sli_flag, flag);
7299 return rc;
7300 } else if (flag == MBX_POLL) {
7301 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7302 "(%d):2542 Try to issue mailbox command "
a183a15f 7303 "x%x (x%x/x%x) synchronously ahead of async"
f1126688 7304 "mailbox command queue: x%x x%x\n",
7305 mboxq->vport ? mboxq->vport->vpi : 0,
7306 mboxq->u.mb.mbxCommand,
7307 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7308 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 7309 psli->sli_flag, flag);
7310 /* Try to block the asynchronous mailbox posting */
7311 rc = lpfc_sli4_async_mbox_block(phba);
7312 if (!rc) {
7313 /* Successfully blocked, now issue sync mbox cmd */
7314 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7315 if (rc != MBX_SUCCESS)
cc459f19 7316 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 7317 LOG_MBOX | LOG_SLI,
7318 "(%d):2597 Sync Mailbox command "
7319 "x%x (x%x/x%x) failure: "
7320 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7321 "Data: x%x x%x\n,",
7322 mboxq->vport ? mboxq->vport->vpi : 0,
7323 mboxq->u.mb.mbxCommand,
7324 lpfc_sli_config_mbox_subsys_get(phba,
7325 mboxq),
7326 lpfc_sli_config_mbox_opcode_get(phba,
7327 mboxq),
7328 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7329 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7330 bf_get(lpfc_mcqe_ext_status,
7331 &mboxq->mcqe),
a183a15f 7332 psli->sli_flag, flag);
7333 /* Unblock the async mailbox posting afterward */
7334 lpfc_sli4_async_mbox_unblock(phba);
7335 }
7336 return rc;
7337 }
7338
7339 /* Now, interrupt mode asynchronous mailbox command */
7340 rc = lpfc_mbox_cmd_check(phba, mboxq);
7341 if (rc) {
7342 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7343 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7344 "cannot issue Data: x%x x%x\n",
7345 mboxq->vport ? mboxq->vport->vpi : 0,
7346 mboxq->u.mb.mbxCommand,
7347 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7348 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7349 psli->sli_flag, flag);
7350 goto out_not_finished;
7351 }
7352
7353 /* Put the mailbox command to the driver internal FIFO */
7354 psli->slistat.mbox_busy++;
7355 spin_lock_irqsave(&phba->hbalock, iflags);
7356 lpfc_mbox_put(phba, mboxq);
7357 spin_unlock_irqrestore(&phba->hbalock, iflags);
7358 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7359 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 7360 "x%x (x%x/x%x) x%x x%x x%x\n",
7361 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7362 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7363 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7364 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7365 phba->pport->port_state,
7366 psli->sli_flag, MBX_NOWAIT);
7367 /* Wake up worker thread to transport mailbox command from head */
7368 lpfc_worker_wake_up(phba);
7369
7370 return MBX_BUSY;
7371
7372out_not_finished:
7373 return MBX_NOT_FINISHED;
7374}
7375
7376/**
7377 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7378 * @phba: Pointer to HBA context object.
7379 *
7380 * This function is called by the worker thread to send a mailbox command to
7381 * SLI4 HBA firmware.
7382 *
7383 **/
7384int
7385lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7386{
7387 struct lpfc_sli *psli = &phba->sli;
7388 LPFC_MBOXQ_t *mboxq;
7389 int rc = MBX_SUCCESS;
7390 unsigned long iflags;
7391 struct lpfc_mqe *mqe;
7392 uint32_t mbx_cmnd;
7393
7394 /* Check interrupt mode before posting async mailbox command */
7395 if (unlikely(!phba->sli4_hba.intr_enable))
7396 return MBX_NOT_FINISHED;
7397
7398 /* Check for mailbox command service token */
7399 spin_lock_irqsave(&phba->hbalock, iflags);
7400 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7401 spin_unlock_irqrestore(&phba->hbalock, iflags);
7402 return MBX_NOT_FINISHED;
7403 }
7404 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7405 spin_unlock_irqrestore(&phba->hbalock, iflags);
7406 return MBX_NOT_FINISHED;
7407 }
7408 if (unlikely(phba->sli.mbox_active)) {
7409 spin_unlock_irqrestore(&phba->hbalock, iflags);
7410 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7411 "0384 There is pending active mailbox cmd\n");
7412 return MBX_NOT_FINISHED;
7413 }
7414 /* Take the mailbox command service token */
7415 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7416
7417 /* Get the next mailbox command from head of queue */
7418 mboxq = lpfc_mbox_get(phba);
7419
7420 /* If no more mailbox command waiting for post, we're done */
7421 if (!mboxq) {
7422 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7423 spin_unlock_irqrestore(&phba->hbalock, iflags);
7424 return MBX_SUCCESS;
7425 }
7426 phba->sli.mbox_active = mboxq;
7427 spin_unlock_irqrestore(&phba->hbalock, iflags);
7428
7429 /* Check device readiness for posting mailbox command */
7430 rc = lpfc_mbox_dev_check(phba);
7431 if (unlikely(rc))
7432 /* Driver clean routine will clean up pending mailbox */
7433 goto out_not_finished;
7434
7435 /* Prepare the mbox command to be posted */
7436 mqe = &mboxq->u.mqe;
7437 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7438
7439 /* Start timer for the mbox_tmo and log some mailbox post messages */
7440 mod_timer(&psli->mbox_tmo, (jiffies +
a183a15f 7441 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
7442
7443 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7444 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7445 "x%x x%x\n",
7446 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7447 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7448 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7449 phba->pport->port_state, psli->sli_flag);
7450
7451 if (mbx_cmnd != MBX_HEARTBEAT) {
7452 if (mboxq->vport) {
7453 lpfc_debugfs_disc_trc(mboxq->vport,
7454 LPFC_DISC_TRC_MBOX_VPORT,
7455 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7456 mbx_cmnd, mqe->un.mb_words[0],
7457 mqe->un.mb_words[1]);
7458 } else {
7459 lpfc_debugfs_disc_trc(phba->pport,
7460 LPFC_DISC_TRC_MBOX,
7461 "MBOX Send: cmd:x%x mb:x%x x%x",
7462 mbx_cmnd, mqe->un.mb_words[0],
7463 mqe->un.mb_words[1]);
7464 }
7465 }
7466 psli->slistat.mbox_cmd++;
7467
7468 /* Post the mailbox command to the port */
7469 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7470 if (rc != MBX_SUCCESS) {
7471 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7472 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7473 "cannot issue Data: x%x x%x\n",
7474 mboxq->vport ? mboxq->vport->vpi : 0,
7475 mboxq->u.mb.mbxCommand,
7476 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7477 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7478 psli->sli_flag, MBX_NOWAIT);
7479 goto out_not_finished;
7480 }
7481
7482 return rc;
7483
7484out_not_finished:
7485 spin_lock_irqsave(&phba->hbalock, iflags);
7486 if (phba->sli.mbox_active) {
7487 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7488 __lpfc_mbox_cmpl_put(phba, mboxq);
7489 /* Release the token */
7490 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7491 phba->sli.mbox_active = NULL;
7492 }
7493 spin_unlock_irqrestore(&phba->hbalock, iflags);
7494
7495 return MBX_NOT_FINISHED;
7496}
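
/*
 * Note: this routine is the consumer half of the asynchronous path -
 * lpfc_sli_issue_mbox_s4() enqueues a command and wakes the worker
 * thread, which calls here to move one command from the driver FIFO to
 * the port's mailbox work queue via lpfc_sli4_mq_put().
 */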
7497
7498/**
7499 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7500 * @phba: Pointer to HBA context object.
7501 * @pmbox: Pointer to mailbox object.
7502 * @flag: Flag indicating how the mailbox need to be processed.
7503 *
7504 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
7505 * the API jump table function pointer from the lpfc_hba struct.
7506 *
7507 * Return codes: the caller owns the mailbox command after the return of the
7508 * function.
7509 **/
7510int
7511lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7512{
7513 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7514}
7515
7516/**
25985edc 7517 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7518 * @phba: The hba struct for which this call is being executed.
7519 * @dev_grp: The HBA PCI-Device group number.
7520 *
7521 * This routine sets up the mbox interface API function jump table in @phba
7522 * struct.
7523 * Returns: 0 - success, -ENODEV - failure.
7524 **/
7525int
7526lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7527{
7528
7529 switch (dev_grp) {
7530 case LPFC_PCI_DEV_LP:
7531 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7532 phba->lpfc_sli_handle_slow_ring_event =
7533 lpfc_sli_handle_slow_ring_event_s3;
7534 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7535 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7536 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7537 break;
7538 case LPFC_PCI_DEV_OC:
7539 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7540 phba->lpfc_sli_handle_slow_ring_event =
7541 lpfc_sli_handle_slow_ring_event_s4;
7542 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7543 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7544 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7545 break;
7546 default:
7547 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7548 "1420 Invalid HBA PCI-device group: 0x%x\n",
7549 dev_grp);
7550 return -ENODEV;
7552 }
7553 return 0;
7554}
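
/*
 * Illustrative call (a sketch; in practice dev_grp is derived during PCI
 * probe from the device's SLI revision):
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 */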
7555
e59058c4 7556/**
3621a710 7557 * __lpfc_sli_ringtx_put - Add an iocb to the txq
7558 * @phba: Pointer to HBA context object.
7559 * @pring: Pointer to driver SLI ring object.
7560 * @piocb: Pointer to address of newly added command iocb.
7561 *
7562 * This function is called with hbalock held to add a command
7563 * iocb to the txq when SLI layer cannot submit the command iocb
7564 * to the ring.
7565 **/
2a9bf3d0 7566void
92d7f7b0 7567__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 7568 struct lpfc_iocbq *piocb)
dea3101e 7569{
7570 /* Insert the caller's iocb in the txq tail for later processing. */
7571 list_add_tail(&piocb->list, &pring->txq);
7572 pring->txq_cnt++;
dea3101e 7573}
7574
e59058c4 7575/**
3621a710 7576 * lpfc_sli_next_iocb - Get the next iocb in the txq
7577 * @phba: Pointer to HBA context object.
7578 * @pring: Pointer to driver SLI ring object.
7579 * @piocb: Pointer to address of newly added command iocb.
7580 *
7581 * This function is called with hbalock held before a new
7582 * iocb is submitted to the firmware. This function checks the
7583 * txq so that any iocbs queued there are flushed to the firmware
7584 * before new iocbs are submitted.
7585 * If there are iocbs in the txq which need to be submitted
7586 * to firmware, lpfc_sli_next_iocb returns the first element
7587 * of the txq after dequeuing it from txq.
7588 * If there is no iocb in the txq then the function will return
7589 * *piocb and *piocb is set to NULL. Caller needs to check
7590 * *piocb to find if there are more commands in the txq.
7591 **/
dea3101e 7592static struct lpfc_iocbq *
7593lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 7594 struct lpfc_iocbq **piocb)
dea3101e 7595{
7596 struct lpfc_iocbq * nextiocb;
7597
7598 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7599 if (!nextiocb) {
7600 nextiocb = *piocb;
7601 *piocb = NULL;
7602 }
7603
7604 return nextiocb;
7605}
7606
e59058c4 7607/**
3772a991 7608 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 7609 * @phba: Pointer to HBA context object.
3772a991 7610 * @ring_number: SLI ring number to issue iocb on.
7611 * @piocb: Pointer to command iocb.
7612 * @flag: Flag indicating if this command can be put into txq.
7613 *
7614 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7615 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7616 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7617 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7618 * this function allows only iocbs for posting buffers. This function finds
7619 * next available slot in the command ring and posts the command to the
7620 * available slot and writes the port attention register to request HBA start
7621 * processing new iocb. If there is no slot available in the ring and
7622 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7623 * the function returns IOCB_BUSY.
e59058c4 7624 *
7625 * This function is called with hbalock held. The function will return success
7626 * after it successfully submits the iocb to firmware or after adding it to the
7627 * txq.
e59058c4 7628 **/
98c9ea5c 7629static int
3772a991 7630__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 7631 struct lpfc_iocbq *piocb, uint32_t flag)
7632{
7633 struct lpfc_iocbq *nextiocb;
7634 IOCB_t *iocb;
3772a991 7635 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
dea3101e 7636
7637 if (piocb->iocb_cmpl && (!piocb->vport) &&
7638 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7639 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7640 lpfc_printf_log(phba, KERN_ERR,
7641 LOG_SLI | LOG_VPORT,
e8b62011 7642 "1807 IOCB x%x failed. No vport\n",
7643 piocb->iocb.ulpCommand);
7644 dump_stack();
7645 return IOCB_ERROR;
7646 }
7647
7648
7649 /* If the PCI channel is in offline state, do not post iocbs. */
7650 if (unlikely(pci_channel_offline(phba->pcidev)))
7651 return IOCB_ERROR;
7652
7653 /* If HBA has a deferred error attention, fail the iocb. */
7654 if (unlikely(phba->hba_flag & DEFER_ERATT))
7655 return IOCB_ERROR;
7656
dea3101e 7657 /*
7658 * We should never get an IOCB if we are in a < LINK_DOWN state
7659 */
2e0fef85 7660 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 7661 return IOCB_ERROR;
7662
7663 /*
7664 * Check to see if we are blocking IOCB processing because of an
0b727fea 7665 * outstanding event.
dea3101e 7666 */
0b727fea 7667 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 7668 goto iocb_busy;
7669
2e0fef85 7670 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 7671 /*
2680eeaa 7672 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 7673 * can be issued if the link is not up.
7674 */
7675 switch (piocb->iocb.ulpCommand) {
7676 case CMD_GEN_REQUEST64_CR:
7677 case CMD_GEN_REQUEST64_CX:
7678 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7679 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 7680 FC_RCTL_DD_UNSOL_CMD) ||
7681 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7682 MENLO_TRANSPORT_TYPE))
7683
7684 goto iocb_busy;
7685 break;
dea3101e 7686 case CMD_QUE_RING_BUF_CN:
7687 case CMD_QUE_RING_BUF64_CN:
dea3101e 7688 /*
7689 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7690 * completion, iocb_cmpl MUST be 0.
7691 */
7692 if (piocb->iocb_cmpl)
7693 piocb->iocb_cmpl = NULL;
7694 /*FALLTHROUGH*/
7695 case CMD_CREATE_XRI_CR:
7696 case CMD_CLOSE_XRI_CN:
7697 case CMD_CLOSE_XRI_CX:
dea3101e 7698 break;
7699 default:
7700 goto iocb_busy;
7701 }
7702
7703 /*
7704 * For FCP commands, we must be in a state where we can process link
7705 * attention events.
7706 */
7707 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
92d7f7b0 7708 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 7709 goto iocb_busy;
92d7f7b0 7710 }
dea3101e 7711
dea3101e 7712 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7713 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7714 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7715
7716 if (iocb)
7717 lpfc_sli_update_ring(phba, pring);
7718 else
7719 lpfc_sli_update_full_ring(phba, pring);
7720
7721 if (!piocb)
7722 return IOCB_SUCCESS;
7723
7724 goto out_busy;
7725
7726 iocb_busy:
7727 pring->stats.iocb_cmd_delay++;
7728
7729 out_busy:
7730
7731 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 7732 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 7733 return IOCB_SUCCESS;
7734 }
7735
7736 return IOCB_BUSY;
7737}
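
/*
 * Illustrative sketch (not code from this file): as a lockless variant,
 * this routine must be entered with the hbalock held, e.g.:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */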
7738
3772a991 7739/**
7740 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7741 * @phba: Pointer to HBA context object.
7742 * @piocbq: Pointer to command iocb.
7743 * @sglq: Pointer to the scatter gather queue object.
7744 *
7745 * This routine converts the bpl or bde that is in the IOCB
7746 * to a sgl list for the sli4 hardware. The physical address
7747 * of the bpl/bde is converted back to a virtual address.
7748 * If the IOCB contains a BPL then the list of BDE's is
7749 * converted to sli4_sge's. If the IOCB contains a single
7750 * BDE then it is converted to a single sli_sge.
7751 * The IOCB is still in CPU endianness so the contents of
7752 * the bpl can be used without byte swapping.
7753 *
7754 * Returns valid XRI = Success, NO_XRI = Failure.
7755**/
7756static uint16_t
7757lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7758 struct lpfc_sglq *sglq)
3772a991 7759{
7760 uint16_t xritag = NO_XRI;
7761 struct ulp_bde64 *bpl = NULL;
7762 struct ulp_bde64 bde;
7763 struct sli4_sge *sgl = NULL;
1b51197d 7764 struct lpfc_dmabuf *dmabuf;
7765 IOCB_t *icmd;
7766 int numBdes = 0;
7767 int i = 0;
7768 uint32_t offset = 0; /* accumulated offset in the sg request list */
7769 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 7770
7771 if (!piocbq || !sglq)
7772 return xritag;
7773
7774 sgl = (struct sli4_sge *)sglq->sgl;
7775 icmd = &piocbq->iocb;
7776 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7777 return sglq->sli4_xritag;
7778 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7779 numBdes = icmd->un.genreq64.bdl.bdeSize /
7780 sizeof(struct ulp_bde64);
7781 /* The addrHigh and addrLow fields within the IOCB
7782 * have not been byteswapped yet so there is no
7783 * need to swap them back.
7784 */
7785 if (piocbq->context3)
7786 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7787 else
7788 return xritag;
4f774513 7789
1b51197d 7790 bpl = (struct ulp_bde64 *)dmabuf->virt;
7791 if (!bpl)
7792 return xritag;
7793
7794 for (i = 0; i < numBdes; i++) {
7795 /* Should already be byte swapped. */
7796 sgl->addr_hi = bpl->addrHigh;
7797 sgl->addr_lo = bpl->addrLow;
7798
0558056c 7799 sgl->word2 = le32_to_cpu(sgl->word2);
7800 if ((i+1) == numBdes)
7801 bf_set(lpfc_sli4_sge_last, sgl, 1);
7802 else
7803 bf_set(lpfc_sli4_sge_last, sgl, 0);
7804 /* swap the size field back to the cpu so we
7805 * can assign it to the sgl.
7806 */
7807 bde.tus.w = le32_to_cpu(bpl->tus.w);
7808 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
7809 /* The offsets in the sgl need to be accumulated
7810 * separately for the request and reply lists.
7811 * The request is always first, the reply follows.
7812 */
7813 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7814 /* add up the reply sg entries */
7815 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7816 inbound++;
7817 /* first inbound? reset the offset */
7818 if (inbound == 1)
7819 offset = 0;
7820 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7821 bf_set(lpfc_sli4_sge_type, sgl,
7822 LPFC_SGE_TYPE_DATA);
7823 offset += bde.tus.f.bdeSize;
7824 }
546fc854 7825 sgl->word2 = cpu_to_le32(sgl->word2);
7826 bpl++;
7827 sgl++;
7828 }
7829 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7830 /* The addrHigh and addrLow fields of the BDE have not
7831 * been byteswapped yet so they need to be swapped
7832 * before putting them in the sgl.
7833 */
7834 sgl->addr_hi =
7835 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7836 sgl->addr_lo =
7837 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 7838 sgl->word2 = le32_to_cpu(sgl->word2);
7839 bf_set(lpfc_sli4_sge_last, sgl, 1);
7840 sgl->word2 = cpu_to_le32(sgl->word2);
7841 sgl->sge_len =
7842 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
7843 }
7844 return sglq->sli4_xritag;
3772a991 7845}
92d7f7b0 7846
e59058c4 7847/**
4f774513 7848 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
e59058c4 7849 * @phba: Pointer to HBA context object.
e59058c4 7850 *
a93ff37a 7851 * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
7852 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
7853 * held.
7854 *
7855 * Return: index of the SLI4 fast-path FCP queue to use.
e59058c4 7856 **/
2a76a283 7857static inline uint32_t
8fa38513 7858lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
92d7f7b0 7859{
2a76a283 7860 int i;
92d7f7b0 7861
7862 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7863 i = smp_processor_id();
7864 else
7865 i = atomic_add_return(1, &phba->fcp_qidx);
92d7f7b0 7866
67d12733 7867 i = (i % phba->cfg_fcp_io_channel);
2a76a283 7868 return i;
7869}
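
/*
 * Worked example (assuming the default, non-CPU-bound policy and
 * cfg_fcp_io_channel == 4): atomic_add_return() yields 1, 2, 3, 4, ...
 * so the returned index cycles 1, 2, 3, 0, 1, ... across the four WQs.
 */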
7870
e59058c4 7871/**
4f774513 7872 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 7873 * @phba: Pointer to HBA context object.
7874 * @iocbq: Pointer to command iocb.
7875 * @wqe: Pointer to the work queue entry.
e59058c4 7876 *
7877 * This routine converts the iocb command to its Work Queue Entry
7878 * equivalent. The wqe pointer should not have any fields set when
7879 * this routine is called because it will memcpy over them.
7880 * This routine does not set the CQ_ID or the WQEC bits in the
7881 * wqe.
e59058c4 7882 *
4f774513 7883 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 7884 **/
cf5bf97e 7885static int
7886lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7887 union lpfc_wqe *wqe)
cf5bf97e 7888{
5ffc266e 7889 uint32_t xmit_len = 0, total_len = 0;
7890 uint8_t ct = 0;
7891 uint32_t fip;
7892 uint32_t abort_tag;
7893 uint8_t command_type = ELS_COMMAND_NON_FIP;
7894 uint8_t cmnd;
7895 uint16_t xritag;
7896 uint16_t abrt_iotag;
7897 struct lpfc_iocbq *abrtiocbq;
4f774513 7898 struct ulp_bde64 *bpl = NULL;
f0d9bccc 7899 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
7900 int numBdes, i;
7901 struct ulp_bde64 bde;
c31098ce 7902 struct lpfc_nodelist *ndlp;
ff78d8f9 7903 uint32_t *pcmd;
1b51197d 7904 uint32_t if_type;
4f774513 7905
45ed1190 7906 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 7907 /* The fcp commands will set command type */
0c287589 7908 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 7909 command_type = FCP_COMMAND;
c868595d 7910 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
7911 command_type = ELS_COMMAND_FIP;
7912 else
7913 command_type = ELS_COMMAND_NON_FIP;
7914
7915 /* Some of the fields are in the right position already */
7916 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7917 abort_tag = (uint32_t) iocbq->iotag;
7918 xritag = iocbq->sli4_xritag;
f0d9bccc 7919 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
7920 /* words0-2 bpl convert bde */
7921 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7922 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7923 sizeof(struct ulp_bde64);
7924 bpl = (struct ulp_bde64 *)
7925 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7926 if (!bpl)
7927 return IOCB_ERROR;
cf5bf97e 7928
7929 /* Should already be byte swapped. */
7930 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
7931 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
7932 /* swap the size field back to the cpu so we
7933 * can assign it to the sgl.
7934 */
7935 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
7936 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7937 total_len = 0;
7938 for (i = 0; i < numBdes; i++) {
7939 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
7940 total_len += bde.tus.f.bdeSize;
7941 }
4f774513 7942 } else
5ffc266e 7943 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 7944
7945 iocbq->iocb.ulpIoTag = iocbq->iotag;
7946 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 7947
7948 switch (iocbq->iocb.ulpCommand) {
7949 case CMD_ELS_REQUEST64_CR:
7950 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
7951 ndlp = iocbq->context_un.ndlp;
7952 else
7953 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7954 if (!iocbq->iocb.ulpLe) {
7955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7956 "2007 Only Limited Edition cmd Format"
7957 " supported 0x%x\n",
7958 iocbq->iocb.ulpCommand);
7959 return IOCB_ERROR;
7960 }
ff78d8f9 7961
5ffc266e 7962 wqe->els_req.payload_len = xmit_len;
7963 /* Els_request64 has a TMO */
7964 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
7965 iocbq->iocb.ulpTimeout);
7966 /* Need a VF for word 4 set the vf bit*/
7967 bf_set(els_req64_vf, &wqe->els_req, 0);
7968 /* And a VFID for word 12 */
7969 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 7970 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
7971 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7972 iocbq->iocb.ulpContext);
7973 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
7974 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 7975 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 7976 if (command_type == ELS_COMMAND_FIP)
7977 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
7978 >> LPFC_FIP_ELS_ID_SHIFT);
7979 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7980 iocbq->context2)->virt);
7981 if_type = bf_get(lpfc_sli_intf_if_type,
7982 &phba->sli4_hba.sli_intf);
7983 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 7984 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 7985 *pcmd == ELS_CMD_SCR ||
6b5151fd 7986 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 7987 *pcmd == ELS_CMD_LOGO ||
7988 *pcmd == ELS_CMD_PLOGI)) {
7989 bf_set(els_req64_sp, &wqe->els_req, 1);
7990 bf_set(els_req64_sid, &wqe->els_req,
7991 iocbq->vport->fc_myDID);
939723a4
JS
7992 if ((*pcmd == ELS_CMD_FLOGI) &&
7993 !(phba->fc_topology ==
7994 LPFC_TOPOLOGY_LOOP))
7995 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
7996 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7997 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 7998 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 7999 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
8000 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8001 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8002 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8003 }
c868595d 8004 }
6d368e53
JS
8005 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8006 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
8007 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8008 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8009 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8010 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8011 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8012 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7851fe2c 8013 break;
5ffc266e 8014 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
8015 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8016 iocbq->iocb.un.ulpWord[3]);
8017 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 8018 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
8019 /* The entire sequence is transmitted for this IOCB */
8020 xmit_len = total_len;
8021 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
8022 if (phba->link_flag & LS_LOOPBACK_MODE)
8023 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
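		/* Fall through to the CMD_XMIT_SEQUENCE64_CR handling below */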
4f774513 8024 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
8025 /* word3 iocb=io_tag32 wqe=reserved */
8026 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
8027 /* word4 relative_offset memcpy */
8028 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
8029 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8030 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8031 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8032 LPFC_WQE_IOD_WRITE);
8033 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8034 LPFC_WQE_LENLOC_WORD12);
8035 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
8036 wqe->xmit_sequence.xmit_len = xmit_len;
8037 command_type = OTHER_COMMAND;
7851fe2c 8038 break;
4f774513 8039 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
8040 /* word3 iocb=iotag32 wqe=seq_payload_len */
8041 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
8042 /* word4 iocb=rsvd wqe=rsvd */
8043 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8044 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 8045 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 8046 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
8047 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8048 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8049 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8050 LPFC_WQE_LENLOC_WORD3);
8051 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 8052 break;
4f774513
JS
8053 case CMD_FCP_IWRITE64_CR:
8054 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
8055 /* word3 iocb=iotag wqe=payload_offset_len */
8056 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8057 wqe->fcp_iwrite.payload_offset_len =
8058 xmit_len + sizeof(struct fcp_rsp);
8059 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8060 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8061 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8062 iocbq->iocb.ulpFCP2Rcvy);
8063 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8064 /* Always open the exchange */
8065 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
f0d9bccc
JS
8066 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8067 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8068 LPFC_WQE_LENLOC_WORD4);
8069 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8070 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b
JS
8071 if (iocbq->iocb_flag & LPFC_IO_DIF) {
8072 iocbq->iocb_flag &= ~LPFC_IO_DIF;
8073 bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
8074 }
8075 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
7851fe2c 8076 break;
4f774513 8077 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
8078 /* word3 iocb=iotag wqe=payload_offset_len */
8079 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8080 wqe->fcp_iread.payload_offset_len =
5ffc266e 8081 xmit_len + sizeof(struct fcp_rsp);
f0d9bccc
JS
8082 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8083 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8084 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8085 iocbq->iocb.ulpFCP2Rcvy);
8086 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688
JS
8087 /* Always open the exchange */
8088 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
f0d9bccc
JS
8089 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8090 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8091 LPFC_WQE_LENLOC_WORD4);
8092 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8093 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b
JS
8094 if (iocbq->iocb_flag & LPFC_IO_DIF) {
8095 iocbq->iocb_flag &= ~LPFC_IO_DIF;
8096 bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
8097 }
8098 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
7851fe2c 8099 break;
4f774513 8100 case CMD_FCP_ICMND64_CR:
f0d9bccc
JS
8101 /* word3 iocb=IO_TAG wqe=reserved */
8102 wqe->fcp_icmd.rsrvd3 = 0;
8103 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 8104 /* Always open the exchange */
f0d9bccc
JS
8105 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8106 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8107 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8108 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8109 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8110 LPFC_WQE_LENLOC_NONE);
8111 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7851fe2c 8112 break;
4f774513 8113 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
8114 /* For this command calculate the xmit length of the
8115 * request bde.
8116 */
8117 xmit_len = 0;
8118 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8119 sizeof(struct ulp_bde64);
8120 for (i = 0; i < numBdes; i++) {
63e801ce 8121 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
8122 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8123 break;
63e801ce
JS
8124 xmit_len += bde.tus.f.bdeSize;
8125 }
f0d9bccc
JS
8126 /* word3 iocb=IO_TAG wqe=request_payload_len */
8127 wqe->gen_req.request_payload_len = xmit_len;
8128 /* word4 iocb=parameter wqe=relative_offset memcpy */
8129 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
8130 /* word6 context tag copied in memcpy */
8131 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8132 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8133 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8134 "2015 Invalid CT %x command 0x%x\n",
8135 ct, iocbq->iocb.ulpCommand);
8136 return IOCB_ERROR;
8137 }
f0d9bccc
JS
8138 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8139 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8140 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8141 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8142 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8143 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8144 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8145 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
4f774513 8146 command_type = OTHER_COMMAND;
7851fe2c 8147 break;
4f774513 8148 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 8149 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 8150 /* words0-2 BDE memcpy */
f0d9bccc
JS
8151 /* word3 iocb=iotag32 wqe=response_payload_len */
8152 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
8153 /* word4 */
8154 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
8155 /* word5 iocb=rsvd wqe=did */
8156 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
8157 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8158
8159 if_type = bf_get(lpfc_sli_intf_if_type,
8160 &phba->sli4_hba.sli_intf);
8161 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8162 if (iocbq->vport->fc_flag & FC_PT2PT) {
8163 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8164 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8165 iocbq->vport->fc_myDID);
8166 if (iocbq->vport->fc_myDID == Fabric_DID) {
8167 bf_set(wqe_els_did,
8168 &wqe->xmit_els_rsp.wqe_dest, 0);
8169 }
8170 }
8171 }
f0d9bccc
JS
8172 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8173 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8174 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8175 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 8176 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 8177 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 8178 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 8179 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
8180 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8181 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8182 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8183 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8184 LPFC_WQE_LENLOC_WORD3);
8185 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
8186 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8187 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
8188 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8189 iocbq->context2)->virt);
8190 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
8191 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8192 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 8193 iocbq->vport->fc_myDID);
939723a4
JS
8194 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8195 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
8196 phba->vpi_ids[phba->pport->vpi]);
8197 }
4f774513 8198 command_type = OTHER_COMMAND;
7851fe2c 8199 break;
4f774513
JS
8200 case CMD_CLOSE_XRI_CN:
8201 case CMD_ABORT_XRI_CN:
8202 case CMD_ABORT_XRI_CX:
8203 /* words 0-2 memcpy should be 0 (reserved) */
8204 /* port will send abts */
dcf2a4e0
JS
8205 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8206 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8207 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8208 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8209 } else
8210 fip = 0;
8211
8212 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 8213 /*
dcf2a4e0
JS
8214 * The link is down, or the command was ELS_FIP,
8215 * so the fw does not need to send abts
4f774513
JS
8216 * on the wire.
8217 */
8218 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8219 else
8220 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8221 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
8222 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8223 wqe->abort_cmd.rsrvd5 = 0;
8224 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
8225 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8226 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
8227 /*
8228 * The abort handler will send us CMD_ABORT_XRI_CN or
8229 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8230 */
f0d9bccc
JS
8231 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8232 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8233 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8234 LPFC_WQE_LENLOC_NONE);
4f774513
JS
8235 cmnd = CMD_ABORT_XRI_CX;
8236 command_type = OTHER_COMMAND;
8237 xritag = 0;
7851fe2c 8238 break;
6669f9bb 8239 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 8240 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 8241 /* As the BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
8242 * we re-construct this WQE here based on information in
8243 * iocbq from scratch.
8244 */
8245 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 8246 /* OX_ID is the same regardless of who sent ABTS to the CT exchange */
6669f9bb 8247 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
8248 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8249 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
8250 LPFC_ABTS_UNSOL_INT) {
8251 /* ABTS sent by initiator to CT exchange, the
8252 * RX_ID field will be filled with the newly
8253 * allocated responder XRI.
8254 */
8255 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8256 iocbq->sli4_xritag);
8257 } else {
8258 /* ABTS sent by responder to CT exchange, the
8259 * RX_ID field will be filled with the responder
8260 * RX_ID from ABTS.
8261 */
8262 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 8263 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 8264 }
6669f9bb
JS
8265 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8266 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
8267
8268 /* Use CT=VPI */
8269 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8270 ndlp->nlp_DID);
8271 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8272 iocbq->iocb.ulpContext);
8273 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 8274 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 8275 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
8276 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8277 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8278 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
8279 /* Overwrite the pre-set command type with OTHER_COMMAND */
8280 command_type = OTHER_COMMAND;
546fc854
JS
8281 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8282 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8283 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8284 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8285 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8286 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8287 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8288 }
8289
7851fe2c 8290 break;
4f774513
JS
8291 case CMD_XRI_ABORTED_CX:
8292 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
8293 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8294 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8295 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8296 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8297 default:
8298 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8299 "2014 Invalid command 0x%x\n",
8300 iocbq->iocb.ulpCommand);
8301 return IOCB_ERROR;
7851fe2c 8302 break;
4f774513 8303 }
6d368e53 8304
f0d9bccc
JS
8305 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8306 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8307 wqe->generic.wqe_com.abort_tag = abort_tag;
8308 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8309 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8310 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8311 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
8312 return 0;
8313}
8314
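/*
 * Note on the bf_set()/bf_get() accessors used throughout the WQE
 * construction above: they are token-pasting macros (see lpfc_hw4.h)
 * that read or write one named bitfield of a WQE word through the
 * field's _SHIFT, _MASK and _WORD definitions, approximately:
 *
 *	#define bf_set(name, ptr, value) \
 *		((ptr)->name##_WORD = ((((value) & name##_MASK) << \
 *			name##_SHIFT) | ((ptr)->name##_WORD & \
 *			~(name##_MASK << name##_SHIFT))))
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so, for example, bf_set(wqe_tmo, &wqe->els_req.wqe_com, tmo) updates
 * only the timeout bits of the common-word area.
 */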
8315/**
8316 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8317 * @phba: Pointer to HBA context object.
8318 * @ring_number: SLI ring number to issue iocb on.
8319 * @piocb: Pointer to command iocb.
8320 * @flag: Flag indicating if this command can be put into txq.
8321 *
8322 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8323 * an iocb command to an HBA with SLI-4 interface spec.
8324 *
8325 * This function is called with hbalock held. The function will return success
8326 * after it successfully submits the iocb to firmware or after adding it to
8327 * the txq.
8328 **/
8329static int
8330__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8331 struct lpfc_iocbq *piocb, uint32_t flag)
8332{
8333 struct lpfc_sglq *sglq;
4f774513
JS
8334 union lpfc_wqe wqe;
8335 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
4f774513
JS
8336
8337 if (piocb->sli4_xritag == NO_XRI) {
8338 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 8339 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
8340 sglq = NULL;
8341 else {
2a9bf3d0
JS
8342 if (pring->txq_cnt) {
8343 if (!(flag & SLI_IOCB_RET_IOCB)) {
8344 __lpfc_sli_ringtx_put(phba,
8345 pring, piocb);
8346 return IOCB_SUCCESS;
8347 } else {
8348 return IOCB_BUSY;
8349 }
8350 } else {
6d368e53 8351 sglq = __lpfc_sli_get_sglq(phba, piocb);
2a9bf3d0
JS
8352 if (!sglq) {
8353 if (!(flag & SLI_IOCB_RET_IOCB)) {
8354 __lpfc_sli_ringtx_put(phba,
8355 pring,
8356 piocb);
8357 return IOCB_SUCCESS;
8358 } else
8359 return IOCB_BUSY;
8360 }
8361 }
4f774513
JS
8362 }
8363 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6d368e53
JS
8364 /* These IOs already have an XRI and a mapped sgl. */
8365 sglq = NULL;
4f774513 8366 } else {
6d368e53
JS
8367 /*
8368 * This is a continuation of a command (CX), so this
4f774513
JS
8369 * sglq is on the active list
8370 */
8371 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
8372 if (!sglq)
8373 return IOCB_ERROR;
8374 }
8375
8376 if (sglq) {
6d368e53 8377 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 8378 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 8379 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
8380 return IOCB_ERROR;
8381 }
8382
8383 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8384 return IOCB_ERROR;
8385
341af102
JS
8386 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8387 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
5ffc266e
JS
8388 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8389 &wqe))
4f774513
JS
8390 return IOCB_ERROR;
8391 } else {
8392 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8393 return IOCB_ERROR;
8394 }
8395 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8396
8397 return 0;
8398}
8399
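/*
 * Illustrative caller sketch (not driver code; retry_later() is a
 * made-up helper): callers of the lockless issue path hold the
 * appropriate lock and handle all three return codes:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				   SLI_IOCB_RET_IOCB);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);
 *	else if (rc == IOCB_BUSY)
 *		retry_later(piocb);
 *
 * Without SLI_IOCB_RET_IOCB a busy ring queues the iocb to the txq and
 * returns IOCB_SUCCESS instead of IOCB_BUSY, as the txq handling above
 * shows.
 */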
8400/**
8401 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8402 *
8403 * This routine wraps the actual lockless version for issuing an IOCB,
8404 * dispatching through the function pointer in the lpfc_hba struct.
8405 *
8406 * Return codes:
8407 * IOCB_ERROR - Error
8408 * IOCB_SUCCESS - Success
8409 * IOCB_BUSY - Busy
8410 **/
2a9bf3d0 8411int
4f774513
JS
8412__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8413 struct lpfc_iocbq *piocb, uint32_t flag)
8414{
8415 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8416}
8417
8418/**
25985edc 8419 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
8420 * @phba: The hba struct for which this call is being executed.
8421 * @dev_grp: The HBA PCI-Device group number.
8422 *
8423 * This routine sets up the SLI interface API function jump table in @phba
8424 * struct.
8425 * Returns: 0 - success, -ENODEV - failure.
8426 **/
8427int
8428lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8429{
8430
8431 switch (dev_grp) {
8432 case LPFC_PCI_DEV_LP:
8433 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8434 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8435 break;
8436 case LPFC_PCI_DEV_OC:
8437 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8438 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8439 break;
8440 default:
8441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8442 "1419 Invalid HBA PCI-device group: 0x%x\n",
8443 dev_grp);
8444 return -ENODEV;
8445 break;
8446 }
8447 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8448 return 0;
8449}
8450
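/*
 * Illustrative sketch (not driver code) of the dispatch this table
 * enables: after lpfc_sli_api_table_setup() runs, common code never
 * tests the SLI revision on the I/O path; it simply calls through the
 * pointer, which resolves to the _s3 or _s4 variant chosen here:
 *
 *	if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *	...
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 */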
8451/**
8452 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8453 * @phba: Pointer to HBA context object.
8454 * @pring: Pointer to driver SLI ring object.
8455 * @piocb: Pointer to command iocb.
8456 * @flag: Flag indicating if this command can be put into txq.
8457 *
8458 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8459 * function. This function takes the appropriate lock (ring_lock on
8460 * SLI-4, hbalock on SLI-3), calls __lpfc_sli_issue_iocb, and returns
8461 * the error code returned by __lpfc_sli_issue_iocb. This wrapper is
8462 * used by functions which do not already hold the lock.
8463 **/
8464int
8465lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8466 struct lpfc_iocbq *piocb, uint32_t flag)
8467{
ba20c853 8468 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
2a76a283 8469 struct lpfc_sli_ring *pring;
ba20c853
JS
8470 struct lpfc_queue *fpeq;
8471 struct lpfc_eqe *eqe;
4f774513 8472 unsigned long iflags;
2a76a283 8473 int rc, idx;
4f774513 8474
7e56aa25 8475 if (phba->sli_rev == LPFC_SLI_REV4) {
2a76a283
JS
8476 if (piocb->iocb_flag & LPFC_IO_FCP) {
8477 if (unlikely(!phba->sli4_hba.fcp_wq))
8478 return IOCB_ERROR;
8479 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8480 piocb->fcp_wqidx = idx;
8481 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
ba20c853
JS
8482
8483 pring = &phba->sli.ring[ring_number];
8484 spin_lock_irqsave(&pring->ring_lock, iflags);
8485 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8486 flag);
8487 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8488
8489 if (lpfc_fcp_look_ahead) {
8490 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8491
8492 if (atomic_dec_and_test(&fcp_eq_hdl->
8493 fcp_eq_in_use)) {
4f774513 8494
ba20c853
JS
8495 /* Get associated EQ with this index */
8496 fpeq = phba->sli4_hba.hba_eq[idx];
8497
8498 /* Turn off interrupts from this EQ */
8499 lpfc_sli4_eq_clr_intr(fpeq);
8500
8501 /*
8502 * Process all the events on FCP EQ
8503 */
8504 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8505 lpfc_sli4_hba_handle_eqe(phba,
8506 eqe, idx);
8507 fpeq->EQ_processed++;
8508 }
8509
8510 /* Always clear and re-arm the EQ */
8511 lpfc_sli4_eq_release(fpeq,
8512 LPFC_QUEUE_REARM);
8513 }
8514 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8515 }
8516 } else {
8517 pring = &phba->sli.ring[ring_number];
8518 spin_lock_irqsave(&pring->ring_lock, iflags);
8519 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8520 flag);
8521 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8522
2a76a283 8523 }
7e56aa25
JS
8524 } else {
8525 /* For now, SLI2/3 will still use hbalock */
8526 spin_lock_irqsave(&phba->hbalock, iflags);
8527 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8528 spin_unlock_irqrestore(&phba->hbalock, iflags);
8529 }
4f774513
JS
8530 return rc;
8531}
8532
8533/**
8534 * lpfc_extra_ring_setup - Extra ring setup function
8535 * @phba: Pointer to HBA context object.
8536 *
8537 * This function is called while driver attaches with the
8538 * HBA to setup the extra ring. The extra ring is used
8539 * only when driver needs to support target mode functionality
8540 * or IP over FC functionalities.
8541 *
8542 * This function is called with no lock held.
8543 **/
8544static int
8545lpfc_extra_ring_setup( struct lpfc_hba *phba)
8546{
8547 struct lpfc_sli *psli;
8548 struct lpfc_sli_ring *pring;
8549
8550 psli = &phba->sli;
8551
8552 /* Adjust cmd/rsp ring iocb entries more evenly */
8553
8554 /* Take some away from the FCP ring */
8555 pring = &psli->ring[psli->fcp_ring];
7e56aa25
JS
8556 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8557 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8558 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8559 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 8560
a4bc3379
JS
8561 /* and give them to the extra ring */
8562 pring = &psli->ring[psli->extra_ring];
8563
7e56aa25
JS
8564 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8565 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8566 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8567 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
8568
8569 /* Setup default profile for this ring */
8570 pring->iotag_max = 4096;
8571 pring->num_mask = 1;
8572 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
8573 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8574 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
8575 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8576 return 0;
8577}
8578
cb69f7de
JS
8579/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8580 * @phba: Pointer to HBA context object.
8581 * @iocbq: Pointer to iocb object.
8582 *
8583 * The async_event handler calls this routine when it receives
8584 * an ASYNC_STATUS_CN event from the port. The port generates
8585 * this event when an Abort Sequence request to an rport fails
8586 * twice in succession. The abort could be originated by the
8587 * driver or by the port. The ABTS could have been for an ELS
8588 * or FCP IO. The port only generates this event when an ABTS
8589 * fails to complete after one retry.
8590 */
8591static void
8592lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8593 struct lpfc_iocbq *iocbq)
8594{
8595 struct lpfc_nodelist *ndlp = NULL;
8596 uint16_t rpi = 0, vpi = 0;
8597 struct lpfc_vport *vport = NULL;
8598
8599 /* The rpi in the ulpContext is vport-sensitive. */
8600 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8601 rpi = iocbq->iocb.ulpContext;
8602
8603 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8604 "3092 Port generated ABTS async event "
8605 "on vpi %d rpi %d status 0x%x\n",
8606 vpi, rpi, iocbq->iocb.ulpStatus);
8607
8608 vport = lpfc_find_vport_by_vpid(phba, vpi);
8609 if (!vport)
8610 goto err_exit;
8611 ndlp = lpfc_findnode_rpi(vport, rpi);
8612 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8613 goto err_exit;
8614
8615 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8616 lpfc_sli_abts_recover_port(vport, ndlp);
8617 return;
8618
8619 err_exit:
8620 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8621 "3095 Event Context not found, no "
8622 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
8623 vpi, rpi, iocbq->iocb.ulpStatus,
8624 iocbq->iocb.un.ulpWord[4]);
8625}
8626
8627/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8628 * @phba: pointer to HBA context object.
8629 * @ndlp: nodelist pointer for the impacted rport.
8630 * @axri: pointer to the wcqe containing the failed exchange.
8631 *
8632 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8633 * port. The port generates this event when an abort exchange request to an
8634 * rport fails twice in succession with no reply. The abort could be originated
8635 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8636 */
8637void
8638lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8639 struct lpfc_nodelist *ndlp,
8640 struct sli4_wcqe_xri_aborted *axri)
8641{
8642 struct lpfc_vport *vport;
5c1db2ac 8643 uint32_t ext_status = 0;
cb69f7de 8644
6b5151fd 8645 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
8646 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8647 "3115 Node Context not found, driver "
8648 "ignoring abts err event\n");
6b5151fd
JS
8649 return;
8650 }
8651
cb69f7de
JS
8652 vport = ndlp->vport;
8653 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8654 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 8655 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
cb69f7de
JS
8656 ndlp->vport->vpi, ndlp->nlp_rpi,
8657 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
8658 bf_get(lpfc_wcqe_xa_status, axri),
8659 axri->parameter);
cb69f7de 8660
5c1db2ac
JS
8661 /*
8662 * Catch the ABTS protocol failure case. Older OCe FW releases returned
8663 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8664 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8665 */
e3d2b802 8666 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
8667 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8668 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
8669 lpfc_sli_abts_recover_port(vport, ndlp);
8670}
8671
e59058c4 8672/**
3621a710 8673 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
8674 * @phba: Pointer to HBA context object.
8675 * @pring: Pointer to driver SLI ring object.
8676 * @iocbq: Pointer to iocb object.
8677 *
8678 * This function is called by the slow ring event handler
8679 * function when there is an ASYNC event iocb in the ring.
8680 * This function is called with no lock held.
8681 * This function handles temperature related and ABTS error
8682 * (ASYNC_STATUS_CN) ASYNC events. For temperature events it decodes the
8683 * sensor event message and posts events for the management applications.
8684 **/
98c9ea5c 8685static void
57127f15
JS
8686lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8687 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8688{
8689 IOCB_t *icmd;
8690 uint16_t evt_code;
57127f15
JS
8691 struct temp_event temp_event_data;
8692 struct Scsi_Host *shost;
a257bf90 8693 uint32_t *iocb_w;
57127f15
JS
8694
8695 icmd = &iocbq->iocb;
8696 evt_code = icmd->un.asyncstat.evt_code;
57127f15 8697
cb69f7de
JS
8698 switch (evt_code) {
8699 case ASYNC_TEMP_WARN:
8700 case ASYNC_TEMP_SAFE:
8701 temp_event_data.data = (uint32_t) icmd->ulpContext;
8702 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8703 if (evt_code == ASYNC_TEMP_WARN) {
8704 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8705 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8706 "0347 Adapter is very hot, please take "
8707 "corrective action. temperature : %d Celsius\n",
8708 (uint32_t) icmd->ulpContext);
8709 } else {
8710 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8711 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8712 "0340 Adapter temperature is OK now. "
8713 "temperature : %d Celsius\n",
8714 (uint32_t) icmd->ulpContext);
8715 }
8716
8717 /* Send temperature change event to applications */
8718 shost = lpfc_shost_from_vport(phba->pport);
8719 fc_host_post_vendor_event(shost, fc_get_event_number(),
8720 sizeof(temp_event_data), (char *) &temp_event_data,
8721 LPFC_NL_VENDOR_ID);
8722 break;
8723 case ASYNC_STATUS_CN:
8724 lpfc_sli_abts_err_handler(phba, iocbq);
8725 break;
8726 default:
a257bf90 8727 iocb_w = (uint32_t *) icmd;
cb69f7de 8728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 8729 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 8730 " evt_code 0x%x\n"
a257bf90
JS
8731 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8732 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8733 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8734 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 8735 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
8736 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8737 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8738 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8739 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8740
cb69f7de 8741 break;
57127f15 8742 }
57127f15
JS
8743}
8744
8745
e59058c4 8746/**
3621a710 8747 * lpfc_sli_setup - SLI ring setup function
e59058c4
JS
8748 * @phba: Pointer to HBA context object.
8749 *
8750 * lpfc_sli_setup sets up rings of the SLI interface with
8751 * the number of iocbs per ring and iotags. This function is
8752 * called while the driver attaches to the HBA and before the
8753 * interrupts are enabled, so there is no need for locking.
8754 *
8755 * This function always returns 0.
8756 **/
dea3101e 8757int
8758lpfc_sli_setup(struct lpfc_hba *phba)
8759{
ed957684 8760 int i, totiocbsize = 0;
dea3101e 8761 struct lpfc_sli *psli = &phba->sli;
8762 struct lpfc_sli_ring *pring;
8763
2a76a283
JS
8764 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8765 if (phba->sli_rev == LPFC_SLI_REV4)
67d12733 8766 psli->num_rings += phba->cfg_fcp_io_channel;
dea3101e 8767 psli->sli_flag = 0;
8768 psli->fcp_ring = LPFC_FCP_RING;
8769 psli->next_ring = LPFC_FCP_NEXT_RING;
a4bc3379 8770 psli->extra_ring = LPFC_EXTRA_RING;
dea3101e 8771
604a3e30
JB
8772 psli->iocbq_lookup = NULL;
8773 psli->iocbq_lookup_len = 0;
8774 psli->last_iotag = 0;
8775
dea3101e 8776 for (i = 0; i < psli->num_rings; i++) {
8777 pring = &psli->ring[i];
8778 switch (i) {
8779 case LPFC_FCP_RING: /* ring 0 - FCP */
8780 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8781 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8782 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8783 pring->sli.sli3.numCiocb +=
8784 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8785 pring->sli.sli3.numRiocb +=
8786 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8787 pring->sli.sli3.numCiocb +=
8788 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8789 pring->sli.sli3.numRiocb +=
8790 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8791 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8792 SLI3_IOCB_CMD_SIZE :
8793 SLI2_IOCB_CMD_SIZE;
7e56aa25 8794 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8795 SLI3_IOCB_RSP_SIZE :
8796 SLI2_IOCB_RSP_SIZE;
dea3101e 8797 pring->iotag_ctr = 0;
8798 pring->iotag_max =
92d7f7b0 8799 (phba->cfg_hba_queue_depth * 2);
dea3101e 8800 pring->fast_iotag = pring->iotag_max;
8801 pring->num_mask = 0;
8802 break;
a4bc3379 8803 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 8804 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8805 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8806 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8807 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8808 SLI3_IOCB_CMD_SIZE :
8809 SLI2_IOCB_CMD_SIZE;
7e56aa25 8810 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8811 SLI3_IOCB_RSP_SIZE :
8812 SLI2_IOCB_RSP_SIZE;
2e0fef85 8813 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 8814 pring->num_mask = 0;
8815 break;
8816 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8817 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8818 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8819 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8820 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8821 SLI3_IOCB_CMD_SIZE :
8822 SLI2_IOCB_CMD_SIZE;
7e56aa25 8823 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8824 SLI3_IOCB_RSP_SIZE :
8825 SLI2_IOCB_RSP_SIZE;
dea3101e 8826 pring->fast_iotag = 0;
8827 pring->iotag_ctr = 0;
8828 pring->iotag_max = 4096;
57127f15
JS
8829 pring->lpfc_sli_rcv_async_status =
8830 lpfc_sli_async_event_handler;
6669f9bb 8831 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 8832 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
8833 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8834 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 8835 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 8836 lpfc_els_unsol_event;
dea3101e 8837 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
8838 pring->prt[1].rctl = FC_RCTL_ELS_REP;
8839 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 8840 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 8841 lpfc_els_unsol_event;
dea3101e 8842 pring->prt[2].profile = 0; /* Mask 2 */
8843 /* NameServer Inquiry */
6a9c52cf 8844 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 8845 /* NameServer */
6a9c52cf 8846 pring->prt[2].type = FC_TYPE_CT;
dea3101e 8847 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 8848 lpfc_ct_unsol_event;
dea3101e 8849 pring->prt[3].profile = 0; /* Mask 3 */
8850 /* NameServer response */
6a9c52cf 8851 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 8852 /* NameServer */
6a9c52cf 8853 pring->prt[3].type = FC_TYPE_CT;
dea3101e 8854 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 8855 lpfc_ct_unsol_event;
6669f9bb
JS
8856 /* abort unsolicited sequence */
8857 pring->prt[4].profile = 0; /* Mask 4 */
8858 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8859 pring->prt[4].type = FC_TYPE_BLS;
8860 pring->prt[4].lpfc_sli_rcv_unsol_event =
8861 lpfc_sli4_ct_abort_unsol_event;
dea3101e 8862 break;
8863 }
7e56aa25
JS
8864 totiocbsize += (pring->sli.sli3.numCiocb *
8865 pring->sli.sli3.sizeCiocb) +
8866 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 8867 }
ed957684 8868 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 8869 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
8870 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8871 "SLI2 SLIM Data: x%x x%lx\n",
8872 phba->brd_no, totiocbsize,
8873 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 8874 }
cf5bf97e
JW
8875 if (phba->cfg_multi_ring_support == 2)
8876 lpfc_extra_ring_setup(phba);
dea3101e 8877
8878 return 0;
8879}
8880
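/*
 * Illustrative sketch (not driver code) of how the prt[] masks set up
 * above are consumed: when an unsolicited frame arrives, the ring event
 * handler matches the frame's R_CTL/TYPE pair against each registered
 * mask and invokes the matching handler, roughly:
 *
 *	for (i = 0; i < pring->num_mask; i++) {
 *		if (pring->prt[i].rctl == r_ctl &&
 *		    pring->prt[i].type == type) {
 *			(pring->prt[i].lpfc_sli_rcv_unsol_event)
 *				(phba, pring, saveq);
 *			break;
 *		}
 *	}
 *
 * (r_ctl and type stand for values lifted from the received frame
 * header.) This is why the ELS ring registers separate entries for ELS
 * requests, ELS replies, CT traffic and BLS ABTS.
 */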
e59058c4 8881/**
3621a710 8882 * lpfc_sli_queue_setup - Queue initialization function
e59058c4
JS
8883 * @phba: Pointer to HBA context object.
8884 *
8885 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8886 * ring. This function also initializes ring indices of each ring.
8887 * This function is called during the initialization of the SLI
8888 * interface of an HBA.
8889 * This function is called with no lock held and always returns
8890 * 1.
8891 **/
dea3101e 8892int
2e0fef85 8893lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea3101e 8894{
8895 struct lpfc_sli *psli;
8896 struct lpfc_sli_ring *pring;
604a3e30 8897 int i;
dea3101e 8898
8899 psli = &phba->sli;
2e0fef85 8900 spin_lock_irq(&phba->hbalock);
dea3101e 8901 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 8902 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 8903 /* Initialize list headers for txq and txcmplq as double linked lists */
8904 for (i = 0; i < psli->num_rings; i++) {
8905 pring = &psli->ring[i];
8906 pring->ringno = i;
7e56aa25
JS
8907 pring->sli.sli3.next_cmdidx = 0;
8908 pring->sli.sli3.local_getidx = 0;
8909 pring->sli.sli3.cmdidx = 0;
dea3101e 8910 INIT_LIST_HEAD(&pring->txq);
8911 INIT_LIST_HEAD(&pring->txcmplq);
8912 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 8913 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 8914 INIT_LIST_HEAD(&pring->postbufq);
7e56aa25 8915 spin_lock_init(&pring->ring_lock);
dea3101e 8916 }
2e0fef85
JS
8917 spin_unlock_irq(&phba->hbalock);
8918 return 1;
dea3101e 8919}
8920
04c68496
JS
8921/**
8922 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8923 * @phba: Pointer to HBA context object.
8924 *
8925 * This routine flushes the mailbox command subsystem. It will unconditionally
8926 * flush all the mailbox commands in the three possible stages in the mailbox
8927 * command sub-system: pending mailbox command queue; the outstanding mailbox
8928 * command; and completed mailbox command queue. It is the caller's responsibility
8929 * to make sure that the driver is in the proper state to flush the mailbox
8930 * command sub-system. Namely, the posting of mailbox commands into the
8931 * pending mailbox command queue from the various clients must be stopped;
8933 * either the HBA is in a state in which it will never work on the outstanding
8933 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8934 * mailbox command has been completed.
8935 **/
8936static void
8937lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8938{
8939 LIST_HEAD(completions);
8940 struct lpfc_sli *psli = &phba->sli;
8941 LPFC_MBOXQ_t *pmb;
8942 unsigned long iflag;
8943
8944 /* Flush all the mailbox commands in the mbox system */
8945 spin_lock_irqsave(&phba->hbalock, iflag);
8946 /* The pending mailbox command queue */
8947 list_splice_init(&phba->sli.mboxq, &completions);
8948 /* The outstanding active mailbox command */
8949 if (psli->mbox_active) {
8950 list_add_tail(&psli->mbox_active->list, &completions);
8951 psli->mbox_active = NULL;
8952 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8953 }
8954 /* The completed mailbox command queue */
8955 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8956 spin_unlock_irqrestore(&phba->hbalock, iflag);
8957
8958 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
8959 while (!list_empty(&completions)) {
8960 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
8961 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
8962 if (pmb->mbox_cmpl)
8963 pmb->mbox_cmpl(phba, pmb);
8964 }
8965}
8966
e59058c4 8967/**
3621a710 8968 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
8969 * @vport: Pointer to virtual port object.
8970 *
8971 * lpfc_sli_host_down is called to clean up the resources
8972 * associated with a vport before destroying virtual
8973 * port data structures.
8974 * This function does the following operations:
8975 * - Free discovery resources associated with this virtual
8976 * port.
8977 * - Free iocbs associated with this virtual port in
8978 * the txq.
8979 * - Send abort for all iocb commands associated with this
8980 * vport in txcmplq.
8981 *
8982 * This function is called with no lock held and always returns 1.
8983 **/
92d7f7b0
JS
8984int
8985lpfc_sli_host_down(struct lpfc_vport *vport)
8986{
858c9f6c 8987 LIST_HEAD(completions);
92d7f7b0
JS
8988 struct lpfc_hba *phba = vport->phba;
8989 struct lpfc_sli *psli = &phba->sli;
8990 struct lpfc_sli_ring *pring;
8991 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
8992 int i;
8993 unsigned long flags = 0;
8994 uint16_t prev_pring_flag;
8995
8996 lpfc_cleanup_discovery_resources(vport);
8997
8998 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0
JS
8999 for (i = 0; i < psli->num_rings; i++) {
9000 pring = &psli->ring[i];
9001 prev_pring_flag = pring->flag;
5e9d9b82
JS
9002 /* Only slow rings */
9003 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 9004 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
9005 /* Set the lpfc data pending flag */
9006 set_bit(LPFC_DATA_READY, &phba->data_flags);
9007 }
92d7f7b0
JS
9008 /*
9009 * Error everything on the txq since these iocbs have not been
9010 * given to the FW yet.
9011 */
92d7f7b0
JS
9012 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9013 if (iocb->vport != vport)
9014 continue;
858c9f6c 9015 list_move_tail(&iocb->list, &completions);
92d7f7b0 9016 pring->txq_cnt--;
92d7f7b0
JS
9017 }
9018
9019 /* Next issue ABTS for everything on the txcmplq */
9020 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9021 list) {
9022 if (iocb->vport != vport)
9023 continue;
9024 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9025 }
9026
9027 pring->flag = prev_pring_flag;
9028 }
9029
9030 spin_unlock_irqrestore(&phba->hbalock, flags);
9031
a257bf90
JS
9032 /* Cancel all the IOCBs from the completions list */
9033 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9034 IOERR_SLI_DOWN);
92d7f7b0
JS
9035 return 1;
9036}
9037
e59058c4 9038/**
3621a710 9039 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
9040 * @phba: Pointer to HBA context object.
9041 *
9042 * This function cleans up all iocb, buffers, mailbox commands
9043 * while shutting down the HBA. This function is called with no
9044 * lock held and always returns 1.
9045 * This function does the following to cleanup driver resources:
9046 * - Free discovery resources for each virtual port
9047 * - Cleanup any pending fabric iocbs
9048 * - Iterate through the iocb txq and free each entry
9049 * in the list.
9050 * - Free up any buffer posted to the HBA
9051 * - Free mailbox commands in the mailbox queue.
9052 **/
dea3101e 9053int
2e0fef85 9054lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 9055{
2534ba75 9056 LIST_HEAD(completions);
2e0fef85 9057 struct lpfc_sli *psli = &phba->sli;
dea3101e 9058 struct lpfc_sli_ring *pring;
0ff10d46 9059 struct lpfc_dmabuf *buf_ptr;
dea3101e 9060 unsigned long flags = 0;
04c68496
JS
9061 int i;
9062
9063 /* Shutdown the mailbox command sub-system */
618a5230 9064 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 9065
dea3101e 9066 lpfc_hba_down_prep(phba);
9067
92d7f7b0
JS
9068 lpfc_fabric_abort_hba(phba);
9069
2e0fef85 9070 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 9071 for (i = 0; i < psli->num_rings; i++) {
9072 pring = &psli->ring[i];
5e9d9b82
JS
9073 /* Only slow rings */
9074 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 9075 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
9076 /* Set the lpfc data pending flag */
9077 set_bit(LPFC_DATA_READY, &phba->data_flags);
9078 }
dea3101e 9079
9080 /*
9081 * Error everything on the txq since these iocbs have not been
9082 * given to the FW yet.
9083 */
2534ba75 9084 list_splice_init(&pring->txq, &completions);
dea3101e 9085 pring->txq_cnt = 0;
9086
2534ba75 9087 }
2e0fef85 9088 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 9089
a257bf90
JS
9090 /* Cancel all the IOCBs from the completions list */
9091 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9092 IOERR_SLI_DOWN);
dea3101e 9093
0ff10d46
JS
9094 spin_lock_irqsave(&phba->hbalock, flags);
9095 list_splice_init(&phba->elsbuf, &completions);
9096 phba->elsbuf_cnt = 0;
9097 phba->elsbuf_prev_cnt = 0;
9098 spin_unlock_irqrestore(&phba->hbalock, flags);
9099
9100 while (!list_empty(&completions)) {
9101 list_remove_head(&completions, buf_ptr,
9102 struct lpfc_dmabuf, list);
9103 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9104 kfree(buf_ptr);
9105 }
9106
dea3101e 9107 /* Return any active mbox cmds */
9108 del_timer_sync(&psli->mbox_tmo);
2e0fef85 9109
da0436e9 9110 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 9111 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 9112 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 9113
da0436e9
JS
9114 return 1;
9115}
9116
e59058c4 9117/**
3621a710 9118 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
9119 * @srcp: Source memory pointer.
9120 * @destp: Destination memory pointer.
9121 * @cnt: Number of bytes to copy (a multiple of sizeof(uint32_t)).
9122 *
9123 * This function is used for copying data between driver memory
9124 * and the SLI memory. This function also changes the endianness
9125 * of each word if native endianness is different from SLI
9126 * endianness. This function can be called with or without
9127 * lock.
9128 **/
dea3101e 9129void
9130lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9131{
9132 uint32_t *src = srcp;
9133 uint32_t *dest = destp;
9134 uint32_t ldata;
9135 int i;
9136
9137 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9138 ldata = *src;
9139 ldata = le32_to_cpu(ldata);
9140 *dest = ldata;
9141 src++;
9142 dest++;
9143 }
9144}
9145
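/*
 * Usage note (illustrative): the copy proceeds one 32-bit word at a
 * time, so @cnt is a byte count that should be a multiple of
 * sizeof(uint32_t). A sketched call to bring a little-endian SLI
 * mailbox image into host order:
 *
 *	lpfc_sli_pcimem_bcopy(phba->mbox, &pmb->u.mb, sizeof(MAILBOX_t));
 *
 * lpfc_sli_bemem_bcopy() below is the big-endian counterpart.
 */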
e59058c4 9146
a0c87cbd
JS
9147/**
9148 * lpfc_sli_bemem_bcopy - SLI memory copy function
9149 * @srcp: Source memory pointer.
9150 * @destp: Destination memory pointer.
9151 * @cnt: Number of bytes to copy (a multiple of sizeof(uint32_t)).
9152 *
9153 * This function is used for copying data from a data structure
9154 * with big-endian representation to local endianness.
9155 * This function can be called with or without lock.
9156 **/
9157void
9158lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9159{
9160 uint32_t *src = srcp;
9161 uint32_t *dest = destp;
9162 uint32_t ldata;
9163 int i;
9164
9165 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9166 ldata = *src;
9167 ldata = be32_to_cpu(ldata);
9168 *dest = ldata;
9169 src++;
9170 dest++;
9171 }
9172}
9173
e59058c4 9174/**
3621a710 9175 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
9176 * @phba: Pointer to HBA context object.
9177 * @pring: Pointer to driver SLI ring object.
9178 * @mp: Pointer to driver buffer object.
9179 *
9180 * This function is called with no lock held.
9181 * It always return zero after adding the buffer to the postbufq
9182 * buffer list.
9183 **/
dea3101e 9184int
2e0fef85
JS
9185lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9186 struct lpfc_dmabuf *mp)
dea3101e 9187{
9188 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9189 later */
2e0fef85 9190 spin_lock_irq(&phba->hbalock);
dea3101e 9191 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 9192 pring->postbufq_cnt++;
2e0fef85 9193 spin_unlock_irq(&phba->hbalock);
dea3101e 9194 return 0;
9195}
9196
e59058c4 9197/**
3621a710 9198 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
9199 * @phba: Pointer to HBA context object.
9200 *
9201 * When HBQ is enabled, buffers are searched based on tags. This function
9202 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9203 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
9204 * does not conflict with tags of buffer posted for unsolicited events.
9205 * The function returns the allocated tag. The function is called with
9206 * no locks held.
9207 **/
76bb24ef
JS
9208uint32_t
9209lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9210{
9211 spin_lock_irq(&phba->hbalock);
9212 phba->buffer_tag_count++;
9213 /*
9214 * Always set the QUE_BUFTAG_BIT to distinguish this tag
9215 * from a tag assigned by HBQ.
9216 */
9217 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9218 spin_unlock_irq(&phba->hbalock);
9219 return phba->buffer_tag_count;
9220}
9221
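/*
 * Buffer tag round trip (illustrative sketch, not driver code):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, ret_tag);
 *
 * where ret_tag stands for the tag carried back by the
 * CMD_IOCB_RET_XRI64_CX completion; lpfc_sli_ring_taggedbuf_get()
 * below performs the postbufq lookup.
 */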
e59058c4 9222/**
3621a710 9223 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
9224 * @phba: Pointer to HBA context object.
9225 * @pring: Pointer to driver SLI ring object.
9226 * @tag: Buffer tag.
9227 *
9228 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9229 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
9230 * iocb is posted to the response ring with the tag of the buffer.
9231 * This function searches the pring->postbufq list using the tag
9232 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
9233 * iocb. If the buffer is found then lpfc_dmabuf object of the
9234 * buffer is returned to the caller else NULL is returned.
9235 * This function is called with no lock held.
9236 **/
76bb24ef
JS
9237struct lpfc_dmabuf *
9238lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9239 uint32_t tag)
9240{
9241 struct lpfc_dmabuf *mp, *next_mp;
9242 struct list_head *slp = &pring->postbufq;
9243
25985edc 9244 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
9245 spin_lock_irq(&phba->hbalock);
9246 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9247 if (mp->buffer_tag == tag) {
9248 list_del_init(&mp->list);
9249 pring->postbufq_cnt--;
9250 spin_unlock_irq(&phba->hbalock);
9251 return mp;
9252 }
9253 }
9254
9255 spin_unlock_irq(&phba->hbalock);
9256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 9257 "0402 Cannot find virtual addr for buffer tag on "
76bb24ef
JS
9258 "ring %d Data x%lx x%p x%p x%x\n",
9259 pring->ringno, (unsigned long) tag,
9260 slp->next, slp->prev, pring->postbufq_cnt);
9261
9262 return NULL;
9263}
dea3101e 9264
e59058c4 9265/**
3621a710 9266 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
9267 * @phba: Pointer to HBA context object.
9268 * @pring: Pointer to driver SLI ring object.
9269 * @phys: DMA address of the buffer.
9270 *
9271 * This function searches the buffer list using the dma_address
9272 * of unsolicited event to find the driver's lpfc_dmabuf object
9273 * corresponding to the dma_address. The function returns the
9274 * lpfc_dmabuf object if a buffer is found else it returns NULL.
9275 * This function is called by the ct and els unsolicited event
9276 * handlers to get the buffer associated with the unsolicited
9277 * event.
9278 *
9279 * This function is called with no lock held.
9280 **/
dea3101e 9281struct lpfc_dmabuf *
9282lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9283 dma_addr_t phys)
9284{
9285 struct lpfc_dmabuf *mp, *next_mp;
9286 struct list_head *slp = &pring->postbufq;
9287
25985edc 9288 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 9289 spin_lock_irq(&phba->hbalock);
dea3101e 9290 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9291 if (mp->phys == phys) {
9292 list_del_init(&mp->list);
9293 pring->postbufq_cnt--;
2e0fef85 9294 spin_unlock_irq(&phba->hbalock);
dea3101e 9295 return mp;
9296 }
9297 }
9298
2e0fef85 9299 spin_unlock_irq(&phba->hbalock);
dea3101e 9300 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 9301 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 9302 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 9303 pring->ringno, (unsigned long long)phys,
dea3101e 9304 slp->next, slp->prev, pring->postbufq_cnt);
9305 return NULL;
9306}
9307
e59058c4 9308/**
3621a710 9309 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
9310 * @phba: Pointer to HBA context object.
9311 * @cmdiocb: Pointer to driver command iocb object.
9312 * @rspiocb: Pointer to driver response iocb object.
9313 *
9314 * This function is the completion handler for the abort iocbs for
9315 * ELS commands. This function is called from the ELS ring event
9316 * handler with no lock held. This function frees memory resources
9317 * associated with the abort iocb.
9318 **/
dea3101e 9319static void
2e0fef85
JS
9320lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9321 struct lpfc_iocbq *rspiocb)
dea3101e 9322{
2e0fef85 9323 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 9324 uint16_t abort_iotag, abort_context;
ff78d8f9 9325 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
9326
9327 if (irsp->ulpStatus) {
ff78d8f9
JS
9328
9329 /*
9330 * Assume that the port already completed and returned, or
9331 * will return the iocb. Just Log the message.
9332 */
2680eeaa
JS
9333 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9334 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9335
2e0fef85 9336 spin_lock_irq(&phba->hbalock);
45ed1190
JS
9337 if (phba->sli_rev < LPFC_SLI_REV4) {
9338 if (abort_iotag != 0 &&
9339 abort_iotag <= phba->sli.last_iotag)
9340 abort_iocb =
9341 phba->sli.iocbq_lookup[abort_iotag];
9342 } else
9343 /* For sli4 the abort_tag is the XRI,
9344 * so the abort routine puts the iotag of the iocb
9345 * being aborted in the context field of the abort
9346 * IOCB.
9347 */
9348 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 9349
2a9bf3d0
JS
9350 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9351 "0327 Cannot abort els iocb %p "
9352 "with tag %x context %x, abort status %x, "
9353 "abort code %x\n",
9354 abort_iocb, abort_iotag, abort_context,
9355 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 9356
ff78d8f9 9357 spin_unlock_irq(&phba->hbalock);
2680eeaa 9358 }
604a3e30 9359 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 9360 return;
9361}
9362
e59058c4 9363/**
3621a710 9364 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
9365 * @phba: Pointer to HBA context object.
9366 * @cmdiocb: Pointer to driver command iocb object.
9367 * @rspiocb: Pointer to driver response iocb object.
9368 *
9369 * The function is called from SLI ring event handler with no
9370 * lock held. This function is the completion handler for ELS commands
9371 * which are aborted. The function frees memory resources used for
9372 * the aborted ELS commands.
9373 **/
92d7f7b0
JS
9374static void
9375lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9376 struct lpfc_iocbq *rspiocb)
9377{
9378 IOCB_t *irsp = &rspiocb->iocb;
9379
9380 /* ELS cmd tag <ulpIoTag> completes */
9381 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 9382 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 9383 "x%x x%x x%x\n",
e8b62011 9384 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 9385 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
9386 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9387 lpfc_ct_free_iocb(phba, cmdiocb);
9388 else
9389 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
9390 return;
9391}
9392
e59058c4 9393/**
5af5eee7 9394 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
9395 * @phba: Pointer to HBA context object.
9396 * @pring: Pointer to driver SLI ring object.
9397 * @cmdiocb: Pointer to driver command iocb object.
9398 *
5af5eee7
JS
9399 * This function issues an abort iocb for the provided command iocb down to
9400 * the port. Unless the outstanding command iocb is itself an abort
9401 * request, this function issues the abort unconditionally. This function is
9402 * called with hbalock held. The function returns 0 when it fails due to
9403 * memory allocation failure or when the command iocb is an abort request.
e59058c4 9404 **/
5af5eee7
JS
9405static int
9406lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9407 struct lpfc_iocbq *cmdiocb)
dea3101e 9408{
2e0fef85 9409 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 9410 struct lpfc_iocbq *abtsiocbp;
dea3101e 9411 IOCB_t *icmd = NULL;
9412 IOCB_t *iabt = NULL;
5af5eee7 9413 int retval;
7e56aa25 9414 unsigned long iflags;
07951076 9415
92d7f7b0
JS
9416 /*
9417 * There are certain command types we don't want to abort. And we
9418 * don't want to abort commands that are already in the process of
9419 * being aborted.
07951076
JS
9420 */
9421 icmd = &cmdiocb->iocb;
2e0fef85 9422 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
9423 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9424 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
9425 return 0;
9426
dea3101e 9427 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 9428 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 9429 if (abtsiocbp == NULL)
9430 return 0;
dea3101e 9431
07951076 9432 /* This signals the response to set the correct status
341af102 9433 * before calling the completion handler
07951076
JS
9434 */
9435 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9436
dea3101e 9437 iabt = &abtsiocbp->iocb;
07951076
JS
9438 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9439 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 9440 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 9441 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190
JS
9442 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9443 }
da0436e9
JS
9444 else
9445 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
07951076
JS
9446 iabt->ulpLe = 1;
9447 iabt->ulpClass = icmd->ulpClass;
dea3101e 9448
5ffc266e
JS
9449 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9450 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
341af102
JS
9451 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9452 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9453
2e0fef85 9454 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
9455 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9456 else
9457 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 9458
07951076 9459 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
5b8bd0c9 9460
e8b62011
JS
9461 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9462 "0339 Abort xri x%x, original iotag x%x, "
9463 "abort cmd iotag x%x\n",
2a9bf3d0 9464 iabt->un.acxri.abortIoTag,
e8b62011 9465 iabt->un.acxri.abortContextTag,
2a9bf3d0 9466 abtsiocbp->iotag);
7e56aa25
JS
9467
9468 if (phba->sli_rev == LPFC_SLI_REV4) {
9469 /* Note: both hbalock and ring_lock need to be set here */
9470 spin_lock_irqsave(&pring->ring_lock, iflags);
9471 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9472 abtsiocbp, 0);
9473 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9474 } else {
9475 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9476 abtsiocbp, 0);
9477 }
dea3101e 9478
d7c255b2
JS
9479 if (retval)
9480 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
9481
9482 /*
9483 * Caller to this routine should check for IOCB_ERROR
9484 * and handle it properly. This routine no longer removes the
9485 * iocb from txcmplq nor calls compl in case of IOCB_ERROR.
9486 */
9487 return retval;
9488}
9489
9490/**
9491 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9492 * @phba: Pointer to HBA context object.
9493 * @pring: Pointer to driver SLI ring object.
9494 * @cmdiocb: Pointer to driver command iocb object.
9495 *
9496 * This function issues an abort iocb for the provided command iocb. In case
9497 * of unloading, the abort iocb will not be issued to commands on the ELS
9498 * ring. Instead, the completion callback of those commands is replaced
9499 * so that nothing happens when they finish. This function is called with
9500 * hbalock held. The function returns 0 when the command iocb is an abort
9501 * request.
9502 **/
9503int
9504lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9505 struct lpfc_iocbq *cmdiocb)
9506{
9507 struct lpfc_vport *vport = cmdiocb->vport;
9508 int retval = IOCB_ERROR;
9509 IOCB_t *icmd = NULL;
9510
9511 /*
9512 * There are certain command types we don't want to abort. And we
9513 * don't want to abort commands that are already in the process of
9514 * being aborted.
9515 */
9516 icmd = &cmdiocb->iocb;
9517 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9518 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9519 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9520 return 0;
9521
9522 /*
9523 * If we're unloading, don't abort iocb on the ELS ring, but change
9524 * the callback so that nothing happens when it finishes.
9525 */
9526 if ((vport->load_flag & FC_UNLOADING) &&
9527 (pring->ringno == LPFC_ELS_RING)) {
9528 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9529 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9530 else
9531 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9532 goto abort_iotag_exit;
9533 }
9534
9535 /* Now, try to issue the abort for the cmdiocb */
9536 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9537
07951076 9538abort_iotag_exit:
2e0fef85
JS
9539 /*
9540 * Caller to this routine should check for IOCB_ERROR
9541 * and handle it properly. This routine no longer removes the
9542 * iocb from txcmplq nor calls compl in case of IOCB_ERROR.
07951076 9543 */
2e0fef85 9544 return retval;
dea3101e 9545}
9546
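/*
 * Hypothetical caller sketch (illustrative only, not part of the driver):
 * lpfc_sli_issue_abort_iotag() expects hbalock to be held and returns 0
 * when the abort was issued or was not needed (e.g. the iocb is itself
 * an abort). The iocb pointer is assumed to be a valid entry that the
 * caller is tracking on the ring's txcmplq.
 */
static void example_abort_one_iocb(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *iocb)
{
	int ret;

	spin_lock_irq(&phba->hbalock);
	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	spin_unlock_irq(&phba->hbalock);

	/* On IOCB_ERROR the iocb stays on txcmplq and no completion is
	 * called; the caller must handle the error itself.
	 */
	if (ret == IOCB_ERROR)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"example: abort issue failed\n");
}
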
5af5eee7
JS
9547/**
9548 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9549 * @phba: Pointer to HBA context object.
9550 * @pring: Pointer to driver SLI ring object.
9551 *
9552 * This function aborts all iocbs in the given ring and frees all the iocb
9553 * objects in txq. This function issues abort iocbs unconditionally for all
9554 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9555 * to complete before the return of this function. The caller is not required
9556 * to hold any locks.
9557 **/
9558static void
9559lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9560{
9561 LIST_HEAD(completions);
9562 struct lpfc_iocbq *iocb, *next_iocb;
9563
9564 if (pring->ringno == LPFC_ELS_RING)
9565 lpfc_fabric_abort_hba(phba);
9566
9567 spin_lock_irq(&phba->hbalock);
9568
9569 /* Take off all the iocbs on txq for cancelling */
9570 list_splice_init(&pring->txq, &completions);
9571 pring->txq_cnt = 0;
9572
9573 /* Next issue ABTS for everything on the txcmplq */
9574 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9575 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9576
9577 spin_unlock_irq(&phba->hbalock);
9578
9579 /* Cancel all the IOCBs from the completions list */
9580 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9581 IOERR_SLI_ABORTED);
9582}
9583
9584/**
9585 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9586 * @phba: pointer to lpfc HBA data structure.
9587 *
9588 * This routine will abort all pending and outstanding iocbs to an HBA.
9589 **/
9590void
9591lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9592{
9593 struct lpfc_sli *psli = &phba->sli;
9594 struct lpfc_sli_ring *pring;
9595 int i;
9596
9597 for (i = 0; i < psli->num_rings; i++) {
9598 pring = &psli->ring[i];
9599 lpfc_sli_iocb_ring_abort(phba, pring);
9600 }
9601}
9602
e59058c4 9603/**
3621a710 9604 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
e59058c4
JS
9605 * @iocbq: Pointer to driver iocb object.
9606 * @vport: Pointer to driver virtual port object.
9607 * @tgt_id: SCSI ID of the target.
9608 * @lun_id: LUN ID of the scsi device.
9609 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9610 *
3621a710 9611 * This function acts as an iocb filter for functions which abort or count
e59058c4
JS
9612 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9613 * 0 if the filtering criteria are met for the given iocb and will return
9614 * 1 if the filtering criteria are not met.
9615 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9616 * given iocb is for the SCSI device specified by vport, tgt_id and
9617 * lun_id parameter.
9618 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
9619 * given iocb is for the SCSI target specified by vport and tgt_id
9620 * parameters.
9621 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9622 * given iocb is for the SCSI host associated with the given vport.
9623 * This function is called with no locks held.
9624 **/
dea3101e 9625static int
51ef4c26
JS
9626lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9627 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 9628 lpfc_ctx_cmd ctx_cmd)
dea3101e 9629{
0bd4ca25 9630 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 9631 int rc = 1;
9632
0bd4ca25
JSEC
9633 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
9634 return rc;
9635
51ef4c26
JS
9636 if (iocbq->vport != vport)
9637 return rc;
9638
0bd4ca25 9639 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
0bd4ca25 9640
495a714c 9641 if (lpfc_cmd->pCmd == NULL)
dea3101e 9642 return rc;
9643
9644 switch (ctx_cmd) {
9645 case LPFC_CTX_LUN:
495a714c
JS
9646 if ((lpfc_cmd->rdata->pnode) &&
9647 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9648 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea3101e 9649 rc = 0;
9650 break;
9651 case LPFC_CTX_TGT:
495a714c
JS
9652 if ((lpfc_cmd->rdata->pnode) &&
9653 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea3101e 9654 rc = 0;
9655 break;
dea3101e 9656 case LPFC_CTX_HOST:
9657 rc = 0;
9658 break;
9659 default:
9660 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 9661 __func__, ctx_cmd);
dea3101e 9662 break;
9663 }
9664
9665 return rc;
9666}
9667
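/*
 * Illustrative sketch (an assumption, not part of the driver): the three
 * ctx_cmd scopes of lpfc_sli_validate_fcp_iocb() nest from narrowest to
 * widest. A return of 0 means the iocb falls inside the requested scope.
 */
static int example_iocb_in_scope(struct lpfc_iocbq *iocbq,
				 struct lpfc_vport *vport,
				 uint16_t tgt_id, uint64_t lun_id)
{
	/* Narrowest scope: one LUN on one target of this vport */
	if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
				       LPFC_CTX_LUN) == 0)
		return 1;

	/* Widest scope: any FCP iocb owned by this vport */
	return lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					  LPFC_CTX_HOST) == 0;
}
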
e59058c4 9668/**
3621a710 9669 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
e59058c4
JS
9670 * @vport: Pointer to virtual port.
9671 * @tgt_id: SCSI ID of the target.
9672 * @lun_id: LUN ID of the scsi device.
9673 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9674 *
9675 * This function returns the number of FCP commands pending for the vport.
9676 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
9677 * commands pending on the vport associated with the SCSI device specified
9678 * by the tgt_id and lun_id parameters.
9679 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
9680 * commands pending on the vport associated with the SCSI target specified
9681 * by the tgt_id parameter.
9682 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
9683 * commands pending on the vport.
9684 * This function returns the number of iocbs which satisfy the filter.
9685 * This function is called without any lock held.
9686 **/
dea3101e 9687int
51ef4c26
JS
9688lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9689 lpfc_ctx_cmd ctx_cmd)
dea3101e 9690{
51ef4c26 9691 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
9692 struct lpfc_iocbq *iocbq;
9693 int sum, i;
dea3101e 9694
0bd4ca25
JSEC
9695 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9696 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 9697
51ef4c26
JS
9698 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
9699 ctx_cmd) == 0)
0bd4ca25 9700 sum++;
dea3101e 9701 }
0bd4ca25 9702
dea3101e 9703 return sum;
9704}
9705
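/*
 * Hypothetical sketch (illustrative only): counting outstanding FCP
 * commands before escalating a recovery action. The tgt_id value is an
 * assumption supplied by the caller's error handler; lun_id is ignored
 * for the LPFC_CTX_TGT scope.
 */
static int example_outstanding_on_target(struct lpfc_vport *vport,
					 uint16_t tgt_id)
{
	return lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
}
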
e59058c4 9706/**
3621a710 9707 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
e59058c4
JS
9708 * @phba: Pointer to HBA context object
9709 * @cmdiocb: Pointer to command iocb object.
9710 * @rspiocb: Pointer to response iocb object.
9711 *
9712 * This function is called when an aborted FCP iocb completes. This
9713 * function is called by the ring event handler with no lock held.
9714 * This function frees the iocb.
9715 **/
5eb95af0 9716void
2e0fef85
JS
9717lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9718 struct lpfc_iocbq *rspiocb)
5eb95af0 9719{
cb69f7de
JS
9720 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9721 "3096 ABORT_XRI_CN completing on xri x%x "
9722 "original iotag x%x, abort cmd iotag x%x "
9723 "status 0x%x, reason 0x%x\n",
9724 cmdiocb->iocb.un.acxri.abortContextTag,
9725 cmdiocb->iocb.un.acxri.abortIoTag,
9726 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9727 rspiocb->iocb.un.ulpWord[4]);
604a3e30 9728 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
9729 return;
9730}
9731
e59058c4 9732/**
3621a710 9733 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
e59058c4
JS
9734 * @vport: Pointer to virtual port.
9735 * @pring: Pointer to driver SLI ring object.
9736 * @tgt_id: SCSI ID of the target.
9737 * @lun_id: LUN ID of the scsi device.
9738 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9739 *
9740 * This function sends an abort command for every SCSI command
9741 * associated with the given virtual port pending on the ring
9742 * filtered by lpfc_sli_validate_fcp_iocb function.
9743 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9744 * FCP iocbs associated with lun specified by tgt_id and lun_id
9745 * parameters
9746 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9747 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9748 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9749 * FCP iocbs associated with virtual port.
9750 * This function returns the number of iocbs it failed to abort.
9751 * This function is called with no locks held.
9752 **/
dea3101e 9753int
51ef4c26
JS
9754lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9755 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 9756{
51ef4c26 9757 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
9758 struct lpfc_iocbq *iocbq;
9759 struct lpfc_iocbq *abtsiocb;
dea3101e 9760 IOCB_t *cmd = NULL;
dea3101e 9761 int errcnt = 0, ret_val = 0;
0bd4ca25 9762 int i;
dea3101e 9763
0bd4ca25
JSEC
9764 for (i = 1; i <= phba->sli.last_iotag; i++) {
9765 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 9766
51ef4c26 9767 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 9768 abort_cmd) != 0)
dea3101e 9769 continue;
9770
9771 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 9772 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 9773 if (abtsiocb == NULL) {
9774 errcnt++;
9775 continue;
9776 }
dea3101e 9777
0bd4ca25 9778 cmd = &iocbq->iocb;
dea3101e 9779 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9780 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
9781 if (phba->sli_rev == LPFC_SLI_REV4)
9782 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9783 else
9784 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 9785 abtsiocb->iocb.ulpLe = 1;
9786 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2e0fef85 9787 abtsiocb->vport = phba->pport;
dea3101e 9788
5ffc266e
JS
9789 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9790 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
341af102
JS
9791 if (iocbq->iocb_flag & LPFC_IO_FCP)
9792 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9793
2e0fef85 9794 if (lpfc_is_link_up(phba))
dea3101e 9795 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9796 else
9797 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9798
5eb95af0
JSEC
9799 /* Setup callback routine and issue the command. */
9800 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
da0436e9
JS
9801 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9802 abtsiocb, 0);
dea3101e 9803 if (ret_val == IOCB_ERROR) {
604a3e30 9804 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 9805 errcnt++;
9806 continue;
9807 }
9808 }
9809
9810 return errcnt;
9811}
9812
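/*
 * Hypothetical sketch (not part of the driver): aborting everything
 * outstanding for one SCSI target on the FCP ring and reporting how many
 * aborts could not be issued. lun_id is ignored for LPFC_CTX_TGT.
 */
static void example_abort_target(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
	int failed;

	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
	if (failed)
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "example: %d abort(s) not issued\n",
				 failed);
}
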
e59058c4 9813/**
3621a710 9814 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
e59058c4
JS
9815 * @phba: Pointer to HBA context object.
9816 * @cmdiocbq: Pointer to command iocb.
9817 * @rspiocbq: Pointer to response iocb.
9818 *
9819 * This function is the completion handler for iocbs issued using
9820 * lpfc_sli_issue_iocb_wait function. This function is called by the
9821 * ring event handler function without any lock held. This function
9822 * can be called from both worker thread context and interrupt
9823 * context. This function can also be called from another thread which
9824 * cleans up the SLI layer objects.
9825 * This function copies the contents of the response iocb to the
9826 * response iocb memory object provided by the caller of
9827 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9828 * sleeps for the iocb completion.
9829 **/
68876920
JSEC
9830static void
9831lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9832 struct lpfc_iocbq *cmdiocbq,
9833 struct lpfc_iocbq *rspiocbq)
dea3101e 9834{
68876920
JSEC
9835 wait_queue_head_t *pdone_q;
9836 unsigned long iflags;
0f65ff68 9837 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 9838
2e0fef85 9839 spin_lock_irqsave(&phba->hbalock, iflags);
68876920
JSEC
9840 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9841 if (cmdiocbq->context2 && rspiocbq)
9842 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9843 &rspiocbq->iocb, sizeof(IOCB_t));
9844
0f65ff68
JS
9845 /* Set the exchange busy flag for task management commands */
9846 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9847 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9848 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9849 cur_iocbq);
9850 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9851 }
9852
68876920 9853 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
9854 if (pdone_q)
9855 wake_up(pdone_q);
858c9f6c 9856 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 9857 return;
9858}
9859
d11e31dd
JS
9860/**
9861 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9862 * @phba: Pointer to HBA context object.
9863 * @piocbq: Pointer to command iocb.
9864 * @flag: Flag to test.
9865 *
9866 * This routine grabs the hbalock and then tests the iocb_flag to
9867 * see if the passed in flag is set.
9868 * Returns:
9869 * 1 if flag is set.
9870 * 0 if flag is not set.
9871 **/
9872static int
9873lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9874 struct lpfc_iocbq *piocbq, uint32_t flag)
9875{
9876 unsigned long iflags;
9877 int ret;
9878
9879 spin_lock_irqsave(&phba->hbalock, iflags);
9880 ret = piocbq->iocb_flag & flag;
9881 spin_unlock_irqrestore(&phba->hbalock, iflags);
9882 return ret;
9883
9884}
9885
e59058c4 9886/**
3621a710 9887 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
e59058c4
JS
9888 * @phba: Pointer to HBA context object.
9889 * @pring: Pointer to sli ring.
9890 * @piocb: Pointer to command iocb.
9891 * @prspiocbq: Pointer to response iocb.
9892 * @timeout: Timeout in number of seconds.
9893 *
9894 * This function issues the iocb to firmware and waits for the
9895 * iocb to complete. If the iocb command is not
9896 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9897 * Caller should not free the iocb resources if this function
9898 * returns IOCB_TIMEDOUT.
9899 * The function waits for the iocb completion using a
9900 * non-interruptible wait.
9901 * This function will sleep while waiting for iocb completion.
9902 * So, this function should not be called from any context which
9903 * does not allow sleeping. Due to the same reason, this function
9904 * cannot be called with interrupt disabled.
9905 * This function assumes that the iocb completions occur while
9906 * this function sleeps. So, this function cannot be called from
9907 * the thread which processes iocb completion for this ring.
9908 * This function clears the iocb_flag of the iocb object before
9909 * issuing the iocb and the iocb completion handler sets this
9910 * flag and wakes this thread when the iocb completes.
9911 * The contents of the response iocb will be copied to prspiocbq
9912 * by the completion handler when the command completes.
9913 * This function returns IOCB_SUCCESS when success.
9914 * This function is called with no lock held.
9915 **/
dea3101e 9916int
2e0fef85 9917lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 9918 uint32_t ring_number,
2e0fef85
JS
9919 struct lpfc_iocbq *piocb,
9920 struct lpfc_iocbq *prspiocbq,
68876920 9921 uint32_t timeout)
dea3101e 9922{
7259f0d0 9923 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
9924 long timeleft, timeout_req = 0;
9925 int retval = IOCB_SUCCESS;
875fbdfe 9926 uint32_t creg_val;
2a9bf3d0 9927 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 9928 /*
68876920
JSEC
9929 * If the caller has provided a response iocbq buffer, then context2
9930 * must be NULL or it is an error.
dea3101e 9931 */
68876920
JSEC
9932 if (prspiocbq) {
9933 if (piocb->context2)
9934 return IOCB_ERROR;
9935 piocb->context2 = prspiocbq;
dea3101e 9936 }
9937
68876920
JSEC
9938 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9939 piocb->context_un.wait_queue = &done_q;
9940 piocb->iocb_flag &= ~LPFC_IO_WAKE;
dea3101e 9941
875fbdfe 9942 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
9943 if (lpfc_readl(phba->HCregaddr, &creg_val))
9944 return IOCB_ERROR;
875fbdfe
JSEC
9945 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9946 writel(creg_val, phba->HCregaddr);
9947 readl(phba->HCregaddr); /* flush */
9948 }
9949
2a9bf3d0
JS
9950 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9951 SLI_IOCB_RET_IOCB);
68876920
JSEC
9952 if (retval == IOCB_SUCCESS) {
9953 timeout_req = timeout * HZ;
68876920 9954 timeleft = wait_event_timeout(done_q,
d11e31dd 9955 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 9956 timeout_req);
dea3101e 9957
7054a606
JS
9958 if (piocb->iocb_flag & LPFC_IO_WAKE) {
9959 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 9960 "0331 IOCB wake signaled\n");
7054a606 9961 } else if (timeleft == 0) {
68876920 9962 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
9963 "0338 IOCB wait timeout error - no "
9964 "wake response Data x%x\n", timeout);
68876920 9965 retval = IOCB_TIMEDOUT;
7054a606 9966 } else {
68876920 9967 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
9968 "0330 IOCB wake NOT set, "
9969 "Data x%x x%lx\n",
68876920
JSEC
9970 timeout, (timeleft / jiffies));
9971 retval = IOCB_TIMEDOUT;
dea3101e 9972 }
2a9bf3d0
JS
9973 } else if (retval == IOCB_BUSY) {
9974 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9975 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
9976 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
9977 return retval;
68876920
JSEC
9978 } else {
9979 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 9980 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 9981 retval);
68876920 9982 retval = IOCB_ERROR;
dea3101e 9983 }
9984
875fbdfe 9985 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
9986 if (lpfc_readl(phba->HCregaddr, &creg_val))
9987 return IOCB_ERROR;
875fbdfe
JSEC
9988 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
9989 writel(creg_val, phba->HCregaddr);
9990 readl(phba->HCregaddr); /* flush */
9991 }
9992
68876920
JSEC
9993 if (prspiocbq)
9994 piocb->context2 = NULL;
9995
9996 piocb->context_un.wait_queue = NULL;
9997 piocb->iocb_cmpl = NULL;
dea3101e 9998 return retval;
9999}
68876920 10000
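/*
 * Hypothetical caller sketch (illustrative only): the key contract of
 * lpfc_sli_issue_iocb_wait() is that on IOCB_TIMEDOUT the caller must NOT
 * free the iocb resources, because the completion may still fire later.
 * The piocb/prsp objects are assumed to be set up by the caller.
 */
static int example_issue_and_wait(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb,
				  struct lpfc_iocbq *prsp)
{
	int rc;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp, 30);
	switch (rc) {
	case IOCB_SUCCESS:
		/* prsp->iocb now holds the response contents */
		break;
	case IOCB_TIMEDOUT:
		/* Do not free piocb/prsp here; the completion still
		 * references them.
		 */
		break;
	default:
		/* IOCB_ERROR/IOCB_BUSY: never issued, caller retains
		 * ownership of piocb and prsp.
		 */
		break;
	}
	return rc;
}
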
e59058c4 10001/**
3621a710 10002 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
e59058c4
JS
10003 * @phba: Pointer to HBA context object.
10004 * @pmboxq: Pointer to driver mailbox object.
10005 * @timeout: Timeout in number of seconds.
10006 *
10007 * This function issues the mailbox to firmware and waits for the
10008 * mailbox command to complete. If the mailbox command is not
10009 * completed within timeout seconds, it returns MBX_TIMEOUT.
10010 * The function waits for the mailbox completion using an
10011 * interruptible wait. If the thread is woken up due to a
10012 * signal, MBX_TIMEOUT error is returned to the caller. Caller
10013 * should not free the mailbox resources, if this function returns
10014 * MBX_TIMEOUT.
10015 * This function will sleep while waiting for mailbox completion.
10016 * So, this function should not be called from any context which
10017 * does not allow sleeping. Due to the same reason, this function
10018 * cannot be called with interrupt disabled.
10019 * This function assumes that the mailbox completion occurs while
10020 * this function sleeps. So, this function cannot be called from
10021 * the worker thread which processes mailbox completion.
10022 * This function is called in the context of HBA management
10023 * applications.
10024 * This function returns MBX_SUCCESS when successful.
10025 * This function is called with no lock held.
10026 **/
dea3101e 10027int
2e0fef85 10028lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e 10029 uint32_t timeout)
10030{
7259f0d0 10031 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
dea3101e 10032 int retval;
858c9f6c 10033 unsigned long flag;
dea3101e 10034
10035 /* The caller must leave context1 empty. */
98c9ea5c 10036 if (pmboxq->context1)
2e0fef85 10037 return MBX_NOT_FINISHED;
dea3101e 10038
495a714c 10039 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea3101e 10040 /* setup wake call as IOCB callback */
10041 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10042 /* setup context field to pass wait_queue pointer to wake function */
10043 pmboxq->context1 = &done_q;
10044
dea3101e 10045 /* now issue the command */
10046 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 10047 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
7054a606
JS
10048 wait_event_interruptible_timeout(done_q,
10049 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10050 timeout * HZ);
10051
858c9f6c 10052 spin_lock_irqsave(&phba->hbalock, flag);
dea3101e 10053 pmboxq->context1 = NULL;
7054a606
JS
10054 /*
10055 * If the LPFC_MBX_WAKE flag is set, the mailbox has completed;
10056 * otherwise do not free the resources.
10057 */
d7c47992 10058 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 10059 retval = MBX_SUCCESS;
d7c47992
JS
10060 lpfc_sli4_swap_str(phba, pmboxq);
10061 } else {
7054a606 10062 retval = MBX_TIMEOUT;
858c9f6c
JS
10063 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10064 }
10065 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 10066 }
10067
dea3101e 10068 return retval;
10069}
10070
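/*
 * Hypothetical caller sketch (illustrative only): as with the iocb
 * variant, an MBX_TIMEOUT return means the mailbox is still owned by the
 * completion path (lpfc_sli_def_mbox_cmpl will release it) and must not
 * be freed by the caller. The mailbox is assumed to be prepared already.
 */
static int example_mbox_and_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	int rc;

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return rc;	/* do not free pmboxq */

	/* MBX_SUCCESS or an immediate failure: caller may free pmboxq */
	return rc;
}
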
e59058c4 10071/**
3772a991 10072 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
e59058c4
JS
10073 * @phba: Pointer to HBA context.
10074 *
3772a991
JS
10075 * This function is called to shut down the driver's mailbox sub-system.
10076 * It first marks the mailbox sub-system as blocked to prevent
10077 * asynchronous mailbox commands from being issued off the pending mailbox
10078 * command queue. If the mailbox command sub-system shutdown is due to
10079 * HBA error conditions such as EEH or ERATT, this routine shall invoke
10080 * the mailbox sub-system flush routine to forcefully bring down the
10081 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
10082 * as offline or an HBA function reset), this routine will wait for the
10083 * outstanding mailbox command to complete before invoking the mailbox
10084 * sub-system flush routine to gracefully bring down the mailbox sub-system.
e59058c4 10085 **/
3772a991 10086void
618a5230 10087lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
b4c02652 10088{
3772a991 10089 struct lpfc_sli *psli = &phba->sli;
3772a991 10090 unsigned long timeout;
b4c02652 10091
618a5230
JS
10092 if (mbx_action == LPFC_MBX_NO_WAIT) {
10093 /* delay 100ms for port state */
10094 msleep(100);
10095 lpfc_sli_mbox_sys_flush(phba);
10096 return;
10097 }
a183a15f 10098 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 10099
3772a991
JS
10100 spin_lock_irq(&phba->hbalock);
10101 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 10102
3772a991 10103 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3772a991
JS
10104 /* Determine how long we might wait for the active mailbox
10105 * command to be gracefully completed by firmware.
10106 */
a183a15f
JS
10107 if (phba->sli.mbox_active)
10108 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10109 phba->sli.mbox_active) *
10110 1000) + jiffies;
10111 spin_unlock_irq(&phba->hbalock);
10112
3772a991
JS
10113 while (phba->sli.mbox_active) {
10114 /* Check active mailbox complete status every 2ms */
10115 msleep(2);
10116 if (time_after(jiffies, timeout))
10117 /* Timeout, let the mailbox flush routine
10118 * forcefully release the active mailbox command
10119 */
10120 break;
10121 }
d7069f09
JS
10122 } else
10123 spin_unlock_irq(&phba->hbalock);
10124
3772a991
JS
10125 lpfc_sli_mbox_sys_flush(phba);
10126}
ed957684 10127
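/*
 * Hypothetical sketch (illustrative only; assumes the LPFC_MBX_WAIT and
 * LPFC_MBX_NO_WAIT action codes): error paths such as EEH flush the
 * mailbox sub-system immediately, while normal teardown waits for the
 * active mailbox command first.
 */
static void example_shutdown_mbox(struct lpfc_hba *phba, int in_error_path)
{
	lpfc_sli_mbox_sys_shutdown(phba, in_error_path ? LPFC_MBX_NO_WAIT
						       : LPFC_MBX_WAIT);
}
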
3772a991
JS
10128/**
10129 * lpfc_sli_eratt_read - read sli-3 error attention events
10130 * @phba: Pointer to HBA context.
10131 *
10132 * This function is called to read the SLI3 device error attention registers
10133 * for possible error attention events. The caller must hold the hostlock
10134 * with spin_lock_irq().
10135 *
25985edc 10136 * This function returns 1 when there is Error Attention in the Host Attention
3772a991
JS
10137 * Register and returns 0 otherwise.
10138 **/
10139static int
10140lpfc_sli_eratt_read(struct lpfc_hba *phba)
10141{
10142 uint32_t ha_copy;
b4c02652 10143
3772a991 10144 /* Read chip Host Attention (HA) register */
9940b97b
JS
10145 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10146 goto unplug_err;
10147
3772a991
JS
10148 if (ha_copy & HA_ERATT) {
10149 /* Read host status register to retrieve error event */
9940b97b
JS
10150 if (lpfc_sli_read_hs(phba))
10151 goto unplug_err;
b4c02652 10152
3772a991
JS
10153 /* Check if a deferred error condition is active */
10154 if ((HS_FFER1 & phba->work_hs) &&
10155 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 10156 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 10157 phba->hba_flag |= DEFER_ERATT;
3772a991
JS
10158 /* Clear all interrupt enable conditions */
10159 writel(0, phba->HCregaddr);
10160 readl(phba->HCregaddr);
10161 }
10162
10163 /* Set the driver HA work bitmap */
3772a991
JS
10164 phba->work_ha |= HA_ERATT;
10165 /* Indicate polling handles this ERATT */
10166 phba->hba_flag |= HBA_ERATT_HANDLED;
3772a991
JS
10167 return 1;
10168 }
10169 return 0;
9940b97b
JS
10170
10171unplug_err:
10172 /* Set the driver HS work bitmap */
10173 phba->work_hs |= UNPLUG_ERR;
10174 /* Set the driver HA work bitmap */
10175 phba->work_ha |= HA_ERATT;
10176 /* Indicate polling handles this ERATT */
10177 phba->hba_flag |= HBA_ERATT_HANDLED;
10178 return 1;
b4c02652
JS
10179}
10180
da0436e9
JS
10181/**
10182 * lpfc_sli4_eratt_read - read sli-4 error attention events
10183 * @phba: Pointer to HBA context.
10184 *
10185 * This function is called to read the SLI4 device error attention registers
10186 * for possible error attention events. The caller must hold the hostlock
10187 * with spin_lock_irq().
10188 *
25985edc 10189 * This function returns 1 when there is Error Attention in the Host Attention
da0436e9
JS
10190 * Register and returns 0 otherwise.
10191 **/
10192static int
10193lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10194{
10195 uint32_t uerr_sta_hi, uerr_sta_lo;
2fcee4bf
JS
10196 uint32_t if_type, portsmphr;
10197 struct lpfc_register portstat_reg;
da0436e9 10198
2fcee4bf
JS
10199 /*
10200 * For now, use the SLI4 device internal unrecoverable error
da0436e9
JS
10201 * registers for error attention. This can be changed later.
10202 */
2fcee4bf
JS
10203 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10204 switch (if_type) {
10205 case LPFC_SLI_INTF_IF_TYPE_0:
9940b97b
JS
10206 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10207 &uerr_sta_lo) ||
10208 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10209 &uerr_sta_hi)) {
10210 phba->work_hs |= UNPLUG_ERR;
10211 phba->work_ha |= HA_ERATT;
10212 phba->hba_flag |= HBA_ERATT_HANDLED;
10213 return 1;
10214 }
2fcee4bf
JS
10215 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10216 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10218 "1423 HBA Unrecoverable error: "
10219 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10220 "ue_mask_lo_reg=0x%x, "
10221 "ue_mask_hi_reg=0x%x\n",
10222 uerr_sta_lo, uerr_sta_hi,
10223 phba->sli4_hba.ue_mask_lo,
10224 phba->sli4_hba.ue_mask_hi);
10225 phba->work_status[0] = uerr_sta_lo;
10226 phba->work_status[1] = uerr_sta_hi;
10227 phba->work_ha |= HA_ERATT;
10228 phba->hba_flag |= HBA_ERATT_HANDLED;
10229 return 1;
10230 }
10231 break;
10232 case LPFC_SLI_INTF_IF_TYPE_2:
9940b97b
JS
10233 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10234 &portstat_reg.word0) ||
10235 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10236 &portsmphr)){
10237 phba->work_hs |= UNPLUG_ERR;
10238 phba->work_ha |= HA_ERATT;
10239 phba->hba_flag |= HBA_ERATT_HANDLED;
10240 return 1;
10241 }
2fcee4bf
JS
10242 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10243 phba->work_status[0] =
10244 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10245 phba->work_status[1] =
10246 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 10248 "2885 Port Status Event: "
2fcee4bf
JS
10249 "port status reg 0x%x, "
10250 "port smphr reg 0x%x, "
10251 "error 1=0x%x, error 2=0x%x\n",
10252 portstat_reg.word0,
10253 portsmphr,
10254 phba->work_status[0],
10255 phba->work_status[1]);
10256 phba->work_ha |= HA_ERATT;
10257 phba->hba_flag |= HBA_ERATT_HANDLED;
10258 return 1;
10259 }
10260 break;
10261 case LPFC_SLI_INTF_IF_TYPE_1:
10262 default:
a747c9ce 10263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
10264 "2886 HBA Error Attention on unsupported "
10265 "if type %d.", if_type);
a747c9ce 10266 return 1;
da0436e9 10267 }
2fcee4bf 10268
da0436e9
JS
10269 return 0;
10270}
10271
e59058c4 10272/**
3621a710 10273 * lpfc_sli_check_eratt - check error attention events
9399627f
JS
10274 * @phba: Pointer to HBA context.
10275 *
3772a991 10276 * This function is called from timer soft interrupt context to check HBA's
9399627f
JS
10277 * error attention register bit for error attention events.
10278 *
25985edc 10279 * This function returns 1 when there is Error Attention in the Host Attention
9399627f
JS
10280 * Register and returns 0 otherwise.
10281 **/
10282int
10283lpfc_sli_check_eratt(struct lpfc_hba *phba)
10284{
10285 uint32_t ha_copy;
10286
10287 /* If somebody is waiting to handle an eratt, don't process it
10288 * here. The brdkill function will do this.
10289 */
10290 if (phba->link_flag & LS_IGNORE_ERATT)
10291 return 0;
10292
10293 /* Check if interrupt handler handles this ERATT */
10294 spin_lock_irq(&phba->hbalock);
10295 if (phba->hba_flag & HBA_ERATT_HANDLED) {
10296 /* Interrupt handler has handled ERATT */
10297 spin_unlock_irq(&phba->hbalock);
10298 return 0;
10299 }
10300
a257bf90
JS
10301 /*
10302 * If there is deferred error attention, do not check for error
10303 * attention
10304 */
10305 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10306 spin_unlock_irq(&phba->hbalock);
10307 return 0;
10308 }
10309
3772a991
JS
10310 /* If PCI channel is offline, don't process it */
10311 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 10312 spin_unlock_irq(&phba->hbalock);
3772a991
JS
10313 return 0;
10314 }
10315
10316 switch (phba->sli_rev) {
10317 case LPFC_SLI_REV2:
10318 case LPFC_SLI_REV3:
10319 /* Read chip Host Attention (HA) register */
10320 ha_copy = lpfc_sli_eratt_read(phba);
10321 break;
da0436e9 10322 case LPFC_SLI_REV4:
2fcee4bf 10323 /* Read device Unrecoverable Error (UERR) registers */
da0436e9
JS
10324 ha_copy = lpfc_sli4_eratt_read(phba);
10325 break;
3772a991
JS
10326 default:
10327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10328 "0299 Invalid SLI revision (%d)\n",
10329 phba->sli_rev);
10330 ha_copy = 0;
10331 break;
9399627f
JS
10332 }
10333 spin_unlock_irq(&phba->hbalock);
3772a991
JS
10334
10335 return ha_copy;
10336}
10337
10338/**
10339 * lpfc_intr_state_check - Check device state for interrupt handling
10340 * @phba: Pointer to HBA context.
10341 *
10342 * This inline routine checks whether a device or its PCI slot is in a state
10343 * in which the interrupt should be handled.
10344 *
10345 * This function returns 0 if the device or the PCI slot is in a state in
10346 * which the interrupt should be handled, otherwise -EIO.
10347 */
10348static inline int
10349lpfc_intr_state_check(struct lpfc_hba *phba)
10350{
10351 /* If the pci channel is offline, ignore all the interrupts */
10352 if (unlikely(pci_channel_offline(phba->pcidev)))
10353 return -EIO;
10354
10355 /* Update device level interrupt statistics */
10356 phba->sli.slistat.sli_intr++;
10357
10358 /* Ignore all interrupts during initialization. */
10359 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10360 return -EIO;
10361
9399627f
JS
10362 return 0;
10363}
10364
10365/**
3772a991 10366 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
e59058c4
JS
10367 * @irq: Interrupt number.
10368 * @dev_id: The device context pointer.
10369 *
9399627f 10370 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
10371 * service routine when device with SLI-3 interface spec is enabled with
10372 * MSI-X multi-message interrupt mode and there are slow-path events in
10373 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
10374 * interrupt mode, this function is called as part of the device-level
10375 * interrupt handler. When the PCI slot is in error recovery or the HBA
10376 * is undergoing initialization, the interrupt handler will not process
10377 * the interrupt. The link attention and ELS ring attention events are
10378 * handled by the worker thread. The interrupt handler signals the worker
10379 * thread and returns for these events. This function is called without
10380 * any lock held. It gets the hbalock to access and update SLI data
9399627f
JS
10381 * structures.
10382 *
10383 * This function returns IRQ_HANDLED when interrupt is handled else it
10384 * returns IRQ_NONE.
e59058c4 10385 **/
dea3101e 10386irqreturn_t
3772a991 10387lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 10388{
2e0fef85 10389 struct lpfc_hba *phba;
a747c9ce 10390 uint32_t ha_copy, hc_copy;
dea3101e 10391 uint32_t work_ha_copy;
10392 unsigned long status;
5b75da2f 10393 unsigned long iflag;
dea3101e 10394 uint32_t control;
10395
92d7f7b0 10396 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
10397 struct lpfc_vport *vport;
10398 struct lpfc_nodelist *ndlp;
10399 struct lpfc_dmabuf *mp;
92d7f7b0
JS
10400 LPFC_MBOXQ_t *pmb;
10401 int rc;
10402
dea3101e 10403 /*
10404 * Get the driver's phba structure from the dev_id and
10405 * assume the HBA is not interrupting.
10406 */
9399627f 10407 phba = (struct lpfc_hba *)dev_id;
dea3101e 10408
10409 if (unlikely(!phba))
10410 return IRQ_NONE;
10411
dea3101e 10412 /*
9399627f
JS
10413 * Stuff needs to be attended to when this function is invoked as an
10414 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 10415 */
9399627f 10416 if (phba->intr_type == MSIX) {
3772a991
JS
10417 /* Check device state for handling interrupt */
10418 if (lpfc_intr_state_check(phba))
9399627f
JS
10419 return IRQ_NONE;
10420 /* Need to read HA REG for slow-path events */
5b75da2f 10421 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
10422 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10423 goto unplug_error;
9399627f
JS
10424 /* If somebody is waiting to handle an eratt don't process it
10425 * here. The brdkill function will do this.
10426 */
10427 if (phba->link_flag & LS_IGNORE_ERATT)
10428 ha_copy &= ~HA_ERATT;
10429 /* Check the need for handling ERATT in interrupt handler */
10430 if (ha_copy & HA_ERATT) {
10431 if (phba->hba_flag & HBA_ERATT_HANDLED)
10432 /* ERATT polling has handled ERATT */
10433 ha_copy &= ~HA_ERATT;
10434 else
10435 /* Indicate interrupt handler handles ERATT */
10436 phba->hba_flag |= HBA_ERATT_HANDLED;
10437 }
a257bf90
JS
10438
10439 /*
10440 * If there is deferred error attention, do not check for any
10441 * interrupt.
10442 */
10443 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 10444 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
10445 return IRQ_NONE;
10446 }
10447
9399627f 10448 /* Clear up only attention source related to slow-path */
9940b97b
JS
10449 if (lpfc_readl(phba->HCregaddr, &hc_copy))
10450 goto unplug_error;
10451
a747c9ce
JS
10452 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
10453 HC_LAINT_ENA | HC_ERINT_ENA),
10454 phba->HCregaddr);
9399627f
JS
10455 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
10456 phba->HAregaddr);
a747c9ce 10457 writel(hc_copy, phba->HCregaddr);
9399627f 10458 readl(phba->HAregaddr); /* flush */
5b75da2f 10459 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
10460 } else
10461 ha_copy = phba->ha_copy;
dea3101e 10462
dea3101e 10463 work_ha_copy = ha_copy & phba->work_ha_mask;
10464
9399627f 10465 if (work_ha_copy) {
dea3101e 10466 if (work_ha_copy & HA_LATT) {
10467 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
10468 /*
10469 * Turn off Link Attention interrupts
10470 * until CLEAR_LA done
10471 */
5b75da2f 10472 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 10473 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
9940b97b
JS
10474 if (lpfc_readl(phba->HCregaddr, &control))
10475 goto unplug_error;
dea3101e 10476 control &= ~HC_LAINT_ENA;
10477 writel(control, phba->HCregaddr);
10478 readl(phba->HCregaddr); /* flush */
5b75da2f 10479 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 10480 }
10481 else
10482 work_ha_copy &= ~HA_LATT;
10483 }
10484
9399627f 10485 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
858c9f6c
JS
10486 /*
10487 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10488 * the only slow ring.
10489 */
10490 status = (work_ha_copy &
10491 (HA_RXMASK << (4*LPFC_ELS_RING)));
10492 status >>= (4*LPFC_ELS_RING);
10493 if (status & HA_RXMASK) {
5b75da2f 10494 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
10495 if (lpfc_readl(phba->HCregaddr, &control))
10496 goto unplug_error;
a58cbd52
JS
10497
10498 lpfc_debugfs_slow_ring_trc(phba,
10499 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
10500 control, status,
10501 (uint32_t)phba->sli.slistat.sli_intr);
10502
858c9f6c 10503 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
10504 lpfc_debugfs_slow_ring_trc(phba,
10505 "ISR Disable ring:"
10506 "pwork:x%x hawork:x%x wait:x%x",
10507 phba->work_ha, work_ha_copy,
10508 (uint32_t)((unsigned long)
5e9d9b82 10509 &phba->work_waitq));
a58cbd52 10510
858c9f6c
JS
10511 control &=
10512 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e 10513 writel(control, phba->HCregaddr);
10514 readl(phba->HCregaddr); /* flush */
dea3101e 10515 }
a58cbd52
JS
10516 else {
10517 lpfc_debugfs_slow_ring_trc(phba,
10518 "ISR slow ring: pwork:"
10519 "x%x hawork:x%x wait:x%x",
10520 phba->work_ha, work_ha_copy,
10521 (uint32_t)((unsigned long)
5e9d9b82 10522 &phba->work_waitq));
a58cbd52 10523 }
5b75da2f 10524 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 10525 }
10526 }
5b75da2f 10527 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 10528 if (work_ha_copy & HA_ERATT) {
9940b97b
JS
10529 if (lpfc_sli_read_hs(phba))
10530 goto unplug_error;
a257bf90
JS
10531 /*
10532 * Check if there is a deferred error condition
10533 * is active
10534 */
10535 if ((HS_FFER1 & phba->work_hs) &&
10536 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0
JS
10537 HS_FFER6 | HS_FFER7 | HS_FFER8) &
10538 phba->work_hs)) {
a257bf90
JS
10539 phba->hba_flag |= DEFER_ERATT;
10540 /* Clear all interrupt enable conditions */
10541 writel(0, phba->HCregaddr);
10542 readl(phba->HCregaddr);
10543 }
10544 }
10545
9399627f 10546 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 10547 pmb = phba->sli.mbox_active;
04c68496 10548 pmbox = &pmb->u.mb;
34b02dcd 10549 mbox = phba->mbox;
858c9f6c 10550 vport = pmb->vport;
92d7f7b0
JS
10551
10552 /* First check out the status word */
10553 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10554 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 10555 spin_unlock_irqrestore(&phba->hbalock, iflag);
92d7f7b0
JS
10556 /*
10557 * Stray Mailbox Interrupt, mbxCommand <cmd>
10558 * mbxStatus <status>
10559 */
09372820 10560 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 10561 LOG_SLI,
e8b62011 10562 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
10563 "Interrupt mbxCommand x%x "
10564 "mbxStatus x%x\n",
e8b62011 10565 (vport ? vport->vpi : 0),
92d7f7b0
JS
10566 pmbox->mbxCommand,
10567 pmbox->mbxStatus);
09372820
JS
10568 /* clear mailbox attention bit */
10569 work_ha_copy &= ~HA_MBATT;
10570 } else {
97eab634 10571 phba->sli.mbox_active = NULL;
5b75da2f 10572 spin_unlock_irqrestore(&phba->hbalock, iflag);
09372820
JS
10573 phba->last_completion_time = jiffies;
10574 del_timer(&phba->sli.mbox_tmo);
09372820
JS
10575 if (pmb->mbox_cmpl) {
10576 lpfc_sli_pcimem_bcopy(mbox, pmbox,
10577 MAILBOX_CMD_SIZE);
7a470277
JS
10578 if (pmb->out_ext_byte_len &&
10579 pmb->context2)
10580 lpfc_sli_pcimem_bcopy(
10581 phba->mbox_ext,
10582 pmb->context2,
10583 pmb->out_ext_byte_len);
09372820
JS
10584 }
10585 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10586 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10587
10588 lpfc_debugfs_disc_trc(vport,
10589 LPFC_DISC_TRC_MBOX_VPORT,
10590 "MBOX dflt rpi: : "
10591 "status:x%x rpi:x%x",
10592 (uint32_t)pmbox->mbxStatus,
10593 pmbox->un.varWords[0], 0);
10594
10595 if (!pmbox->mbxStatus) {
10596 mp = (struct lpfc_dmabuf *)
10597 (pmb->context1);
10598 ndlp = (struct lpfc_nodelist *)
10599 pmb->context2;
10600
10601 /* Reg_LOGIN of dflt RPI was
10602 * successful. new lets get
10603 * rid of the RPI using the
10604 * same mbox buffer.
10605 */
10606 lpfc_unreg_login(phba,
10607 vport->vpi,
10608 pmbox->un.varWords[0],
10609 pmb);
10610 pmb->mbox_cmpl =
10611 lpfc_mbx_cmpl_dflt_rpi;
10612 pmb->context1 = mp;
10613 pmb->context2 = ndlp;
10614 pmb->vport = vport;
58da1ffb
JS
10615 rc = lpfc_sli_issue_mbox(phba,
10616 pmb,
10617 MBX_NOWAIT);
10618 if (rc != MBX_BUSY)
10619 lpfc_printf_log(phba,
10620 KERN_ERR,
10621 LOG_MBOX | LOG_SLI,
d7c255b2 10622 "0350 rc should have"
6a9c52cf 10623 "been MBX_BUSY\n");
3772a991
JS
10624 if (rc != MBX_NOT_FINISHED)
10625 goto send_current_mbox;
09372820 10626 }
858c9f6c 10627 }
5b75da2f
JS
10628 spin_lock_irqsave(
10629 &phba->pport->work_port_lock,
10630 iflag);
09372820
JS
10631 phba->pport->work_port_events &=
10632 ~WORKER_MBOX_TMO;
5b75da2f
JS
10633 spin_unlock_irqrestore(
10634 &phba->pport->work_port_lock,
10635 iflag);
09372820 10636 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 10637 }
97eab634 10638 } else
5b75da2f 10639 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 10640
92d7f7b0
JS
10641 if ((work_ha_copy & HA_MBATT) &&
10642 (phba->sli.mbox_active == NULL)) {
858c9f6c 10643send_current_mbox:
92d7f7b0 10644 /* Process next mailbox command if there is one */
58da1ffb
JS
10645 do {
10646 rc = lpfc_sli_issue_mbox(phba, NULL,
10647 MBX_NOWAIT);
10648 } while (rc == MBX_NOT_FINISHED);
10649 if (rc != MBX_SUCCESS)
10650 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10651 LOG_SLI, "0349 rc should be "
6a9c52cf 10652 "MBX_SUCCESS\n");
92d7f7b0
JS
10653 }
10654
5b75da2f 10655 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 10656 phba->work_ha |= work_ha_copy;
5b75da2f 10657 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 10658 lpfc_worker_wake_up(phba);
dea3101e 10659 }
9399627f 10660 return IRQ_HANDLED;
9940b97b
JS
10661unplug_error:
10662 spin_unlock_irqrestore(&phba->hbalock, iflag);
10663 return IRQ_HANDLED;
dea3101e 10664
3772a991 10665} /* lpfc_sli_sp_intr_handler */
9399627f
JS
10666
10667/**
3772a991 10668 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
9399627f
JS
10669 * @irq: Interrupt number.
10670 * @dev_id: The device context pointer.
10671 *
10672 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
10673 * service routine when device with SLI-3 interface spec is enabled with
10674 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10675 * ring event in the HBA. However, when the device is enabled with either
10676 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10677 * device-level interrupt handler. When the PCI slot is in error recovery
10678 * or the HBA is undergoing initialization, the interrupt handler will not
10679 * process the interrupt. The SCSI FCP fast-path ring events are handled in
10680 * the interrupt context. This function is called without any lock held.
10681 * It gets the hbalock to access and update SLI data structures.
9399627f
JS
10682 *
10683 * This function returns IRQ_HANDLED when interrupt is handled else it
10684 * returns IRQ_NONE.
10685 **/
10686irqreturn_t
3772a991 10687lpfc_sli_fp_intr_handler(int irq, void *dev_id)
9399627f
JS
10688{
10689 struct lpfc_hba *phba;
10690 uint32_t ha_copy;
10691 unsigned long status;
5b75da2f 10692 unsigned long iflag;
9399627f
JS
10693
10694 /* Get the driver's phba structure from the dev_id and
10695 * assume the HBA is not interrupting.
10696 */
10697 phba = (struct lpfc_hba *) dev_id;
10698
10699 if (unlikely(!phba))
10700 return IRQ_NONE;
10701
10702 /*
10703 * Stuff needs to be attended to when this function is invoked as an
10704 * individual interrupt handler in MSI-X multi-message interrupt mode
10705 */
10706 if (phba->intr_type == MSIX) {
3772a991
JS
10707 /* Check device state for handling interrupt */
10708 if (lpfc_intr_state_check(phba))
9399627f
JS
10709 return IRQ_NONE;
10710 /* Need to read HA REG for FCP ring and other ring events */
9940b97b
JS
10711 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10712 return IRQ_HANDLED;
9399627f 10713 /* Clear up only attention source related to fast-path */
5b75da2f 10714 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90
JS
10715 /*
10716 * If there is deferred error attention, do not check for
10717 * any interrupt.
10718 */
10719 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 10720 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
10721 return IRQ_NONE;
10722 }
9399627f
JS
10723 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10724 phba->HAregaddr);
10725 readl(phba->HAregaddr); /* flush */
5b75da2f 10726 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
10727 } else
10728 ha_copy = phba->ha_copy;
dea3101e 10729
10730 /*
9399627f 10731 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 10732 */
9399627f
JS
10733 ha_copy &= ~(phba->work_ha_mask);
10734
10735 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 10736 status >>= (4*LPFC_FCP_RING);
858c9f6c 10737 if (status & HA_RXMASK)
dea3101e 10738 lpfc_sli_handle_fast_ring_event(phba,
10739 &phba->sli.ring[LPFC_FCP_RING],
10740 status);
a4bc3379
JS
10741
10742 if (phba->cfg_multi_ring_support == 2) {
10743 /*
9399627f
JS
10744 * Process all events on extra ring. Take the optimized path
10745 * for extra ring IO.
a4bc3379 10746 */
9399627f 10747 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 10748 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 10749 if (status & HA_RXMASK) {
a4bc3379
JS
10750 lpfc_sli_handle_fast_ring_event(phba,
10751 &phba->sli.ring[LPFC_EXTRA_RING],
10752 status);
10753 }
10754 }
dea3101e 10755 return IRQ_HANDLED;
3772a991 10756} /* lpfc_sli_fp_intr_handler */
9399627f
JS
10757
10758/**
3772a991 10759 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
9399627f
JS
10760 * @irq: Interrupt number.
10761 * @dev_id: The device context pointer.
10762 *
3772a991
JS
10763 * This function is the HBA device-level interrupt handler to device with
10764 * SLI-3 interface spec, called from the PCI layer when either MSI or
10765 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10766 * requires driver attention. This function invokes the slow-path interrupt
10767 * attention handling function and fast-path interrupt attention handling
10768 * function in turn to process the relevant HBA attention events. This
10769 * function is called without any lock held. It gets the hbalock to access
10770 * and update SLI data structures.
9399627f
JS
10771 *
10772 * This function returns IRQ_HANDLED when interrupt is handled, else it
10773 * returns IRQ_NONE.
10774 **/
10775irqreturn_t
3772a991 10776lpfc_sli_intr_handler(int irq, void *dev_id)
9399627f
JS
10777{
10778 struct lpfc_hba *phba;
10779 irqreturn_t sp_irq_rc, fp_irq_rc;
10780 unsigned long status1, status2;
a747c9ce 10781 uint32_t hc_copy;
9399627f
JS
10782
10783 /*
10784 * Get the driver's phba structure from the dev_id and
10785 * assume the HBA is not interrupting.
10786 */
10787 phba = (struct lpfc_hba *) dev_id;
10788
10789 if (unlikely(!phba))
10790 return IRQ_NONE;
10791
3772a991
JS
10792 /* Check device state for handling interrupt */
10793 if (lpfc_intr_state_check(phba))
9399627f
JS
10794 return IRQ_NONE;
10795
10796 spin_lock(&phba->hbalock);
9940b97b
JS
10797 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10798 spin_unlock(&phba->hbalock);
10799 return IRQ_HANDLED;
10800 }
10801
9399627f
JS
10802 if (unlikely(!phba->ha_copy)) {
10803 spin_unlock(&phba->hbalock);
10804 return IRQ_NONE;
10805 } else if (phba->ha_copy & HA_ERATT) {
10806 if (phba->hba_flag & HBA_ERATT_HANDLED)
10807 /* ERATT polling has handled ERATT */
10808 phba->ha_copy &= ~HA_ERATT;
10809 else
10810 /* Indicate interrupt handler handles ERATT */
10811 phba->hba_flag |= HBA_ERATT_HANDLED;
10812 }
10813
a257bf90
JS
10814 /*
10815 * If there is deferred error attention, do not check for any interrupt.
10816 */
10817 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
ec21b3b0 10818 spin_unlock(&phba->hbalock);
a257bf90
JS
10819 return IRQ_NONE;
10820 }
10821
9399627f 10822 /* Clear attention sources except link and error attentions */
9940b97b
JS
10823 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10824 spin_unlock(&phba->hbalock);
10825 return IRQ_HANDLED;
10826 }
a747c9ce
JS
10827 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10828 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10829 phba->HCregaddr);
9399627f 10830 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
a747c9ce 10831 writel(hc_copy, phba->HCregaddr);
9399627f
JS
10832 readl(phba->HAregaddr); /* flush */
10833 spin_unlock(&phba->hbalock);
10834
10835 /*
10836 * Invokes slow-path host attention interrupt handling as appropriate.
10837 */
10838
10839 /* status of events with mailbox and link attention */
10840 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10841
10842 /* status of events with ELS ring */
10843 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
10844 status2 >>= (4*LPFC_ELS_RING);
10845
10846 if (status1 || (status2 & HA_RXMASK))
3772a991 10847 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
9399627f
JS
10848 else
10849 sp_irq_rc = IRQ_NONE;
10850
10851 /*
10852 * Invoke fast-path host attention interrupt handling as appropriate.
10853 */
10854
10855 /* status of events with FCP ring */
10856 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10857 status1 >>= (4*LPFC_FCP_RING);
10858
10859 /* status of events with extra ring */
10860 if (phba->cfg_multi_ring_support == 2) {
10861 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10862 status2 >>= (4*LPFC_EXTRA_RING);
10863 } else
10864 status2 = 0;
10865
10866 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
3772a991 10867 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
9399627f
JS
10868 else
10869 fp_irq_rc = IRQ_NONE;
dea3101e 10870
9399627f
JS
10871 /* Return device-level interrupt handling status */
10872 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
3772a991 10873} /* lpfc_sli_intr_handler */
4f774513
JS
10874
10875/**
10876 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10877 * @phba: pointer to lpfc hba data structure.
10878 *
10879 * This routine is invoked by the worker thread to process all the pending
10880 * SLI4 FCP abort XRI events.
10881 **/
10882void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10883{
10884 struct lpfc_cq_event *cq_event;
10885
10886 /* First, declare the fcp xri abort event has been handled */
10887 spin_lock_irq(&phba->hbalock);
10888 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10889 spin_unlock_irq(&phba->hbalock);
10890 /* Now, handle all the fcp xri abort events */
10891 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10892 /* Get the first event from the head of the event queue */
10893 spin_lock_irq(&phba->hbalock);
10894 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10895 cq_event, struct lpfc_cq_event, list);
10896 spin_unlock_irq(&phba->hbalock);
10897 /* Notify aborted XRI for FCP work queue */
10898 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10899 /* Free the event processed back to the free pool */
10900 lpfc_sli4_cq_event_release(phba, cq_event);
10901 }
10902}
10903
10904/**
10905 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10906 * @phba: pointer to lpfc hba data structure.
10907 *
10908 * This routine is invoked by the worker thread to process all the pending
10909 * SLI4 els abort xri events.
10910 **/
10911void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10912{
10913 struct lpfc_cq_event *cq_event;
10914
10915 /* First, declare the els xri abort event has been handled */
10916 spin_lock_irq(&phba->hbalock);
10917 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10918 spin_unlock_irq(&phba->hbalock);
10919 /* Now, handle all the els xri abort events */
10920 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
10921 /* Get the first event from the head of the event queue */
10922 spin_lock_irq(&phba->hbalock);
10923 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10924 cq_event, struct lpfc_cq_event, list);
10925 spin_unlock_irq(&phba->hbalock);
10926 /* Notify aborted XRI for ELS work queue */
10927 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10928 /* Free the event processed back to the free pool */
10929 lpfc_sli4_cq_event_release(phba, cq_event);
10930 }
10931}
10932
341af102
JS
10933/**
10934 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10935 * @phba: pointer to lpfc hba data structure
10936 * @pIocbIn: pointer to the rspiocbq
10937 * @pIocbOut: pointer to the cmdiocbq
10938 * @wcqe: pointer to the complete wcqe
10939 *
10940 * This routine transfers the fields of a command iocbq to a response iocbq
10941 * by copying all the IOCB fields from the command iocbq and transferring the
10942 * completion status information from the completion wcqe.
10943 **/
4f774513 10944static void
341af102
JS
10945lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10946 struct lpfc_iocbq *pIocbIn,
4f774513
JS
10947 struct lpfc_iocbq *pIocbOut,
10948 struct lpfc_wcqe_complete *wcqe)
10949{
341af102 10950 unsigned long iflags;
acd6859b 10951 uint32_t status;
4f774513
JS
10952 size_t offset = offsetof(struct lpfc_iocbq, iocb);
10953
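	/*
	 * Copy from the embedded IOCB onward only, so the driver
	 * bookkeeping fields that precede the IOCB in struct lpfc_iocbq
	 * (list heads, flags, etc.) are preserved in the response iocbq.
	 */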
10954 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10955 sizeof(struct lpfc_iocbq) - offset);
4f774513 10956 /* Map WCQE parameters into irspiocb parameters */
acd6859b
JS
10957 status = bf_get(lpfc_wcqe_c_status, wcqe);
10958 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
4f774513
JS
10959 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
10960 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
10961 pIocbIn->iocb.un.fcpi.fcpi_parm =
10962 pIocbOut->iocb.un.fcpi.fcpi_parm -
10963 wcqe->total_data_placed;
10964 else
10965 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e 10966 else {
4f774513 10967 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e
JS
10968 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
10969 }
341af102 10970
acd6859b
JS
10971 /* Convert BG errors for completion status */
10972 if (status == CQE_STATUS_DI_ERROR) {
10973 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
10974
10975 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
10976 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
10977 else
10978 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
10979
10980 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
10981 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
10982 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10983 BGS_GUARD_ERR_MASK;
10984 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
10985 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10986 BGS_APPTAG_ERR_MASK;
10987 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
10988 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10989 BGS_REFTAG_ERR_MASK;
10990
10991 /* Check to see if there was any good data before the error */
10992 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
10993 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
10994 BGS_HI_WATER_MARK_PRESENT_MASK;
10995 pIocbIn->iocb.unsli3.sli3_bg.bghm =
10996 wcqe->total_data_placed;
10997 }
10998
10999 /*
11000 * Set ALL the error bits to indicate we don't know what
11001 * type of error it is.
11002 */
11003 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11004 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11005 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11006 BGS_GUARD_ERR_MASK);
11007 }
11008
341af102
JS
11009 /* Pick up HBA exchange busy condition */
11010 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11011 spin_lock_irqsave(&phba->hbalock, iflags);
11012 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11013 spin_unlock_irqrestore(&phba->hbalock, iflags);
11014 }
4f774513
JS
11015}
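/*
 * Hedged illustration, not part of the driver: a caller receiving the
 * response iocbq built above could decode the BlockGuard status bits
 * roughly as follows. The helper name lpfc_example_decode_bgstat is
 * hypothetical; the BGS_* masks are the ones set in
 * lpfc_sli4_iocb_param_transfer() above.
 */
static void
lpfc_example_decode_bgstat(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocb)
{
	uint32_t bgstat = rspiocb->iocb.unsli3.sli3_bg.bgstat;

	if (bgstat & BGS_GUARD_ERR_MASK)	/* T10 DIF guard tag */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"example: guard tag check failed\n");
	if (bgstat & BGS_APPTAG_ERR_MASK)	/* T10 DIF application tag */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"example: app tag check failed\n");
	if (bgstat & BGS_REFTAG_ERR_MASK)	/* T10 DIF reference tag */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"example: ref tag check failed\n");
	if (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"example: %u bytes placed before error\n",
				rspiocb->iocb.unsli3.sli3_bg.bghm);
}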
11016
45ed1190
JS
11017/**
11018 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11019 * @phba: Pointer to HBA context object.
11020 * @irspiocbq: Pointer to the response IOCBQ that carries the WCQE.
11021 *
11022 * This routine handles an ELS work-queue completion event and constructs
11023 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11024 * discovery engine to handle.
11025 *
11026 * Return: Pointer to the response IOCBQ, or NULL on failure.
11027 **/
11028static struct lpfc_iocbq *
11029lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11030 struct lpfc_iocbq *irspiocbq)
11031{
11032 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11033 struct lpfc_iocbq *cmdiocbq;
11034 struct lpfc_wcqe_complete *wcqe;
11035 unsigned long iflags;
11036
11037 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
7e56aa25 11038 spin_lock_irqsave(&pring->ring_lock, iflags);
45ed1190
JS
11039 pring->stats.iocb_event++;
11040 /* Look up the ELS command IOCB and create pseudo response IOCB */
11041 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11042 bf_get(lpfc_wcqe_c_request_tag, wcqe));
7e56aa25 11043 spin_unlock_irqrestore(&pring->ring_lock, iflags);
45ed1190
JS
11044
11045 if (unlikely(!cmdiocbq)) {
11046 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11047 "0386 ELS complete with no corresponding "
11048 "cmdiocb: iotag (%d)\n",
11049 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11050 lpfc_sli_release_iocbq(phba, irspiocbq);
11051 return NULL;
11052 }
11053
11054 /* Fake the irspiocbq and copy necessary response information */
341af102 11055 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
45ed1190
JS
11056
11057 return irspiocbq;
11058}
11059
04c68496
JS
11060/**
11061 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11062 * @phba: Pointer to HBA context object.
11063 * @mcqe: Pointer to mailbox completion queue entry.
11064 *
11065 * This routine processes a mailbox completion queue entry that carries
11066 * an asynchronous event.
11067 *
11068 * Return: true if work posted to worker thread, otherwise false.
11069 **/
11070static bool
11071lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11072{
11073 struct lpfc_cq_event *cq_event;
11074 unsigned long iflags;
11075
11076 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11077 "0392 Async Event: word0:x%x, word1:x%x, "
11078 "word2:x%x, word3:x%x\n", mcqe->word0,
11079 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11080
11081 /* Allocate a new internal CQ_EVENT entry */
11082 cq_event = lpfc_sli4_cq_event_alloc(phba);
11083 if (!cq_event) {
11084 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11085 "0394 Failed to allocate CQ_EVENT entry\n");
11086 return false;
11087 }
11088
11089 /* Move the CQE into an asynchronous event entry */
11090 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11091 spin_lock_irqsave(&phba->hbalock, iflags);
11092 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11093 /* Set the async event flag */
11094 phba->hba_flag |= ASYNC_EVENT;
11095 spin_unlock_irqrestore(&phba->hbalock, iflags);
11096
11097 return true;
11098}
11099
11100/**
11101 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11102 * @phba: Pointer to HBA context object.
11103 * @mcqe: Pointer to mailbox completion queue entry.
11104 *
11105 * This routine processes a mailbox completion queue entry that signals
11106 * a mailbox completion event.
11107 *
11108 * Return: true if work posted to worker thread, otherwise false.
11109 **/
11110static bool
11111lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11112{
11113 uint32_t mcqe_status;
11114 MAILBOX_t *mbox, *pmbox;
11115 struct lpfc_mqe *mqe;
11116 struct lpfc_vport *vport;
11117 struct lpfc_nodelist *ndlp;
11118 struct lpfc_dmabuf *mp;
11119 unsigned long iflags;
11120 LPFC_MBOXQ_t *pmb;
11121 bool workposted = false;
11122 int rc;
11123
11124 /* If this is not a mailbox-completion MCQE, bail out via the consumed check */
11125 if (!bf_get(lpfc_trailer_completed, mcqe))
11126 goto out_no_mqe_complete;
11127
11128 /* Get the reference to the active mbox command */
11129 spin_lock_irqsave(&phba->hbalock, iflags);
11130 pmb = phba->sli.mbox_active;
11131 if (unlikely(!pmb)) {
11132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11133 "1832 No pending MBOX command to handle\n");
11134 spin_unlock_irqrestore(&phba->hbalock, iflags);
11135 goto out_no_mqe_complete;
11136 }
11137 spin_unlock_irqrestore(&phba->hbalock, iflags);
11138 mqe = &pmb->u.mqe;
11139 pmbox = (MAILBOX_t *)&pmb->u.mqe;
11140 mbox = phba->mbox;
11141 vport = pmb->vport;
11142
11143 /* Reset heartbeat timer */
11144 phba->last_completion_time = jiffies;
11145 del_timer(&phba->sli.mbox_tmo);
11146
11147 /* Move mbox data to caller's mailbox region, do endian swapping */
11148 if (pmb->mbox_cmpl && mbox)
11149 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
04c68496 11150
73d91e50
JS
11151 /*
11152 * For mcqe errors, conditionally move a modified error code to
11153 * the mbox so that the error will not be missed.
11154 */
11155 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11156 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11157 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11158 bf_set(lpfc_mqe_status, mqe,
11159 (LPFC_MBX_ERROR_RANGE | mcqe_status));
11160 }
04c68496
JS
11161 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11162 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11164 "MBOX dflt rpi: status:x%x rpi:x%x",
11165 mcqe_status,
11166 pmbox->un.varWords[0], 0);
11167 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11168 mp = (struct lpfc_dmabuf *)(pmb->context1);
11169 ndlp = (struct lpfc_nodelist *)pmb->context2;
11170 /* Reg_LOGIN of the default RPI was successful. Now let's get
11171 * rid of that RPI by unregistering it, reusing the same mbox buffer.
11172 */
11173 lpfc_unreg_login(phba, vport->vpi,
11174 pmbox->un.varWords[0], pmb);
11175 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11176 pmb->context1 = mp;
11177 pmb->context2 = ndlp;
11178 pmb->vport = vport;
11179 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11180 if (rc != MBX_BUSY)
11181 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11182 LOG_SLI, "0385 rc should "
11183 "have been MBX_BUSY\n");
11184 if (rc != MBX_NOT_FINISHED)
11185 goto send_current_mbox;
11186 }
11187 }
11188 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11189 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11190 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11191
11192 /* There is mailbox completion work to do */
11193 spin_lock_irqsave(&phba->hbalock, iflags);
11194 __lpfc_mbox_cmpl_put(phba, pmb);
11195 phba->work_ha |= HA_MBATT;
11196 spin_unlock_irqrestore(&phba->hbalock, iflags);
11197 workposted = true;
11198
11199send_current_mbox:
11200 spin_lock_irqsave(&phba->hbalock, iflags);
11201 /* Release the mailbox command posting token */
11202 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11203 /* Setting active mailbox pointer need to be in sync to flag clear */
11204 phba->sli.mbox_active = NULL;
11205 spin_unlock_irqrestore(&phba->hbalock, iflags);
11206 /* Wake up worker thread to post the next pending mailbox command */
11207 lpfc_worker_wake_up(phba);
11208out_no_mqe_complete:
11209 if (bf_get(lpfc_trailer_consumed, mcqe))
11210 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11211 return workposted;
11212}
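/*
 * Note (summarizing driver behavior as assumed from the rest of the
 * code): only the flag/list manipulation above runs in interrupt
 * context. The worker thread observes work_ha & HA_MBATT and performs
 * the actual mailbox completion processing, which is why this routine
 * only reports whether work was posted.
 */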
11213
11214/**
11215 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11216 * @phba: Pointer to HBA context object.
11217 * @cqe: Pointer to mailbox completion queue entry.
11218 *
11219 * This routine processes a mailbox completion queue entry; it invokes the
11220 * proper mailbox completion handling or asynchronous event handling routine
11221 * according to the MCQE's async bit.
11222 *
11223 * Return: true if work posted to worker thread, otherwise false.
11224 **/
11225static bool
11226lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11227{
11228 struct lpfc_mcqe mcqe;
11229 bool workposted;
11230
11231 /* Copy the mailbox MCQE and convert endian order as needed */
11232 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11233
11234 /* Invoke the proper event handling routine */
11235 if (!bf_get(lpfc_trailer_async, &mcqe))
11236 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11237 else
11238 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11239 return workposted;
11240}
11241
4f774513
JS
11242/**
11243 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11244 * @phba: Pointer to HBA context object.
2a76a283 11245 * @cq: Pointer to associated CQ
4f774513
JS
11246 * @wcqe: Pointer to work-queue completion queue entry.
11247 *
11248 * This routine handles an ELS work-queue completion event.
11249 *
11250 * Return: true if work posted to worker thread, otherwise false.
11251 **/
11252static bool
2a76a283 11253lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
11254 struct lpfc_wcqe_complete *wcqe)
11255{
4f774513
JS
11256 struct lpfc_iocbq *irspiocbq;
11257 unsigned long iflags;
2a76a283 11258 struct lpfc_sli_ring *pring = cq->pring;
4f774513 11259
45ed1190 11260 /* Get an irspiocbq for later ELS response processing use */
4f774513
JS
11261 irspiocbq = lpfc_sli_get_iocbq(phba);
11262 if (!irspiocbq) {
11263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2a9bf3d0
JS
11264 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11265 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11266 pring->txq_cnt, phba->iocb_cnt,
11267 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
11268 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
45ed1190 11269 return false;
4f774513 11270 }
4f774513 11271
45ed1190
JS
11272 /* Save off the slow-path queue event for work thread to process */
11273 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
4f774513 11274 spin_lock_irqsave(&phba->hbalock, iflags);
4d9ab994 11275 list_add_tail(&irspiocbq->cq_event.list,
45ed1190
JS
11276 &phba->sli4_hba.sp_queue_event);
11277 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513 11278 spin_unlock_irqrestore(&phba->hbalock, iflags);
4f774513 11279
45ed1190 11280 return true;
4f774513
JS
11281}
11282
11283/**
11284 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
11285 * @phba: Pointer to HBA context object.
11286 * @wcqe: Pointer to work-queue completion queue entry.
11287 *
11288 * This routine handles a slow-path WQ entry consumed event by invoking the
11289 * proper WQ release routine on the slow-path WQ.
11290 **/
11291static void
11292lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11293 struct lpfc_wcqe_release *wcqe)
11294{
2e90f4b5
JS
11295 /* sanity check on queue memory */
11296 if (unlikely(!phba->sli4_hba.els_wq))
11297 return;
4f774513
JS
11298 /* Check for the slow-path ELS work queue */
11299 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11300 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11301 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11302 else
11303 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11304 "2579 Slow-path wqe consume event carries "
11305 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11306 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
11307 phba->sli4_hba.els_wq->queue_id);
11308}
11309
11310/**
11311 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
11312 * @phba: Pointer to HBA context object.
11313 * @cq: Pointer to a WQ completion queue.
11314 * @wcqe: Pointer to work-queue completion queue entry.
11315 *
11316 * This routine handles an XRI abort event.
11317 *
11318 * Return: true if work posted to worker thread, otherwise false.
11319 **/
11320static bool
11321lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11322 struct lpfc_queue *cq,
11323 struct sli4_wcqe_xri_aborted *wcqe)
11324{
11325 bool workposted = false;
11326 struct lpfc_cq_event *cq_event;
11327 unsigned long iflags;
11328
11329 /* Allocate a new internal CQ_EVENT entry */
11330 cq_event = lpfc_sli4_cq_event_alloc(phba);
11331 if (!cq_event) {
11332 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11333 "0602 Failed to allocate CQ_EVENT entry\n");
11334 return false;
11335 }
11336
11337 /* Move the CQE into the proper xri abort event list */
11338 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11339 switch (cq->subtype) {
11340 case LPFC_FCP:
11341 spin_lock_irqsave(&phba->hbalock, iflags);
11342 list_add_tail(&cq_event->list,
11343 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11344 /* Set the fcp xri abort event flag */
11345 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11346 spin_unlock_irqrestore(&phba->hbalock, iflags);
11347 workposted = true;
11348 break;
11349 case LPFC_ELS:
11350 spin_lock_irqsave(&phba->hbalock, iflags);
11351 list_add_tail(&cq_event->list,
11352 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11353 /* Set the els xri abort event flag */
11354 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11355 spin_unlock_irqrestore(&phba->hbalock, iflags);
11356 workposted = true;
11357 break;
11358 default:
11359 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11360 "0603 Invalid work queue CQE subtype (x%x)\n",
11361 cq->subtype);
11362 workposted = false;
11363 break;
11364 }
11365 return workposted;
11366}
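/*
 * The events queued above are drained later by
 * lpfc_sli4_fcp_xri_abort_event_proc() and
 * lpfc_sli4_els_xri_abort_event_proc() (earlier in this file) once the
 * worker thread sees FCP_XRI_ABORT_EVENT or ELS_XRI_ABORT_EVENT set.
 */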
11367
4f774513
JS
11368/**
11369 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
11370 * @phba: Pointer to HBA context object.
11371 * @rcqe: Pointer to receive-queue completion queue entry.
11372 *
11373 * This routine processes a receive-queue completion queue entry.
11374 *
11375 * Return: true if work posted to worker thread, otherwise false.
11376 **/
11377static bool
4d9ab994 11378lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 11379{
4f774513
JS
11380 bool workposted = false;
11381 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11382 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11383 struct hbq_dmabuf *dma_buf;
7851fe2c 11384 uint32_t status, rq_id;
4f774513
JS
11385 unsigned long iflags;
11386
2e90f4b5
JS
11387 /* sanity check on queue memory */
11388 if (unlikely(!hrq) || unlikely(!drq))
11389 return workposted;
11390
7851fe2c
JS
11391 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11392 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11393 else
11394 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11395 if (rq_id != hrq->queue_id)
4f774513
JS
11396 goto out;
11397
4d9ab994 11398 status = bf_get(lpfc_rcqe_status, rcqe);
4f774513
JS
11399 switch (status) {
11400 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11402 "2537 Receive Frame Truncated!!\n");
b84daac9 11403 hrq->RQ_buf_trunc++;
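		/* Fall through: a truncated frame is still received below */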
4f774513 11404 case FC_STATUS_RQ_SUCCESS:
5ffc266e 11405 lpfc_sli4_rq_release(hrq, drq);
4f774513
JS
11406 spin_lock_irqsave(&phba->hbalock, iflags);
11407 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11408 if (!dma_buf) {
b84daac9 11409 hrq->RQ_no_buf_found++;
4f774513
JS
11410 spin_unlock_irqrestore(&phba->hbalock, iflags);
11411 goto out;
11412 }
b84daac9 11413 hrq->RQ_rcv_buf++;
4d9ab994 11414 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
4f774513 11415 /* save off the frame for the worker thread to process */
4d9ab994 11416 list_add_tail(&dma_buf->cq_event.list,
45ed1190 11417 &phba->sli4_hba.sp_queue_event);
4f774513 11418 /* Frame received */
45ed1190 11419 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513
JS
11420 spin_unlock_irqrestore(&phba->hbalock, iflags);
11421 workposted = true;
11422 break;
11423 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11424 case FC_STATUS_INSUFF_BUF_FRM_DISC:
b84daac9 11425 hrq->RQ_no_posted_buf++;
4f774513
JS
11426 /* Post more buffers if possible */
11427 spin_lock_irqsave(&phba->hbalock, iflags);
11428 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11429 spin_unlock_irqrestore(&phba->hbalock, iflags);
11430 workposted = true;
11431 break;
11432 }
11433out:
11434 return workposted;
4f774513
JS
11435}
11436
4d9ab994
JS
11437/**
11438 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11439 * @phba: Pointer to HBA context object.
11440 * @cq: Pointer to the completion queue.
11441 * @wcqe: Pointer to a completion queue entry.
11442 *
25985edc 11443 * This routine processes a slow-path work-queue or receive-queue completion queue
4d9ab994
JS
11444 * entry.
11445 *
11446 * Return: true if work posted to worker thread, otherwise false.
11447 **/
11448static bool
11449lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11450 struct lpfc_cqe *cqe)
11451{
45ed1190 11452 struct lpfc_cqe cqevt;
4d9ab994
JS
11453 bool workposted = false;
11454
11455 /* Copy the work queue CQE and convert endian order if needed */
45ed1190 11456 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
4d9ab994
JS
11457
11458 /* Check and process for different type of WCQE and dispatch */
45ed1190 11459 switch (bf_get(lpfc_cqe_code, &cqevt)) {
4d9ab994 11460 case CQE_CODE_COMPL_WQE:
45ed1190 11461 /* Process the WQ/RQ complete event */
bc73905a 11462 phba->last_completion_time = jiffies;
2a76a283 11463 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
45ed1190 11464 (struct lpfc_wcqe_complete *)&cqevt);
4d9ab994
JS
11465 break;
11466 case CQE_CODE_RELEASE_WQE:
11467 /* Process the WQ release event */
11468 lpfc_sli4_sp_handle_rel_wcqe(phba,
45ed1190 11469 (struct lpfc_wcqe_release *)&cqevt);
4d9ab994
JS
11470 break;
11471 case CQE_CODE_XRI_ABORTED:
11472 /* Process the WQ XRI abort event */
bc73905a 11473 phba->last_completion_time = jiffies;
4d9ab994 11474 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
45ed1190 11475 (struct sli4_wcqe_xri_aborted *)&cqevt);
4d9ab994
JS
11476 break;
11477 case CQE_CODE_RECEIVE:
7851fe2c 11478 case CQE_CODE_RECEIVE_V1:
4d9ab994 11479 /* Process the RQ event */
bc73905a 11480 phba->last_completion_time = jiffies;
4d9ab994 11481 workposted = lpfc_sli4_sp_handle_rcqe(phba,
45ed1190 11482 (struct lpfc_rcqe *)&cqevt);
4d9ab994
JS
11483 break;
11484 default:
11485 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11486 "0388 Not a valid WCQE code: x%x\n",
45ed1190 11487 bf_get(lpfc_cqe_code, &cqevt));
4d9ab994
JS
11488 break;
11489 }
11490 return workposted;
11491}
11492
4f774513
JS
11493/**
11494 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11495 * @phba: Pointer to HBA context object.
11496 * @eqe: Pointer to fast-path event queue entry.
11497 *
11498 * This routine processes an event queue entry from the slow-path event queue.
11499 * It checks the MajorCode and MinorCode to determine whether this is a
11500 * completion event on a completion queue; if not, an error is logged and the
11501 * routine returns. Otherwise, it finds the corresponding completion queue,
11502 * processes all the entries on that completion queue, rearms the
11503 * completion queue, and then returns.
11504 *
11505 **/
11506static void
67d12733
JS
11507lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11508 struct lpfc_queue *speq)
4f774513 11509{
67d12733 11510 struct lpfc_queue *cq = NULL, *childq;
4f774513
JS
11511 struct lpfc_cqe *cqe;
11512 bool workposted = false;
11513 int ecount = 0;
11514 uint16_t cqid;
11515
4f774513 11516 /* Get the reference to the corresponding CQ */
cb5172ea 11517 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
4f774513 11518
4f774513
JS
11519 list_for_each_entry(childq, &speq->child_list, list) {
11520 if (childq->queue_id == cqid) {
11521 cq = childq;
11522 break;
11523 }
11524 }
11525 if (unlikely(!cq)) {
75baf696
JS
11526 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11527 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11528 "0365 Slow-path CQ identifier "
11529 "(%d) does not exist\n", cqid);
4f774513
JS
11530 return;
11531 }
11532
11533 /* Process all the entries to the CQ */
11534 switch (cq->type) {
11535 case LPFC_MCQ:
11536 while ((cqe = lpfc_sli4_cq_get(cq))) {
11537 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
73d91e50 11538 if (!(++ecount % cq->entry_repost))
4f774513 11539 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
b84daac9 11540 cq->CQ_mbox++;
4f774513
JS
11541 }
11542 break;
11543 case LPFC_WCQ:
11544 while ((cqe = lpfc_sli4_cq_get(cq))) {
0558056c
JS
11545 if (cq->subtype == LPFC_FCP)
11546 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11547 cqe);
11548 else
11549 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11550 cqe);
73d91e50 11551 if (!(++ecount % cq->entry_repost))
4f774513
JS
11552 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11553 }
b84daac9
JS
11554
11555 /* Track the max number of CQEs processed in 1 EQ */
11556 if (ecount > cq->CQ_max_cqe)
11557 cq->CQ_max_cqe = ecount;
4f774513
JS
11558 break;
11559 default:
11560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11561 "0370 Invalid completion queue type (%d)\n",
11562 cq->type);
11563 return;
11564 }
11565
11566 /* Catch the no cq entry condition, log an error */
11567 if (unlikely(ecount == 0))
11568 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11569 "0371 No entry from the CQ: identifier "
11570 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11571
11572 /* In any case, flush and re-arm the CQ */
11573 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11574
11575 /* wake up worker thread if there is work to be done */
11576 if (workposted)
11577 lpfc_worker_wake_up(phba);
11578}
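/*
 * Note on the arming protocol used above: CQEs are released with
 * LPFC_QUEUE_NOARM every cq->entry_repost entries so the port can
 * reuse the slots without raising a new interrupt mid-loop; the final
 * LPFC_QUEUE_REARM release re-enables interrupt generation for the CQ.
 */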
11579
11580/**
11581 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
2a76a283
JS
11582 * @phba: Pointer to HBA context object.
11583 * @cq: Pointer to associated CQ
11584 * @wcqe: Pointer to work-queue completion queue entry.
4f774513
JS
11585 *
11586 * This routine processes a fast-path work queue completion entry from a
11587 * fast-path event queue for FCP command response completion.
11588 **/
11589static void
2a76a283 11590lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
11591 struct lpfc_wcqe_complete *wcqe)
11592{
2a76a283 11593 struct lpfc_sli_ring *pring = cq->pring;
4f774513
JS
11594 struct lpfc_iocbq *cmdiocbq;
11595 struct lpfc_iocbq irspiocbq;
11596 unsigned long iflags;
11597
4f774513
JS
11598 /* Check for response status */
11599 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11600 /* If resource errors reported from HBA, reduce queue
11601 * depth of the SCSI device.
11602 */
e3d2b802
JS
11603 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
11604 IOSTAT_LOCAL_REJECT)) &&
11605 ((wcqe->parameter & IOERR_PARAM_MASK) ==
11606 IOERR_NO_RESOURCES))
4f774513 11607 phba->lpfc_rampdown_queue_depth(phba);
e3d2b802 11608
4f774513
JS
11609 /* Log the error status */
11610 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11611 "0373 FCP complete error: status=x%x, "
11612 "hw_status=x%x, total_data_specified=%d, "
11613 "parameter=x%x, word3=x%x\n",
11614 bf_get(lpfc_wcqe_c_status, wcqe),
11615 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11616 wcqe->total_data_placed, wcqe->parameter,
11617 wcqe->word3);
11618 }
11619
11620 /* Look up the FCP command IOCB and create pseudo response IOCB */
7e56aa25
JS
11621 spin_lock_irqsave(&pring->ring_lock, iflags);
11622 pring->stats.iocb_event++;
4f774513
JS
11623 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11624 bf_get(lpfc_wcqe_c_request_tag, wcqe));
7e56aa25 11625 spin_unlock_irqrestore(&pring->ring_lock, iflags);
4f774513
JS
11626 if (unlikely(!cmdiocbq)) {
11627 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11628 "0374 FCP complete with no corresponding "
11629 "cmdiocb: iotag (%d)\n",
11630 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11631 return;
11632 }
11633 if (unlikely(!cmdiocbq->iocb_cmpl)) {
11634 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11635 "0375 FCP cmdiocb not callback function "
11636 "iotag: (%d)\n",
11637 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11638 return;
11639 }
11640
11641 /* Fake the irspiocb and copy necessary response information */
341af102 11642 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
4f774513 11643
0f65ff68
JS
11644 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11645 spin_lock_irqsave(&phba->hbalock, iflags);
11646 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11647 spin_unlock_irqrestore(&phba->hbalock, iflags);
11648 }
11649
4f774513
JS
11650 /* Pass the cmd_iocb and the rsp state to the upper layer */
11651 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11652}
11653
11654/**
11655 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11656 * @phba: Pointer to HBA context object.
11657 * @cq: Pointer to completion queue.
11658 * @wcqe: Pointer to work-queue completion queue entry.
11659 *
11660 * This routine handles a fast-path WQ entry consumed event by invoking the
11661 * proper WQ release routine on the fast-path WQ.
11662 **/
11663static void
11664lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11665 struct lpfc_wcqe_release *wcqe)
11666{
11667 struct lpfc_queue *childwq;
11668 bool wqid_matched = false;
11669 uint16_t fcp_wqid;
11670
11671 /* Check for fast-path FCP work queue release */
11672 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11673 list_for_each_entry(childwq, &cq->child_list, list) {
11674 if (childwq->queue_id == fcp_wqid) {
11675 lpfc_sli4_wq_release(childwq,
11676 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11677 wqid_matched = true;
11678 break;
11679 }
11680 }
11681 /* Report warning log message if no match found */
11682 if (wqid_matched != true)
11683 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11684 "2580 Fast-path wqe consume event carries "
11685 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
11686}
11687
11688/**
11689 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11690 * @phba: Pointer to HBA context object.
11691 * @cq: Pointer to the completion queue.
11692 * @cqe: Pointer to fast-path completion queue entry.
11693 *
11694 * This routine processes a fast-path work queue completion entry from a fast-path event queue for FCP command response completion.
11695 **/
11696static int
11697lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11698 struct lpfc_cqe *cqe)
11699{
11700 struct lpfc_wcqe_release wcqe;
11701 bool workposted = false;
11702
11703 /* Copy the work queue CQE and convert endian order if needed */
11704 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11705
11706 /* Check and process for different type of WCQE and dispatch */
11707 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11708 case CQE_CODE_COMPL_WQE:
b84daac9 11709 cq->CQ_wq++;
4f774513 11710 /* Process the WQ complete event */
98fc5dd9 11711 phba->last_completion_time = jiffies;
2a76a283 11712 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
4f774513
JS
11713 (struct lpfc_wcqe_complete *)&wcqe);
11714 break;
11715 case CQE_CODE_RELEASE_WQE:
b84daac9 11716 cq->CQ_release_wqe++;
4f774513
JS
11717 /* Process the WQ release event */
11718 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11719 (struct lpfc_wcqe_release *)&wcqe);
11720 break;
11721 case CQE_CODE_XRI_ABORTED:
b84daac9 11722 cq->CQ_xri_aborted++;
4f774513 11723 /* Process the WQ XRI abort event */
bc73905a 11724 phba->last_completion_time = jiffies;
4f774513
JS
11725 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11726 (struct sli4_wcqe_xri_aborted *)&wcqe);
11727 break;
11728 default:
11729 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11730 "0144 Not a valid WCQE code: x%x\n",
11731 bf_get(lpfc_wcqe_c_code, &wcqe));
11732 break;
11733 }
11734 return workposted;
11735}
11736
11737/**
67d12733 11738 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
4f774513
JS
11739 * @phba: Pointer to HBA context object.
11740 * @eqe: Pointer to fast-path event queue entry.
11741 *
11742 * This routine processes an event queue entry from the fast-path event queue.
11743 * It checks the MajorCode and MinorCode to determine whether this is a
11744 * completion event on a completion queue; if not, an error is logged and the
11745 * routine returns. Otherwise, it finds the corresponding completion queue,
11746 * processes all the entries on that completion queue, rearms the
11747 * completion queue, and then returns.
11748 **/
11749static void
67d12733
JS
11750lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11751 uint32_t qidx)
4f774513
JS
11752{
11753 struct lpfc_queue *cq;
11754 struct lpfc_cqe *cqe;
11755 bool workposted = false;
11756 uint16_t cqid;
11757 int ecount = 0;
11758
cb5172ea 11759 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
4f774513 11760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
67d12733 11761 "0366 Not a valid completion "
4f774513 11762 "event: majorcode=x%x, minorcode=x%x\n",
cb5172ea
JS
11763 bf_get_le32(lpfc_eqe_major_code, eqe),
11764 bf_get_le32(lpfc_eqe_minor_code, eqe));
4f774513
JS
11765 return;
11766 }
11767
67d12733
JS
11768 /* Get the reference to the corresponding CQ */
11769 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11770
11771 /* Check if this is a Slow path event */
11772 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11773 lpfc_sli4_sp_handle_eqe(phba, eqe,
11774 phba->sli4_hba.hba_eq[qidx]);
11775 return;
11776 }
11777
2e90f4b5
JS
11778 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11779 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11780 "3146 Fast-path completion queues "
11781 "does not exist\n");
11782 return;
11783 }
67d12733 11784 cq = phba->sli4_hba.fcp_cq[qidx];
4f774513 11785 if (unlikely(!cq)) {
75baf696
JS
11786 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11787 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11788 "0367 Fast-path completion queue "
67d12733 11789 "(%d) does not exist\n", qidx);
4f774513
JS
11790 return;
11791 }
11792
4f774513
JS
11793 if (unlikely(cqid != cq->queue_id)) {
11794 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11795 "0368 Miss-matched fast-path completion "
11796 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11797 cqid, cq->queue_id);
11798 return;
11799 }
11800
11801 /* Process all the entries to the CQ */
11802 while ((cqe = lpfc_sli4_cq_get(cq))) {
11803 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
73d91e50 11804 if (!(++ecount % cq->entry_repost))
4f774513
JS
11805 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11806 }
11807
b84daac9
JS
11808 /* Track the max number of CQEs processed in 1 EQ */
11809 if (ecount > cq->CQ_max_cqe)
11810 cq->CQ_max_cqe = ecount;
11811
4f774513
JS
11812 /* Catch the no cq entry condition */
11813 if (unlikely(ecount == 0))
11814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11815 "0369 No entry from fast-path completion "
11816 "queue fcpcqid=%d\n", cq->queue_id);
11817
11818 /* In any case, flush and re-arm the CQ */
11819 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11820
11821 /* wake up worker thread if there is work to be done */
11822 if (workposted)
11823 lpfc_worker_wake_up(phba);
11824}
11825
11826static void
11827lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11828{
11829 struct lpfc_eqe *eqe;
11830
11831 /* walk all the EQ entries and drop on the floor */
11832 while ((eqe = lpfc_sli4_eq_get(eq)))
11833 ;
11834
11835 /* Clear and re-arm the EQ */
11836 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11837}
11838
11839/**
67d12733 11840 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
4f774513
JS
11841 * @irq: Interrupt number.
11842 * @dev_id: The device context pointer.
11843 *
11844 * This function is directly called from the PCI layer as an interrupt
11845 * service routine when device with SLI-4 interface spec is enabled with
11846 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11847 * ring event in the HBA. However, when the device is enabled with either
11848 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11849 * device-level interrupt handler. When the PCI slot is in error recovery
11850 * or the HBA is undergoing initialization, the interrupt handler will not
11851 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11852 * interrupt context. This function is called without any lock held.
11853 * It takes the hbalock to access and update SLI data structures. Note that
11854 * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
11855 * equal to the FCP CQ index.
11856 *
67d12733
JS
11857 * The link attention and ELS ring attention events are handled
11858 * by the worker thread. The interrupt handler signals the worker thread
11859 * and returns for these events. This function is called without any lock
11860 * held. It gets the hbalock to access and update SLI data structures.
11861 *
4f774513
JS
11862 * This function returns IRQ_HANDLED when interrupt is handled else it
11863 * returns IRQ_NONE.
11864 **/
11865irqreturn_t
67d12733 11866lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
4f774513
JS
11867{
11868 struct lpfc_hba *phba;
11869 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11870 struct lpfc_queue *fpeq;
11871 struct lpfc_eqe *eqe;
11872 unsigned long iflag;
11873 int ecount = 0;
11874 uint32_t fcp_eqidx;
11875
11876 /* Get the driver's phba structure from the dev_id */
11877 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11878 phba = fcp_eq_hdl->phba;
11879 fcp_eqidx = fcp_eq_hdl->idx;
11880
11881 if (unlikely(!phba))
11882 return IRQ_NONE;
67d12733 11883 if (unlikely(!phba->sli4_hba.hba_eq))
5350d872 11884 return IRQ_NONE;
4f774513
JS
11885
11886 /* Get to the EQ struct associated with this vector */
67d12733 11887 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
2e90f4b5
JS
11888 if (unlikely(!fpeq))
11889 return IRQ_NONE;
4f774513 11890
ba20c853
JS
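	/*
	 * With lpfc_fcp_look_ahead enabled, fcp_eq_in_use acts as a
	 * per-EQ claim: only the path that decrements it to zero
	 * services this EQ; any other invocation backs off with
	 * IRQ_NONE, and every exit path below restores the counter.
	 */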
11891 if (lpfc_fcp_look_ahead) {
11892 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11893 lpfc_sli4_eq_clr_intr(fpeq);
11894 else {
11895 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11896 return IRQ_NONE;
11897 }
11898 }
11899
4f774513
JS
11900 /* Check device state for handling interrupt */
11901 if (unlikely(lpfc_intr_state_check(phba))) {
b84daac9 11902 fpeq->EQ_badstate++;
4f774513
JS
11903 /* Check again for link_state with lock held */
11904 spin_lock_irqsave(&phba->hbalock, iflag);
11905 if (phba->link_state < LPFC_LINK_DOWN)
11906 /* Flush, clear interrupt, and rearm the EQ */
11907 lpfc_sli4_eq_flush(phba, fpeq);
11908 spin_unlock_irqrestore(&phba->hbalock, iflag);
ba20c853
JS
11909 if (lpfc_fcp_look_ahead)
11910 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
4f774513
JS
11911 return IRQ_NONE;
11912 }
11913
11914 /*
11915 * Process all the event on FCP fast-path EQ
11916 */
11917 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
67d12733 11918 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
73d91e50 11919 if (!(++ecount % fpeq->entry_repost))
4f774513 11920 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
b84daac9 11921 fpeq->EQ_processed++;
4f774513
JS
11922 }
11923
b84daac9
JS
11924 /* Track the max number of EQEs processed in 1 intr */
11925 if (ecount > fpeq->EQ_max_eqe)
11926 fpeq->EQ_max_eqe = ecount;
11927
4f774513
JS
11928 /* Always clear and re-arm the fast-path EQ */
11929 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11930
11931 if (unlikely(ecount == 0)) {
b84daac9 11932 fpeq->EQ_no_entry++;
ba20c853
JS
11933
11934 if (lpfc_fcp_look_ahead) {
11935 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11936 return IRQ_NONE;
11937 }
11938
4f774513
JS
11939 if (phba->intr_type == MSIX)
11940 /* MSI-X treated interrupt served as no EQ share INT */
11941 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11942 "0358 MSI-X interrupt with no EQE\n");
11943 else
11944 /* Non MSI-X treated on interrupt as EQ share INT */
11945 return IRQ_NONE;
11946 }
11947
ba20c853
JS
11948 if (lpfc_fcp_look_ahead)
11949 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
4f774513
JS
11950 return IRQ_HANDLED;
11951} /* lpfc_sli4_hba_intr_handler */
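/*
 * Hedged sketch of how this handler is wired up (the real setup lives
 * in lpfc_init.c and may differ): each FCP io channel gets its own
 * MSI-X vector registered against lpfc_sli4_hba_intr_handler, with the
 * per-channel lpfc_fcp_eq_hdl as dev_id so fcp_eq_hdl->idx selects the
 * matching EQ above. Variable names here are illustrative.
 */
#if 0	/* illustration only */
	rc = request_irq(msix_vector, &lpfc_sli4_hba_intr_handler,
			 IRQF_SHARED, name,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
#endif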
11952
11953/**
11954 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11955 * @irq: Interrupt number.
11956 * @dev_id: The device context pointer.
11957 *
11958 * This function is the device-level interrupt handler to device with SLI-4
11959 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
11960 * interrupt mode is enabled and there is an event in the HBA which requires
11961 * driver attention. This function invokes the fast-path interrupt attention
11962 * handling function for each FCP event queue in turn to process the
11963 * relevant HBA attention events. This function is called
11964 * without any lock held. It gets the hbalock to access and update SLI data
11965 * structures.
11966 *
11967 * This function returns IRQ_HANDLED when interrupt is handled, else it
11968 * returns IRQ_NONE.
11969 **/
11970irqreturn_t
11971lpfc_sli4_intr_handler(int irq, void *dev_id)
11972{
11973 struct lpfc_hba *phba;
67d12733
JS
11974 irqreturn_t hba_irq_rc;
11975 bool hba_handled = false;
4f774513
JS
11976 uint32_t fcp_eqidx;
11977
11978 /* Get the driver's phba structure from the dev_id */
11979 phba = (struct lpfc_hba *)dev_id;
11980
11981 if (unlikely(!phba))
11982 return IRQ_NONE;
11983
4f774513
JS
11984 /*
11985 * Invoke fast-path host attention interrupt handling as appropriate.
11986 */
67d12733
JS
11987 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
11988 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
4f774513 11989 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
67d12733
JS
11990 if (hba_irq_rc == IRQ_HANDLED)
11991 hba_handled |= true;
4f774513
JS
11992 }
11993
67d12733 11994 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
4f774513
JS
11995} /* lpfc_sli4_intr_handler */
11996
11997/**
11998 * lpfc_sli4_queue_free - free a queue structure and associated memory
11999 * @queue: The queue structure to free.
12000 *
b595076a 12001 * This function frees a queue structure and the DMAable memory used for
4f774513
JS
12002 * the host resident queue. This function must be called after destroying the
12003 * queue on the HBA.
12004 **/
12005void
12006lpfc_sli4_queue_free(struct lpfc_queue *queue)
12007{
12008 struct lpfc_dmabuf *dmabuf;
12009
12010 if (!queue)
12011 return;
12012
12013 while (!list_empty(&queue->page_list)) {
12014 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12015 list);
49198b37 12016 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
4f774513
JS
12017 dmabuf->virt, dmabuf->phys);
12018 kfree(dmabuf);
12019 }
12020 kfree(queue);
12021 return;
12022}
12023
12024/**
12025 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12026 * @phba: The HBA that this queue is being created on.
12027 * @entry_size: The size of each queue entry for this queue.
12028 * @entry_count: The number of entries that this queue will handle.
12029 *
12030 * This function allocates a queue structure and the DMAable memory used for
12031 * the host resident queue. This function must be called before creating the
12032 * queue on the HBA.
12033 **/
12034struct lpfc_queue *
12035lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12036 uint32_t entry_count)
12037{
12038 struct lpfc_queue *queue;
12039 struct lpfc_dmabuf *dmabuf;
12040 int x, total_qe_count;
12041 void *dma_pointer;
cb5172ea 12042 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
4f774513 12043
cb5172ea
JS
12044 if (!phba->sli4_hba.pc_sli4_params.supported)
12045 hw_page_size = SLI4_PAGE_SIZE;
12046
4f774513
JS
12047 queue = kzalloc(sizeof(struct lpfc_queue) +
12048 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
12049 if (!queue)
12050 return NULL;
cb5172ea
JS
12051 queue->page_count = (ALIGN(entry_size * entry_count,
12052 hw_page_size))/hw_page_size;
4f774513
JS
12053 INIT_LIST_HEAD(&queue->list);
12054 INIT_LIST_HEAD(&queue->page_list);
12055 INIT_LIST_HEAD(&queue->child_list);
12056 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
12057 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
12058 if (!dmabuf)
12059 goto out_fail;
12060 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
cb5172ea 12061 hw_page_size, &dmabuf->phys,
4f774513
JS
12062 GFP_KERNEL);
12063 if (!dmabuf->virt) {
12064 kfree(dmabuf);
12065 goto out_fail;
12066 }
cb5172ea 12067 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
12068 dmabuf->buffer_tag = x;
12069 list_add_tail(&dmabuf->list, &queue->page_list);
12070 /* initialize queue's entry array */
12071 dma_pointer = dmabuf->virt;
12072 for (; total_qe_count < entry_count &&
cb5172ea 12073 dma_pointer < (hw_page_size + dmabuf->virt);
4f774513
JS
12074 total_qe_count++, dma_pointer += entry_size) {
12075 queue->qe[total_qe_count].address = dma_pointer;
12076 }
12077 }
12078 queue->entry_size = entry_size;
12079 queue->entry_count = entry_count;
73d91e50
JS
12080
12081 /*
12082 * entry_repost is calculated based on the number of entries in the
12083 * queue. This works out except for RQs. If buffers are NOT initially
12084 * posted for every RQE, entry_repost should be adjusted accordingly.
12085 */
12086 queue->entry_repost = (entry_count >> 3);
12087 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
12088 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
4f774513
JS
12089 queue->phba = phba;
12090
12091 return queue;
12092out_fail:
12093 lpfc_sli4_queue_free(queue);
12094 return NULL;
12095}
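/*
 * Hedged usage sketch for the alloc/create/free trio in this file.
 * The 4-byte EQE size and entry count are illustrative, not the
 * driver's configured values, and lpfc_example_setup_eq is a
 * hypothetical name.
 */
static struct lpfc_queue *
lpfc_example_setup_eq(struct lpfc_hba *phba, uint32_t imax)
{
	struct lpfc_queue *eq;

	/* Host memory first ... */
	eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
	if (!eq)
		return NULL;
	/* ... then create the queue on the HBA (returns 0 on success) */
	if (lpfc_eq_create(phba, eq, imax)) {
		lpfc_sli4_queue_free(eq);	/* safe: HBA create failed */
		return NULL;
	}
	return eq;
}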
12096
173edbb2
JS
12097/**
12098 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12099 * @phba: HBA structure that indicates port to create a queue on.
12100 * @startq: The starting FCP EQ to modify
12101 *
12102 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
12103 *
12104 * The @phba struct is used to send mailbox command to HBA. The @startq
12105 * is used to get the starting FCP EQ to change.
12106 * This function is synchronous and waits for the mailbox
12107 * command to finish before continuing.
12108 *
12109 * On success this function will return a zero. If unable to allocate enough
12110 * memory this function will return -ENOMEM. If the mailbox command
12111 * fails this function will return -ENXIO.
12112 **/
12113uint32_t
12114lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12115{
12116 struct lpfc_mbx_modify_eq_delay *eq_delay;
12117 LPFC_MBOXQ_t *mbox;
12118 struct lpfc_queue *eq;
12119 int cnt, rc, length, status = 0;
12120 uint32_t shdr_status, shdr_add_status;
ee02006b 12121 uint32_t result;
173edbb2
JS
12122 int fcp_eqidx;
12123 union lpfc_sli4_cfg_shdr *shdr;
12124 uint16_t dmult;
12125
67d12733 12126 if (startq >= phba->cfg_fcp_io_channel)
173edbb2
JS
12127 return 0;
12128
12129 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12130 if (!mbox)
12131 return -ENOMEM;
12132 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12133 sizeof(struct lpfc_sli4_cfg_mhdr));
12134 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12135 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12136 length, LPFC_SLI4_MBX_EMBED);
12137 eq_delay = &mbox->u.mqe.un.eq_delay;
12138
12139 /* Calculate delay multiplier from maximum interrupts per second */
ee02006b
JS
12140 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12141 if (result > LPFC_DMULT_CONST)
12142 dmult = 0;
12143 else
12144 dmult = LPFC_DMULT_CONST/result - 1;
173edbb2
JS
12145
12146 cnt = 0;
67d12733 12147 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
173edbb2 12148 fcp_eqidx++) {
67d12733 12149 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
173edbb2
JS
12150 if (!eq)
12151 continue;
12152 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12153 eq_delay->u.request.eq[cnt].phase = 0;
12154 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12155 cnt++;
12156 if (cnt >= LPFC_MAX_EQ_DELAY)
12157 break;
12158 }
12159 eq_delay->u.request.num_eq = cnt;
12160
12161 mbox->vport = phba->pport;
12162 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12163 mbox->context1 = NULL;
12164 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12165 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12166 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12167 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12168 if (shdr_status || shdr_add_status || rc) {
12169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12170 "2512 MODIFY_EQ_DELAY mailbox failed with "
12171 "status x%x add_status x%x, mbx status x%x\n",
12172 shdr_status, shdr_add_status, rc);
12173 status = -ENXIO;
12174 }
12175 mempool_free(mbox, phba->mbox_mem_pool);
12176 return status;
12177}
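/*
 * Worked example of the dmult computation above (numbers illustrative):
 * with cfg_fcp_imax = 40000 interrupts/sec spread across
 * cfg_fcp_io_channel = 4, result = 10000 per EQ and
 * dmult = LPFC_DMULT_CONST / 10000 - 1. A larger dmult yields a longer
 * interrupt coalescing delay; result > LPFC_DMULT_CONST degenerates to
 * dmult = 0 (no added delay).
 */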
12178
4f774513
JS
12179/**
12180 * lpfc_eq_create - Create an Event Queue on the HBA
12181 * @phba: HBA structure that indicates port to create a queue on.
12182 * @eq: The queue structure to use to create the event queue.
12183 * @imax: The maximum interrupt per second limit.
12184 *
12185 * This function creates an event queue, as detailed in @eq, on a port,
12186 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
12187 *
12188 * The @phba struct is used to send mailbox command to HBA. The @eq struct
12189 * is used to get the entry count and entry size that are necessary to
12190 * determine the number of pages to allocate and use for this queue. This
12191 * function will send the EQ_CREATE mailbox command to the HBA to setup the
12192 * event queue. This function is synchronous and waits for the mailbox
12193 * command to finish before continuing.
12194 *
12195 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
12196 * memory this function will return -ENOMEM. If the queue create mailbox command
12197 * fails this function will return -ENXIO.
4f774513
JS
12198 **/
12199uint32_t
ee02006b 12200lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
4f774513
JS
12201{
12202 struct lpfc_mbx_eq_create *eq_create;
12203 LPFC_MBOXQ_t *mbox;
12204 int rc, length, status = 0;
12205 struct lpfc_dmabuf *dmabuf;
12206 uint32_t shdr_status, shdr_add_status;
12207 union lpfc_sli4_cfg_shdr *shdr;
12208 uint16_t dmult;
49198b37
JS
12209 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12210
2e90f4b5
JS
12211 /* sanity check on queue memory */
12212 if (!eq)
12213 return -ENODEV;
49198b37
JS
12214 if (!phba->sli4_hba.pc_sli4_params.supported)
12215 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
12216
12217 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12218 if (!mbox)
12219 return -ENOMEM;
12220 length = (sizeof(struct lpfc_mbx_eq_create) -
12221 sizeof(struct lpfc_sli4_cfg_mhdr));
12222 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12223 LPFC_MBOX_OPCODE_EQ_CREATE,
12224 length, LPFC_SLI4_MBX_EMBED);
12225 eq_create = &mbox->u.mqe.un.eq_create;
12226 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
12227 eq->page_count);
12228 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
12229 LPFC_EQE_SIZE);
12230 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12231 /* Calculate delay multiplier from maximum interrupts per second */
ee02006b
JS
12232 if (imax > LPFC_DMULT_CONST)
12233 dmult = 0;
12234 else
12235 dmult = LPFC_DMULT_CONST/imax - 1;
4f774513
JS
12236 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12237 dmult);
12238 switch (eq->entry_count) {
12239 default:
12240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12241 "0360 Unsupported EQ count. (%d)\n",
12242 eq->entry_count);
12243 if (eq->entry_count < 256)
12244 return -EINVAL;
12245 /* otherwise default to smallest count (fall through) */
12246 case 256:
12247 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12248 LPFC_EQ_CNT_256);
12249 break;
12250 case 512:
12251 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12252 LPFC_EQ_CNT_512);
12253 break;
12254 case 1024:
12255 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12256 LPFC_EQ_CNT_1024);
12257 break;
12258 case 2048:
12259 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12260 LPFC_EQ_CNT_2048);
12261 break;
12262 case 4096:
12263 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12264 LPFC_EQ_CNT_4096);
12265 break;
12266 }
12267 list_for_each_entry(dmabuf, &eq->page_list, list) {
49198b37 12268 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
12269 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12270 putPaddrLow(dmabuf->phys);
12271 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12272 putPaddrHigh(dmabuf->phys);
12273 }
12274 mbox->vport = phba->pport;
12275 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12276 mbox->context1 = NULL;
12277 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12278 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
12279 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12281 if (shdr_status || shdr_add_status || rc) {
12282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12283 "2500 EQ_CREATE mailbox failed with "
12284 "status x%x add_status x%x, mbx status x%x\n",
12285 shdr_status, shdr_add_status, rc);
12286 status = -ENXIO;
12287 }
12288 eq->type = LPFC_EQ;
12289 eq->subtype = LPFC_NONE;
12290 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
12291 if (eq->queue_id == 0xFFFF)
12292 status = -ENXIO;
12293 eq->host_index = 0;
12294 eq->hba_index = 0;
12295
8fa38513 12296 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
12297 return status;
12298}
12299
12300/**
12301 * lpfc_cq_create - Create a Completion Queue on the HBA
12302 * @phba: HBA structure that indicates port to create a queue on.
12303 * @cq: The queue structure to use to create the completion queue.
12304 * @eq: The event queue to bind this completion queue to.
12305 *
12306 * This function creates a completion queue, as detailed in @wq, on a port,
12307 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12308 *
12309 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12310 * is used to get the entry count and entry size that are necessary to
12311 * determine the number of pages to allocate and use for this queue. The @eq
12312 * is used to indicate which event queue to bind this completion queue to. This
12313 * function will send the CQ_CREATE mailbox command to the HBA to setup the
12314 * completion queue. This function is synchronous and waits for the mailbox
12315 * command to finish before continuing.
12316 *
12317 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
12318 * memory this function will return -ENOMEM. If the queue create mailbox command
12319 * fails this function will return -ENXIO.
4f774513
JS
12320 **/
12321uint32_t
12322lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12323 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
12324{
12325 struct lpfc_mbx_cq_create *cq_create;
12326 struct lpfc_dmabuf *dmabuf;
12327 LPFC_MBOXQ_t *mbox;
12328 int rc, length, status = 0;
12329 uint32_t shdr_status, shdr_add_status;
12330 union lpfc_sli4_cfg_shdr *shdr;
49198b37
JS
12331 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12332
2e90f4b5
JS
12333 /* sanity check on queue memory */
12334 if (!cq || !eq)
12335 return -ENODEV;
49198b37
JS
12336 if (!phba->sli4_hba.pc_sli4_params.supported)
12337 hw_page_size = SLI4_PAGE_SIZE;
12338
4f774513
JS
12339 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12340 if (!mbox)
12341 return -ENOMEM;
12342 length = (sizeof(struct lpfc_mbx_cq_create) -
12343 sizeof(struct lpfc_sli4_cfg_mhdr));
12344 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12345 LPFC_MBOX_OPCODE_CQ_CREATE,
12346 length, LPFC_SLI4_MBX_EMBED);
12347 cq_create = &mbox->u.mqe.un.cq_create;
5a6f133e 12348 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
4f774513
JS
12349 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
12350 cq->page_count);
12351 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
12352 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
5a6f133e
JS
12353 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12354 phba->sli4_hba.pc_sli4_params.cqv);
12355 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
c31098ce
JS
12356 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
12357 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
5a6f133e
JS
12358 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
12359 eq->queue_id);
12360 } else {
12361 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
12362 eq->queue_id);
12363 }
4f774513
JS
12364 switch (cq->entry_count) {
12365 default:
12366 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12367 "0361 Unsupported CQ count. (%d)\n",
12368 cq->entry_count);
4f4c1863
JS
12369 if (cq->entry_count < 256) {
12370 status = -EINVAL;
12371 goto out;
12372 }
4f774513
JS
12373 /* otherwise default to smallest count (fall through) */
12374 case 256:
12375 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12376 LPFC_CQ_CNT_256);
12377 break;
12378 case 512:
12379 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12380 LPFC_CQ_CNT_512);
12381 break;
12382 case 1024:
12383 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12384 LPFC_CQ_CNT_1024);
12385 break;
12386 }
12387 list_for_each_entry(dmabuf, &cq->page_list, list) {
49198b37 12388 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
12389 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12390 putPaddrLow(dmabuf->phys);
12391 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12392 putPaddrHigh(dmabuf->phys);
12393 }
12394 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12395
12396 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
12397 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12398 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12399 if (shdr_status || shdr_add_status || rc) {
12400 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12401 "2501 CQ_CREATE mailbox failed with "
12402 "status x%x add_status x%x, mbx status x%x\n",
12403 shdr_status, shdr_add_status, rc);
12404 status = -ENXIO;
12405 goto out;
12406 }
12407 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12408 if (cq->queue_id == 0xFFFF) {
12409 status = -ENXIO;
12410 goto out;
12411 }
12412 /* link the cq onto the parent eq child list */
12413 list_add_tail(&cq->list, &eq->child_list);
12414 /* Set up completion queue's type and subtype */
12415 cq->type = type;
12416 cq->subtype = subtype;
12417
2a622bfb 12418 cq->assoc_qid = eq->queue_id;
12419 cq->host_index = 0;
12420 cq->hba_index = 0;
4f774513 12421
12422out:
12423 mempool_free(mbox, phba->mbox_mem_pool);
12424 return status;
12425}
12426
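A minimal caller sketch of the create contract above. The lpfc_cq_create()
signature and the LPFC_WCQ/LPFC_FCP constants are assumed from their usage
elsewhere in the driver; this is an illustration, not driver code.

static int example_setup_fcp_cq(struct lpfc_hba *phba,
				struct lpfc_queue *cq, struct lpfc_queue *eq)
{
	int rc;

	/* Bind a work completion queue to its parent event queue. */
	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
	if (rc)
		return rc;	/* -ENODEV, -ENOMEM, -EINVAL or -ENXIO */

	/* On success cq->queue_id is valid and cq is on eq->child_list. */
	return 0;
}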
12427/**
12428 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
12429 * @phba: HBA structure that indicates port to create a queue on.
12430 * @mq: The queue structure to use to create the mailbox queue.
12431 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
12432 * @cq: The completion queue to associate with this mq.
12433 *
12434 * This function provides fallback (fb) functionality when the
12435 * mq_create_ext fails on older FW generations. Its purpose is otherwise
12436 * identical to mq_create_ext.
12437 *
12438 * This routine cannot fail, as all attributes were previously accessed and
12439 * initialized in mq_create_ext.
12440 **/
12441static void
12442lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
12443 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
12444{
12445 struct lpfc_mbx_mq_create *mq_create;
12446 struct lpfc_dmabuf *dmabuf;
12447 int length;
12448
12449 length = (sizeof(struct lpfc_mbx_mq_create) -
12450 sizeof(struct lpfc_sli4_cfg_mhdr));
12451 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12452 LPFC_MBOX_OPCODE_MQ_CREATE,
12453 length, LPFC_SLI4_MBX_EMBED);
12454 mq_create = &mbox->u.mqe.un.mq_create;
12455 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
12456 mq->page_count);
12457 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
12458 cq->queue_id);
12459 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
12460 switch (mq->entry_count) {
12461 case 16:
12462 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12463 LPFC_MQ_RING_SIZE_16);
12464 break;
12465 case 32:
12466 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12467 LPFC_MQ_RING_SIZE_32);
12468 break;
12469 case 64:
12470 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12471 LPFC_MQ_RING_SIZE_64);
12472 break;
12473 case 128:
12474 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12475 LPFC_MQ_RING_SIZE_128);
12476 break;
12477 }
12478 list_for_each_entry(dmabuf, &mq->page_list, list) {
12479 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12480 putPaddrLow(dmabuf->phys);
12481 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12482 putPaddrHigh(dmabuf->phys);
12483 }
12484}
12485
12486/**
12487 * lpfc_mq_create - Create a mailbox Queue on the HBA
12488 * @phba: HBA structure that indicates port to create a queue on.
12489 * @mq: The queue structure to use to create the mailbox queue.
12490 * @cq: The completion queue to associate with this mq.
12491 * @subtype: The queue's subtype.
12492 *
12493 * This function creates a mailbox queue, as detailed in @mq, on a port,
12494 * described by @phba, by sending a MQ_CREATE mailbox command to the HBA.
12495 *
12496 * The @phba struct is used to send the mailbox command to the HBA. The @mq
12497 * struct is used to get the entry count and entry size that are necessary to
12498 * determine the number of pages to allocate and use for this queue. This
12499 * function will send the MQ_CREATE mailbox command to the HBA to set up the
12500 * mailbox queue. This function is synchronous and will wait for the mailbox
12501 * command to finish before continuing.
12502 *
12503 * On success this function will return a zero. If unable to allocate enough
12504 * memory this function will return -ENOMEM. If the queue create mailbox command
12505 * fails this function will return -ENXIO.
12506 **/
b19a061a 12507int32_t
12508lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12509 struct lpfc_queue *cq, uint32_t subtype)
12510{
12511 struct lpfc_mbx_mq_create *mq_create;
b19a061a 12512 struct lpfc_mbx_mq_create_ext *mq_create_ext;
12513 struct lpfc_dmabuf *dmabuf;
12514 LPFC_MBOXQ_t *mbox;
12515 int rc, length, status = 0;
12516 uint32_t shdr_status, shdr_add_status;
12517 union lpfc_sli4_cfg_shdr *shdr;
49198b37 12518 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 12519
12520 /* sanity check on queue memory */
12521 if (!mq || !cq)
12522 return -ENODEV;
12523 if (!phba->sli4_hba.pc_sli4_params.supported)
12524 hw_page_size = SLI4_PAGE_SIZE;
b19a061a 12525
12526 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12527 if (!mbox)
12528 return -ENOMEM;
b19a061a 12529 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
12530 sizeof(struct lpfc_sli4_cfg_mhdr));
12531 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
b19a061a 12532 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
04c68496 12533 length, LPFC_SLI4_MBX_EMBED);
12534
12535 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
5a6f133e 12536 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
12537 bf_set(lpfc_mbx_mq_create_ext_num_pages,
12538 &mq_create_ext->u.request, mq->page_count);
12539 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
12540 &mq_create_ext->u.request, 1);
12541 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
12542 &mq_create_ext->u.request, 1);
12543 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
12544 &mq_create_ext->u.request, 1);
12545 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
12546 &mq_create_ext->u.request, 1);
12547 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
12548 &mq_create_ext->u.request, 1);
b19a061a 12549 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
12550 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12551 phba->sli4_hba.pc_sli4_params.mqv);
12552 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
12553 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
12554 cq->queue_id);
12555 else
12556 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
12557 cq->queue_id);
12558 switch (mq->entry_count) {
12559 default:
12560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12561 "0362 Unsupported MQ count. (%d)\n",
12562 mq->entry_count);
12563 if (mq->entry_count < 16) {
12564 status = -EINVAL;
12565 goto out;
12566 }
12567 /* otherwise default to smallest count (drop through) */
12568 case 16:
12569 bf_set(lpfc_mq_context_ring_size,
12570 &mq_create_ext->u.request.context,
12571 LPFC_MQ_RING_SIZE_16);
12572 break;
12573 case 32:
12574 bf_set(lpfc_mq_context_ring_size,
12575 &mq_create_ext->u.request.context,
12576 LPFC_MQ_RING_SIZE_32);
12577 break;
12578 case 64:
12579 bf_set(lpfc_mq_context_ring_size,
12580 &mq_create_ext->u.request.context,
12581 LPFC_MQ_RING_SIZE_64);
12582 break;
12583 case 128:
12584 bf_set(lpfc_mq_context_ring_size,
12585 &mq_create_ext->u.request.context,
12586 LPFC_MQ_RING_SIZE_128);
12587 break;
12588 }
12589 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 12590 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 12591 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 12592 putPaddrLow(dmabuf->phys);
b19a061a 12593 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
12594 putPaddrHigh(dmabuf->phys);
12595 }
12596 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12597 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12598 &mq_create_ext->u.response);
12599 if (rc != MBX_SUCCESS) {
12600 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12601 "2795 MQ_CREATE_EXT failed with "
12602 "status x%x. Failback to MQ_CREATE.\n",
12603 rc);
12604 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12605 mq_create = &mbox->u.mqe.un.mq_create;
12606 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12607 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12608 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12609 &mq_create->u.response);
12610 }
12611
04c68496 12612 /* The IOCTL status is embedded in the mailbox subheader. */
12613 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12614 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12615 if (shdr_status || shdr_add_status || rc) {
12616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12617 "2502 MQ_CREATE mailbox failed with "
12618 "status x%x add_status x%x, mbx status x%x\n",
12619 shdr_status, shdr_add_status, rc);
12620 status = -ENXIO;
12621 goto out;
12622 }
12623 if (mq->queue_id == 0xFFFF) {
12624 status = -ENXIO;
12625 goto out;
12626 }
12627 mq->type = LPFC_MQ;
2a622bfb 12628 mq->assoc_qid = cq->queue_id;
12629 mq->subtype = subtype;
12630 mq->host_index = 0;
12631 mq->hba_index = 0;
12632
12633 /* link the mq onto the parent cq child list */
12634 list_add_tail(&mq->list, &cq->child_list);
12635out:
8fa38513 12636 mempool_free(mbox, phba->mbox_mem_pool);
12637 return status;
12638}
12639
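The MQ_CREATE_EXT-to-MQ_CREATE fallback above is invisible to callers; a
hedged usage sketch (the LPFC_MBOX subtype constant is assumed, and the
wrapper name is illustrative):

static int example_setup_mq(struct lpfc_hba *phba,
			    struct lpfc_queue *mq, struct lpfc_queue *cq)
{
	/* One return code regardless of which create opcode the port took. */
	int rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);

	if (rc)
		return rc;
	/* mq->queue_id is valid and mq sits on cq->child_list. */
	return 0;
}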
12640/**
12641 * lpfc_wq_create - Create a Work Queue on the HBA
12642 * @phba: HBA structure that indicates port to create a queue on.
12643 * @wq: The queue structure to use to create the work queue.
12644 * @cq: The completion queue to bind this work queue to.
12645 * @subtype: The subtype of the work queue indicating its functionality.
12646 *
12647 * This function creates a work queue, as detailed in @wq, on a port, described
12648 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
12649 *
12650 * The @phba struct is used to send the mailbox command to the HBA. The @wq
12651 * struct is used to get the entry count and entry size that are necessary to
12652 * determine the number of pages to allocate and use for this queue. The @cq
12653 * is used to indicate which completion queue to bind this work queue to. This
12654 * function will send the WQ_CREATE mailbox command to the HBA to set up the
12655 * work queue. This function is synchronous and will wait for the mailbox
12656 * command to finish before continuing.
12657 *
12658 * On success this function will return a zero. If unable to allocate enough
12659 * memory this function will return -ENOMEM. If the queue create mailbox command
12660 * fails this function will return -ENXIO.
12661 **/
12662uint32_t
12663lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12664 struct lpfc_queue *cq, uint32_t subtype)
12665{
12666 struct lpfc_mbx_wq_create *wq_create;
12667 struct lpfc_dmabuf *dmabuf;
12668 LPFC_MBOXQ_t *mbox;
12669 int rc, length, status = 0;
12670 uint32_t shdr_status, shdr_add_status;
12671 union lpfc_sli4_cfg_shdr *shdr;
49198b37 12672 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
5a6f133e 12673 struct dma_address *page;
49198b37 12674
12675 /* sanity check on queue memory */
12676 if (!wq || !cq)
12677 return -ENODEV;
12678 if (!phba->sli4_hba.pc_sli4_params.supported)
12679 hw_page_size = SLI4_PAGE_SIZE;
12680
12681 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12682 if (!mbox)
12683 return -ENOMEM;
12684 length = (sizeof(struct lpfc_mbx_wq_create) -
12685 sizeof(struct lpfc_sli4_cfg_mhdr));
12686 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12687 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12688 length, LPFC_SLI4_MBX_EMBED);
12689 wq_create = &mbox->u.mqe.un.wq_create;
5a6f133e 12690 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
12691 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12692 wq->page_count);
12693 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12694 cq->queue_id);
12695 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12696 phba->sli4_hba.pc_sli4_params.wqv);
12697 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12698 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12699 wq->entry_count);
12700 switch (wq->entry_size) {
12701 default:
12702 case 64:
12703 bf_set(lpfc_mbx_wq_create_wqe_size,
12704 &wq_create->u.request_1,
12705 LPFC_WQ_WQE_SIZE_64);
12706 break;
12707 case 128:
12708 bf_set(lpfc_mbx_wq_create_wqe_size,
12709 &wq_create->u.request_1,
12710 LPFC_WQ_WQE_SIZE_128);
12711 break;
12712 }
12713 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12714 (PAGE_SIZE/SLI4_PAGE_SIZE));
12715 page = wq_create->u.request_1.page;
12716 } else {
12717 page = wq_create->u.request.page;
12718 }
4f774513 12719 list_for_each_entry(dmabuf, &wq->page_list, list) {
49198b37 12720 memset(dmabuf->virt, 0, hw_page_size);
12721 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12722 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12723 }
12724 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12725 /* The IOCTL status is embedded in the mailbox subheader. */
12726 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12727 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12728 if (shdr_status || shdr_add_status || rc) {
12729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12730 "2503 WQ_CREATE mailbox failed with "
12731 "status x%x add_status x%x, mbx status x%x\n",
12732 shdr_status, shdr_add_status, rc);
12733 status = -ENXIO;
12734 goto out;
12735 }
12736 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12737 if (wq->queue_id == 0xFFFF) {
12738 status = -ENXIO;
12739 goto out;
12740 }
12741 wq->type = LPFC_WQ;
2a622bfb 12742 wq->assoc_qid = cq->queue_id;
4f774513
JS
12743 wq->subtype = subtype;
12744 wq->host_index = 0;
12745 wq->hba_index = 0;
ff78d8f9 12746 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
12747
12748 /* link the wq onto the parent cq child list */
12749 list_add_tail(&wq->list, &cq->child_list);
12750out:
8fa38513 12751 mempool_free(mbox, phba->mbox_mem_pool);
12752 return status;
12753}
12754
12755/**
12756 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12757 * @phba: HBA structure that indicates port to create a queue on.
12758 * @rq: The queue structure to use for the receive queue.
12759 * @qno: The associated HBQ number
12760 *
12761 * For SLI4 we need to adjust the RQ repost value based on the number of
12762 * buffers that are initially posted to the RQ: the repost interval is one
12763 * eighth of the initial buffer count, floored at LPFC_QUEUE_MIN_REPOST.
12764 */
12765void
12766lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12767{
12768 uint32_t cnt;
12769
12770 /* sanity check on queue memory */
12771 if (!rq)
12772 return;
12773 cnt = lpfc_hbq_defs[qno]->entry_count;
12774
12775 /* Recalc repost for RQs based on buffers initially posted */
12776 cnt = (cnt >> 3);
12777 if (cnt < LPFC_QUEUE_MIN_REPOST)
12778 cnt = LPFC_QUEUE_MIN_REPOST;
12779
12780 rq->entry_repost = cnt;
12781}
12782
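As a worked illustration of the repost arithmetic above (the helper name is
hypothetical; LPFC_QUEUE_MIN_REPOST comes from the driver headers):

static uint32_t example_rq_repost(uint32_t entry_count)
{
	uint32_t cnt = entry_count >> 3;	/* one eighth of the posted buffers */

	return (cnt < LPFC_QUEUE_MIN_REPOST) ? LPFC_QUEUE_MIN_REPOST : cnt;
}

/* example_rq_repost(512) == 64; example_rq_repost(32) is clamped up to
 * LPFC_QUEUE_MIN_REPOST. */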
12783/**
12784 * lpfc_rq_create - Create a Receive Queue on the HBA
12785 * @phba: HBA structure that indicates port to create a queue on.
12786 * @hrq: The queue structure to use to create the header receive queue.
12787 * @drq: The queue structure to use to create the data receive queue.
12788 * @cq: The completion queue to bind this receive queue pair to.
12789 *
12790 * This function creates a receive buffer queue pair, as detailed in @hrq and
12791 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
12792 * to the HBA.
12793 *
12794 * The @phba struct is used to send the mailbox command to the HBA. The @drq
12795 * and @hrq structs are used to get the entry count that is necessary to
12796 * determine the number of pages to use for these queues. The @cq is used to
12797 * indicate which completion queue the buffers received on these queues are
12798 * bound to. This function will send the RQ_CREATE mailbox command to the HBA
12799 * to set up the receive queue pair. This function is synchronous and will
12800 * wait for the mailbox command to finish before continuing.
12801 *
12802 * On success this function will return a zero. If unable to allocate enough
12803 * memory this function will return -ENOMEM. If the queue create mailbox command
12804 * fails this function will return -ENXIO.
12805 **/
12806uint32_t
12807lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12808 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12809{
12810 struct lpfc_mbx_rq_create *rq_create;
12811 struct lpfc_dmabuf *dmabuf;
12812 LPFC_MBOXQ_t *mbox;
12813 int rc, length, status = 0;
12814 uint32_t shdr_status, shdr_add_status;
12815 union lpfc_sli4_cfg_shdr *shdr;
12816 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12817
12818 /* sanity check on queue memory */
12819 if (!hrq || !drq || !cq)
12820 return -ENODEV;
12821 if (!phba->sli4_hba.pc_sli4_params.supported)
12822 hw_page_size = SLI4_PAGE_SIZE;
12823
12824 if (hrq->entry_count != drq->entry_count)
12825 return -EINVAL;
12826 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12827 if (!mbox)
12828 return -ENOMEM;
12829 length = (sizeof(struct lpfc_mbx_rq_create) -
12830 sizeof(struct lpfc_sli4_cfg_mhdr));
12831 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12832 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12833 length, LPFC_SLI4_MBX_EMBED);
12834 rq_create = &mbox->u.mqe.un.rq_create;
12835 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12836 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12837 phba->sli4_hba.pc_sli4_params.rqv);
12838 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12839 bf_set(lpfc_rq_context_rqe_count_1,
12840 &rq_create->u.request.context,
12841 hrq->entry_count);
12842 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
12843 bf_set(lpfc_rq_context_rqe_size,
12844 &rq_create->u.request.context,
12845 LPFC_RQE_SIZE_8);
12846 bf_set(lpfc_rq_context_page_size,
12847 &rq_create->u.request.context,
12848 (PAGE_SIZE/SLI4_PAGE_SIZE));
12849 } else {
12850 switch (hrq->entry_count) {
12851 default:
12852 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12853 "2535 Unsupported RQ count. (%d)\n",
12854 hrq->entry_count);
12855 if (hrq->entry_count < 512) {
12856 status = -EINVAL;
12857 goto out;
12858 }
12859 /* otherwise default to smallest count (drop through) */
12860 case 512:
12861 bf_set(lpfc_rq_context_rqe_count,
12862 &rq_create->u.request.context,
12863 LPFC_RQ_RING_SIZE_512);
12864 break;
12865 case 1024:
12866 bf_set(lpfc_rq_context_rqe_count,
12867 &rq_create->u.request.context,
12868 LPFC_RQ_RING_SIZE_1024);
12869 break;
12870 case 2048:
12871 bf_set(lpfc_rq_context_rqe_count,
12872 &rq_create->u.request.context,
12873 LPFC_RQ_RING_SIZE_2048);
12874 break;
12875 case 4096:
12876 bf_set(lpfc_rq_context_rqe_count,
12877 &rq_create->u.request.context,
12878 LPFC_RQ_RING_SIZE_4096);
12879 break;
12880 }
12881 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12882 LPFC_HDR_BUF_SIZE);
12883 }
12884 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12885 cq->queue_id);
12886 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12887 hrq->page_count);
4f774513 12888 list_for_each_entry(dmabuf, &hrq->page_list, list) {
49198b37 12889 memset(dmabuf->virt, 0, hw_page_size);
12890 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12891 putPaddrLow(dmabuf->phys);
12892 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12893 putPaddrHigh(dmabuf->phys);
12894 }
12895 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12896 /* The IOCTL status is embedded in the mailbox subheader. */
12897 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12898 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12899 if (shdr_status || shdr_add_status || rc) {
12900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12901 "2504 RQ_CREATE mailbox failed with "
12902 "status x%x add_status x%x, mbx status x%x\n",
12903 shdr_status, shdr_add_status, rc);
12904 status = -ENXIO;
12905 goto out;
12906 }
12907 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12908 if (hrq->queue_id == 0xFFFF) {
12909 status = -ENXIO;
12910 goto out;
12911 }
12912 hrq->type = LPFC_HRQ;
2a622bfb 12913 hrq->assoc_qid = cq->queue_id;
12914 hrq->subtype = subtype;
12915 hrq->host_index = 0;
12916 hrq->hba_index = 0;
12917
12918 /* now create the data queue */
12919 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12920 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12921 length, LPFC_SLI4_MBX_EMBED);
12922 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12923 phba->sli4_hba.pc_sli4_params.rqv);
12924 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12925 bf_set(lpfc_rq_context_rqe_count_1,
c31098ce 12926 &rq_create->u.request.context, hrq->entry_count);
5a6f133e 12927 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
12928 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
12929 LPFC_RQE_SIZE_8);
12930 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
12931 (PAGE_SIZE/SLI4_PAGE_SIZE));
12932 } else {
12933 switch (drq->entry_count) {
12934 default:
12935 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12936 "2536 Unsupported RQ count. (%d)\n",
12937 drq->entry_count);
12938 if (drq->entry_count < 512) {
12939 status = -EINVAL;
12940 goto out;
12941 }
12942 /* otherwise default to smallest count (drop through) */
12943 case 512:
12944 bf_set(lpfc_rq_context_rqe_count,
12945 &rq_create->u.request.context,
12946 LPFC_RQ_RING_SIZE_512);
12947 break;
12948 case 1024:
12949 bf_set(lpfc_rq_context_rqe_count,
12950 &rq_create->u.request.context,
12951 LPFC_RQ_RING_SIZE_1024);
12952 break;
12953 case 2048:
12954 bf_set(lpfc_rq_context_rqe_count,
12955 &rq_create->u.request.context,
12956 LPFC_RQ_RING_SIZE_2048);
12957 break;
12958 case 4096:
12959 bf_set(lpfc_rq_context_rqe_count,
12960 &rq_create->u.request.context,
12961 LPFC_RQ_RING_SIZE_4096);
12962 break;
12963 }
12964 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12965 LPFC_DATA_BUF_SIZE);
12966 }
12967 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12968 cq->queue_id);
12969 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12970 drq->page_count);
12971 list_for_each_entry(dmabuf, &drq->page_list, list) {
12972 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12973 putPaddrLow(dmabuf->phys);
12974 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12975 putPaddrHigh(dmabuf->phys);
12976 }
12977 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12978 /* The IOCTL status is embedded in the mailbox subheader. */
12979 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12980 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12981 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12982 if (shdr_status || shdr_add_status || rc) {
12983 status = -ENXIO;
12984 goto out;
12985 }
12986 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12987 if (drq->queue_id == 0xFFFF) {
12988 status = -ENXIO;
12989 goto out;
12990 }
12991 drq->type = LPFC_DRQ;
2a622bfb 12992 drq->assoc_qid = cq->queue_id;
12993 drq->subtype = subtype;
12994 drq->host_index = 0;
12995 drq->hba_index = 0;
12996
12997 /* link the header and data RQs onto the parent cq child list */
12998 list_add_tail(&hrq->list, &cq->child_list);
12999 list_add_tail(&drq->list, &cq->child_list);
13000
13001out:
8fa38513 13002 mempool_free(mbox, phba->mbox_mem_pool);
13003 return status;
13004}
13005
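A hedged caller sketch for the RQ pair: the header and data queues must be
sized identically or lpfc_rq_create() fails with -EINVAL, as the function
itself checks. The LPFC_USOL subtype is assumed; the wrapper is illustrative.

static int example_setup_rq_pair(struct lpfc_hba *phba,
				 struct lpfc_queue *hrq,
				 struct lpfc_queue *drq,
				 struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;		/* mirrors the check in lpfc_rq_create() */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}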
13006/**
13007 * lpfc_eq_destroy - Destroy an event Queue on the HBA
13008 * @eq: The queue structure associated with the queue to destroy.
13009 *
13010 * This function destroys a queue, as detailed in @eq, by sending a mailbox
13011 * command, specific to the type of queue, to the HBA.
13012 *
13013 * The @eq struct is used to get the queue ID of the queue to destroy.
13014 *
13015 * On success this function will return a zero. If the queue destroy mailbox
13016 * command fails this function will return -ENXIO.
13017 **/
13018uint32_t
13019lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13020{
13021 LPFC_MBOXQ_t *mbox;
13022 int rc, length, status = 0;
13023 uint32_t shdr_status, shdr_add_status;
13024 union lpfc_sli4_cfg_shdr *shdr;
13025
2e90f4b5 13026 /* sanity check on queue memory */
13027 if (!eq)
13028 return -ENODEV;
13029 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
13030 if (!mbox)
13031 return -ENOMEM;
13032 length = (sizeof(struct lpfc_mbx_eq_destroy) -
13033 sizeof(struct lpfc_sli4_cfg_mhdr));
13034 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13035 LPFC_MBOX_OPCODE_EQ_DESTROY,
13036 length, LPFC_SLI4_MBX_EMBED);
13037 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
13038 eq->queue_id);
13039 mbox->vport = eq->phba->pport;
13040 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13041
13042 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
13043 /* The IOCTL status is embedded in the mailbox subheader. */
13044 shdr = (union lpfc_sli4_cfg_shdr *)
13045 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
13046 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13047 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13048 if (shdr_status || shdr_add_status || rc) {
13049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13050 "2505 EQ_DESTROY mailbox failed with "
13051 "status x%x add_status x%x, mbx status x%x\n",
13052 shdr_status, shdr_add_status, rc);
13053 status = -ENXIO;
13054 }
13055
13056 /* Remove eq from any list */
13057 list_del_init(&eq->list);
8fa38513 13058 mempool_free(mbox, eq->phba->mbox_mem_pool);
13059 return status;
13060}
13061
13062/**
13063 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
13064 * @cq: The queue structure associated with the queue to destroy.
13065 *
13066 * This function destroys a queue, as detailed in @cq, by sending a mailbox
13067 * command, specific to the type of queue, to the HBA.
13068 *
13069 * The @cq struct is used to get the queue ID of the queue to destroy.
13070 *
13071 * On success this function will return a zero. If the queue destroy mailbox
13072 * command fails this function will return -ENXIO.
13073 **/
13074uint32_t
13075lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
13076{
13077 LPFC_MBOXQ_t *mbox;
13078 int rc, length, status = 0;
13079 uint32_t shdr_status, shdr_add_status;
13080 union lpfc_sli4_cfg_shdr *shdr;
13081
2e90f4b5 13082 /* sanity check on queue memory */
13083 if (!cq)
13084 return -ENODEV;
13085 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
13086 if (!mbox)
13087 return -ENOMEM;
13088 length = (sizeof(struct lpfc_mbx_cq_destroy) -
13089 sizeof(struct lpfc_sli4_cfg_mhdr));
13090 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13091 LPFC_MBOX_OPCODE_CQ_DESTROY,
13092 length, LPFC_SLI4_MBX_EMBED);
13093 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
13094 cq->queue_id);
13095 mbox->vport = cq->phba->pport;
13096 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13097 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
13098 /* The IOCTL status is embedded in the mailbox subheader. */
13099 shdr = (union lpfc_sli4_cfg_shdr *)
13100 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
13101 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13102 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13103 if (shdr_status || shdr_add_status || rc) {
13104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13105 "2506 CQ_DESTROY mailbox failed with "
13106 "status x%x add_status x%x, mbx status x%x\n",
13107 shdr_status, shdr_add_status, rc);
13108 status = -ENXIO;
13109 }
13110 /* Remove cq from any list */
13111 list_del_init(&cq->list);
8fa38513 13112 mempool_free(mbox, cq->phba->mbox_mem_pool);
13113 return status;
13114}
13115
13116/**
13117 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
13118 * @mq: The queue structure associated with the queue to destroy.
13119 *
13120 * This function destroys a queue, as detailed in @mq, by sending a mailbox
13121 * command, specific to the type of queue, to the HBA.
13122 *
13123 * The @mq struct is used to get the queue ID of the queue to destroy.
13124 *
13125 * On success this function will return a zero. If the queue destroy mailbox
13126 * command fails this function will return -ENXIO.
13127 **/
13128uint32_t
13129lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
13130{
13131 LPFC_MBOXQ_t *mbox;
13132 int rc, length, status = 0;
13133 uint32_t shdr_status, shdr_add_status;
13134 union lpfc_sli4_cfg_shdr *shdr;
13135
2e90f4b5 13136 /* sanity check on queue memory */
13137 if (!mq)
13138 return -ENODEV;
13139 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
13140 if (!mbox)
13141 return -ENOMEM;
13142 length = (sizeof(struct lpfc_mbx_mq_destroy) -
13143 sizeof(struct lpfc_sli4_cfg_mhdr));
13144 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13145 LPFC_MBOX_OPCODE_MQ_DESTROY,
13146 length, LPFC_SLI4_MBX_EMBED);
13147 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
13148 mq->queue_id);
13149 mbox->vport = mq->phba->pport;
13150 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13151 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
13152 /* The IOCTL status is embedded in the mailbox subheader. */
13153 shdr = (union lpfc_sli4_cfg_shdr *)
13154 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
13155 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13156 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13157 if (shdr_status || shdr_add_status || rc) {
13158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13159 "2507 MQ_DESTROY mailbox failed with "
13160 "status x%x add_status x%x, mbx status x%x\n",
13161 shdr_status, shdr_add_status, rc);
13162 status = -ENXIO;
13163 }
13164 /* Remove mq from any list */
13165 list_del_init(&mq->list);
8fa38513 13166 mempool_free(mbox, mq->phba->mbox_mem_pool);
13167 return status;
13168}
13169
13170/**
13171 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
13172 * @wq: The queue structure associated with the queue to destroy.
13173 *
13174 * This function destroys a queue, as detailed in @wq, by sending a mailbox
13175 * command, specific to the type of queue, to the HBA.
13176 *
13177 * The @wq struct is used to get the queue ID of the queue to destroy.
13178 *
13179 * On success this function will return a zero. If the queue destroy mailbox
13180 * command fails this function will return -ENXIO.
13181 **/
13182uint32_t
13183lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
13184{
13185 LPFC_MBOXQ_t *mbox;
13186 int rc, length, status = 0;
13187 uint32_t shdr_status, shdr_add_status;
13188 union lpfc_sli4_cfg_shdr *shdr;
13189
2e90f4b5 13190 /* sanity check on queue memory */
13191 if (!wq)
13192 return -ENODEV;
13193 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
13194 if (!mbox)
13195 return -ENOMEM;
13196 length = (sizeof(struct lpfc_mbx_wq_destroy) -
13197 sizeof(struct lpfc_sli4_cfg_mhdr));
13198 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13199 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
13200 length, LPFC_SLI4_MBX_EMBED);
13201 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
13202 wq->queue_id);
13203 mbox->vport = wq->phba->pport;
13204 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13205 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
13206 shdr = (union lpfc_sli4_cfg_shdr *)
13207 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
13208 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13209 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13210 if (shdr_status || shdr_add_status || rc) {
13211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13212 "2508 WQ_DESTROY mailbox failed with "
13213 "status x%x add_status x%x, mbx status x%x\n",
13214 shdr_status, shdr_add_status, rc);
13215 status = -ENXIO;
13216 }
13217 /* Remove wq from any list */
13218 list_del_init(&wq->list);
8fa38513 13219 mempool_free(mbox, wq->phba->mbox_mem_pool);
13220 return status;
13221}
13222
13223/**
13224 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
13225 * @hrq: The header receive queue; @drq: the paired data receive queue.
13226 *
13227 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
13228 * by sending a mailbox command, specific to the type of queue, to the HBA.
13229 *
13230 * The @hrq and @drq structs are used to get the queue IDs to destroy.
13231 *
13232 * On success this function will return a zero. If the queue destroy mailbox
13233 * command fails this function will return -ENXIO.
13234 **/
13235uint32_t
13236lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13237 struct lpfc_queue *drq)
13238{
13239 LPFC_MBOXQ_t *mbox;
13240 int rc, length, status = 0;
13241 uint32_t shdr_status, shdr_add_status;
13242 union lpfc_sli4_cfg_shdr *shdr;
13243
2e90f4b5 13244 /* sanity check on queue memory */
13245 if (!hrq || !drq)
13246 return -ENODEV;
13247 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
13248 if (!mbox)
13249 return -ENOMEM;
13250 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 13251 sizeof(struct lpfc_sli4_cfg_mhdr));
13252 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13253 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
13254 length, LPFC_SLI4_MBX_EMBED);
13255 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13256 hrq->queue_id);
13257 mbox->vport = hrq->phba->pport;
13258 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13259 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
13260 /* The IOCTL status is embedded in the mailbox subheader. */
13261 shdr = (union lpfc_sli4_cfg_shdr *)
13262 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13263 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13264 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13265 if (shdr_status || shdr_add_status || rc) {
13266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13267 "2509 RQ_DESTROY mailbox failed with "
13268 "status x%x add_status x%x, mbx status x%x\n",
13269 shdr_status, shdr_add_status, rc);
13270 if (rc != MBX_TIMEOUT)
13271 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13272 return -ENXIO;
13273 }
13274 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13275 drq->queue_id);
13276 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
13277 shdr = (union lpfc_sli4_cfg_shdr *)
13278 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13279 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13281 if (shdr_status || shdr_add_status || rc) {
13282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13283 "2510 RQ_DESTROY mailbox failed with "
13284 "status x%x add_status x%x, mbx status x%x\n",
13285 shdr_status, shdr_add_status, rc);
13286 status = -ENXIO;
13287 }
13288 list_del_init(&hrq->list);
13289 list_del_init(&drq->list);
8fa38513 13290 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13291 return status;
13292}
13293
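The destroy routines above unlink only the queue being destroyed, so
teardown should run leaf-first. A sketch of the ordering, using the
signatures above (the wrapper name is illustrative):

static void example_teardown(struct lpfc_hba *phba,
			     struct lpfc_queue *wq, struct lpfc_queue *hrq,
			     struct lpfc_queue *drq, struct lpfc_queue *mq,
			     struct lpfc_queue *cq, struct lpfc_queue *eq)
{
	/* Children first, so no CQ goes away while a WQ/RQ/MQ still
	 * references it, and no EQ while a CQ does. */
	lpfc_wq_destroy(phba, wq);
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_mq_destroy(phba, mq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}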
13294/**
13295 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
13296 * @phba: The HBA for which this call is being executed.
13297 * @pdma_phys_addr0: Physical address of the 1st SGL page.
13298 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
13299 * @xritag: the xritag that ties this io to the SGL pages.
13300 *
13301 * This routine will post the sgl pages for the IO that has the xritag
13302 * that is in the iocbq structure. The xritag is assigned during iocbq
13303 * creation and persists for as long as the driver is loaded.
13304 * If the caller has fewer than 256 scatter gather segments to map, then
13305 * pdma_phys_addr1 should be 0.
13306 * If the caller needs to map more than 256 scatter gather segments, then
13307 * pdma_phys_addr1 should be a valid physical address.
13308 * Physical addresses for SGLs must be 64-byte aligned.
13309 * If two SGLs are mapped, the first one must have 256 entries and
13310 * the second can have between 1 and 256 entries.
13311 *
13312 * Return codes:
13313 * 0 - Success
13314 * -ENXIO, -ENOMEM - Failure
13315 **/
13316int
13317lpfc_sli4_post_sgl(struct lpfc_hba *phba,
13318 dma_addr_t pdma_phys_addr0,
13319 dma_addr_t pdma_phys_addr1,
13320 uint16_t xritag)
13321{
13322 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
13323 LPFC_MBOXQ_t *mbox;
13324 int rc;
13325 uint32_t shdr_status, shdr_add_status;
6d368e53 13326 uint32_t mbox_tmo;
13327 union lpfc_sli4_cfg_shdr *shdr;
13328
13329 if (xritag == NO_XRI) {
13330 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13331 "0364 Invalid param:\n");
13332 return -EINVAL;
13333 }
13334
13335 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13336 if (!mbox)
13337 return -ENOMEM;
13338
13339 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13340 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13341 sizeof(struct lpfc_mbx_post_sgl_pages) -
fedd3b7b 13342 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
13343
13344 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
13345 &mbox->u.mqe.un.post_sgl_pages;
13346 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
13347 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
13348
13349 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
13350 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
13351 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
13352 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
13353
13354 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
13355 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
13356 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
13357 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
13358 if (!phba->sli4_hba.intr_enable)
13359 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6d368e53 13360 else {
a183a15f 13361 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13362 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13363 }
13364 /* The IOCTL status is embedded in the mailbox subheader. */
13365 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
13366 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13367 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13368 if (rc != MBX_TIMEOUT)
13369 mempool_free(mbox, phba->mbox_mem_pool);
13370 if (shdr_status || shdr_add_status || rc) {
13371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13372 "2511 POST_SGL mailbox failed with "
13373 "status x%x add_status x%x, mbx status x%x\n",
13374 shdr_status, shdr_add_status, rc);
13375 rc = -ENXIO;
13376 }
13377	return rc;
13378}
4f774513 13379
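A hedged usage sketch for the two-page rule above: the second page is
supplied only when more than 256 SGEs are needed. The struct lpfc_sglq
fields and SGL_PAGE_SIZE are assumed from their usage later in this file.

static int example_post_one_sgl(struct lpfc_hba *phba,
				struct lpfc_sglq *sglq, int nsegs)
{
	/* Page 1 starts one SGL page past page 0; 0 means "no second page". */
	dma_addr_t pg1 = (nsegs > 256) ? sglq->phys + SGL_PAGE_SIZE : 0;

	return lpfc_sli4_post_sgl(phba, sglq->phys, pg1, sglq->sli4_xritag);
}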
13380/**
13381 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
13382 * @phba: pointer to lpfc hba data structure.
13383 *
13384 * This routine is invoked to allocate the next available logical xri
13385 * from the driver's xri bitmask, consistent with the SLI-4 interface
13386 * spec. Because the index is logical, the driver starts the search
13387 * at 0 each time.
13388 *
13389 * Returns
13390 * An available xri in the range 0 <= xri < max_xri if successful
13391 * NO_XRI if no xris are available.
13392 **/
13393uint16_t
13394lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13395{
13396 unsigned long xri;
13397
13398 /*
13399 * Fetch the next logical xri. Because this index is logical,
13400 * the driver starts at 0 each time.
13401 */
13402 spin_lock_irq(&phba->hbalock);
13403 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13404 phba->sli4_hba.max_cfg_param.max_xri, 0);
13405 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13406 spin_unlock_irq(&phba->hbalock);
13407 return NO_XRI;
13408 } else {
13409 set_bit(xri, phba->sli4_hba.xri_bmask);
13410 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 13411 }
13412 spin_unlock_irq(&phba->hbalock);
13413 return xri;
13414}
13415
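The allocator above is the usual bitmap-ID pattern: scan for the first
clear bit under a lock, claim it, and return a sentinel when the pool is
exhausted. A self-contained sketch of the same technique (names are
illustrative, not driver API):

static int example_alloc_id(unsigned long *bmask, int max, spinlock_t *lock)
{
	int id;

	spin_lock_irq(lock);
	id = find_next_zero_bit(bmask, max, 0);
	if (id >= max)
		id = -1;		/* pool exhausted */
	else
		set_bit(id, bmask);	/* claim it while still holding the lock */
	spin_unlock_irq(lock);
	return id;
}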
13416/**
13417 * __lpfc_sli4_free_xri - Release an xri for reuse.
13418 * @phba: pointer to lpfc hba data structure.
13419 *
13420 * This routine is invoked to release an xri to the pool of available
13421 * xris maintained by the driver. The caller must hold the hbalock.
13422 **/
13423void
13424__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13425{
13426 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13427 phba->sli4_hba.max_cfg_param.xri_used--;
13428 }
13429}
13430
13431/**
13432 * lpfc_sli4_free_xri - Release an xri for reuse.
13433 * @phba: pointer to lpfc hba data structure.
13434 *
13435 * This routine is invoked to release an xri to the pool of
13436 * available xris maintained by the driver.
13437 **/
13438void
13439lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13440{
13441 spin_lock_irq(&phba->hbalock);
13442 __lpfc_sli4_free_xri(phba, xri);
13443 spin_unlock_irq(&phba->hbalock);
13444}
13445
13446/**
13447 * lpfc_sli4_next_xritag - Get an xritag for the io
13448 * @phba: Pointer to HBA context object.
13449 *
13450 * This function gets an xritag for the iocb. If there is no unused xritag
13451 * it will return NO_XRI (0xffff) and log a warning.
13452 * The function returns the allocated xritag if successful, else returns
13453 * NO_XRI. NO_XRI is not a valid xritag.
13454 * The caller is not required to hold any lock.
13455 **/
13456uint16_t
13457lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13458{
6d368e53 13459 uint16_t xri_index;
4f774513 13460
6d368e53 13461 xri_index = lpfc_sli4_alloc_xri(phba);
13462 if (xri_index == NO_XRI)
13463 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13464 "2004 Failed to allocate XRI.last XRITAG is %d"
13465 " Max XRI is %d, Used XRI is %d\n",
13466 xri_index,
13467 phba->sli4_hba.max_cfg_param.max_xri,
13468 phba->sli4_hba.max_cfg_param.xri_used);
13469 return xri_index;
13470}
13471
13472/**
6d368e53 13473 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
4f774513 13474 * @phba: pointer to lpfc hba data structure.
13475 * @post_sgl_list: pointer to els sgl entry list.
13476 * @post_cnt: number of els sgl entries on the list.
13477 *
13478 * This routine is invoked to post a block of the driver's sgl pages to the
13479 * HBA using a non-embedded mailbox command. No lock is held. This routine
13480 * is only called while the driver is loading and after all IO has been
13481 * stopped.
13482 **/
13483static int
13484lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13485 struct list_head *post_sgl_list,
13486 int post_cnt)
4f774513 13487{
8a9d2e80 13488 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
13489 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13490 struct sgl_page_pairs *sgl_pg_pairs;
13491 void *viraddr;
13492 LPFC_MBOXQ_t *mbox;
13493 uint32_t reqlen, alloclen, pg_pairs;
13494 uint32_t mbox_tmo;
13495 uint16_t xritag_start = 0;
13496 int rc = 0;
13497 uint32_t shdr_status, shdr_add_status;
13498 union lpfc_sli4_cfg_shdr *shdr;
13499
8a9d2e80 13500 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
4f774513 13501 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 13502 if (reqlen > SLI4_PAGE_SIZE) {
13503 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13504 "2559 Block sgl registration required DMA "
13505 "size (%d) great than a page\n", reqlen);
13506 return -ENOMEM;
13507 }
13508 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6d368e53 13509 if (!mbox)
4f774513 13510 return -ENOMEM;
13511
13512 /* Allocate DMA memory and set up the non-embedded mailbox command */
13513 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13514 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13515 LPFC_SLI4_MBX_NEMBED);
13516
13517 if (alloclen < reqlen) {
13518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13519 "0285 Allocated DMA memory size (%d) is "
13520 "less than the requested DMA memory "
13521 "size (%d)\n", alloclen, reqlen);
13522 lpfc_sli4_mbox_cmd_free(phba, mbox);
13523 return -ENOMEM;
13524 }
4f774513 13525 /* Set up the SGL pages in the non-embedded DMA pages */
6d368e53 13526 viraddr = mbox->sge_array->addr[0];
13527 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13528 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13529
13530 pg_pairs = 0;
13531 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
13532 /* Set up the sge entry */
13533 sgl_pg_pairs->sgl_pg0_addr_lo =
13534 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13535 sgl_pg_pairs->sgl_pg0_addr_hi =
13536 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13537 sgl_pg_pairs->sgl_pg1_addr_lo =
13538 cpu_to_le32(putPaddrLow(0));
13539 sgl_pg_pairs->sgl_pg1_addr_hi =
13540 cpu_to_le32(putPaddrHigh(0));
6d368e53 13541
13542 /* Keep the first xritag on the list */
13543 if (pg_pairs == 0)
13544 xritag_start = sglq_entry->sli4_xritag;
13545 sgl_pg_pairs++;
8a9d2e80 13546 pg_pairs++;
4f774513 13547 }
13548
13549 /* Complete initialization and perform endian conversion. */
4f774513 13550 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
8a9d2e80 13551 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
4f774513 13552 sgl->word0 = cpu_to_le32(sgl->word0);
13553 if (!phba->sli4_hba.intr_enable)
13554 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13555 else {
a183a15f 13556 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13557 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13558 }
13559 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13560 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13561 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13562 if (rc != MBX_TIMEOUT)
13563 lpfc_sli4_mbox_cmd_free(phba, mbox);
13564 if (shdr_status || shdr_add_status || rc) {
13565 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13566 "2513 POST_SGL_BLOCK mailbox command failed "
13567 "status x%x add_status x%x mbx status x%x\n",
13568 shdr_status, shdr_add_status, rc);
13569 rc = -ENXIO;
13570 }
13571 return rc;
13572}
13573
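A quick check of the one-page size guard above (the helper is hypothetical;
sizes come from the SLI-4 headers, a sgl_page_pairs entry being four 32-bit
words):

static bool example_sgl_block_fits(uint32_t xri_cnt)
{
	uint32_t reqlen = xri_cnt * sizeof(struct sgl_page_pairs) +
			  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);

	/* With 16-byte page pairs, roughly 250 XRIs fit per 4KB mailbox;
	 * larger pools must be registered in multiple blocks. */
	return reqlen <= SLI4_PAGE_SIZE;
}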
13574/**
13575 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl pages to firmware
13576 * @phba: pointer to lpfc hba data structure.
13577 * @sblist: pointer to scsi buffer list.
13578 * @count: number of scsi buffers on the list.
13579 *
13580 * This routine is invoked to post a block of @count scsi sgl pages from a
13581 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
13582 * No lock is held.
13583 *
13584 **/
13585int
13586lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13587 struct list_head *sblist,
13588 int count)
13589{
13590 struct lpfc_scsi_buf *psb;
13591 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13592 struct sgl_page_pairs *sgl_pg_pairs;
13593 void *viraddr;
13594 LPFC_MBOXQ_t *mbox;
13595 uint32_t reqlen, alloclen, pg_pairs;
13596 uint32_t mbox_tmo;
13597 uint16_t xritag_start = 0;
13598 int rc = 0;
13599 uint32_t shdr_status, shdr_add_status;
13600 dma_addr_t pdma_phys_bpl1;
13601 union lpfc_sli4_cfg_shdr *shdr;
13602
13603 /* Calculate the requested length of the dma memory */
8a9d2e80 13604 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 13605 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 13606 if (reqlen > SLI4_PAGE_SIZE) {
13607 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13608 "0217 Block sgl registration required DMA "
13609 "size (%d) great than a page\n", reqlen);
13610 return -ENOMEM;
13611 }
13612 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13613 if (!mbox) {
13614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13615 "0283 Failed to allocate mbox cmd memory\n");
13616 return -ENOMEM;
13617 }
13618
13619 /* Allocate DMA memory and set up the non-embedded mailbox command */
13620 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13621 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13622 LPFC_SLI4_MBX_NEMBED);
13623
13624 if (alloclen < reqlen) {
13625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13626 "2561 Allocated DMA memory size (%d) is "
13627 "less than the requested DMA memory "
13628 "size (%d)\n", alloclen, reqlen);
13629 lpfc_sli4_mbox_cmd_free(phba, mbox);
13630 return -ENOMEM;
13631 }
6d368e53 13632
4f774513 13633 /* Get the first SGE entry from the non-embedded DMA memory */
13634 viraddr = mbox->sge_array->addr[0];
13635
13636 /* Set up the SGL pages in the non-embedded DMA pages */
13637 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13638 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13639
13640 pg_pairs = 0;
13641 list_for_each_entry(psb, sblist, list) {
13642 /* Set up the sge entry */
13643 sgl_pg_pairs->sgl_pg0_addr_lo =
13644 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13645 sgl_pg_pairs->sgl_pg0_addr_hi =
13646 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13647 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13648 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13649 else
13650 pdma_phys_bpl1 = 0;
13651 sgl_pg_pairs->sgl_pg1_addr_lo =
13652 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13653 sgl_pg_pairs->sgl_pg1_addr_hi =
13654 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13655 /* Keep the first xritag on the list */
13656 if (pg_pairs == 0)
13657 xritag_start = psb->cur_iocbq.sli4_xritag;
13658 sgl_pg_pairs++;
13659 pg_pairs++;
13660 }
13661 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13662 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13663 /* Perform endian conversion if necessary */
13664 sgl->word0 = cpu_to_le32(sgl->word0);
13665
13666 if (!phba->sli4_hba.intr_enable)
13667 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13668 else {
a183a15f 13669 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13670 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13671 }
13672 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13673 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13674 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13675 if (rc != MBX_TIMEOUT)
13676 lpfc_sli4_mbox_cmd_free(phba, mbox);
13677 if (shdr_status || shdr_add_status || rc) {
13678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13679 "2564 POST_SGL_BLOCK mailbox command failed "
13680 "status x%x add_status x%x mbx status x%x\n",
13681 shdr_status, shdr_add_status, rc);
13682 rc = -ENXIO;
13683 }
13684 return rc;
13685}
13686
13687/**
13688 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13689 * @phba: pointer to lpfc_hba struct that the frame was received on
13690 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13691 *
13692 * This function checks the fields in the @fc_hdr to see if the FC frame is a
13693 * valid type of frame that the LPFC driver will handle. This function will
13694 * return a zero if the frame is a valid frame or a non zero value when the
13695 * frame does not pass the check.
13696 **/
13697static int
13698lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13699{
13700 /* make rctl_names static to save stack space */
13701 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13702 char *type_names[] = FC_TYPE_NAMES_INIT;
13703 struct fc_vft_header *fc_vft_hdr;
546fc854 13704 uint32_t *header = (uint32_t *) fc_hdr;
13705
13706 switch (fc_hdr->fh_r_ctl) {
13707 case FC_RCTL_DD_UNCAT: /* uncategorized information */
13708 case FC_RCTL_DD_SOL_DATA: /* solicited data */
13709 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
13710 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
13711 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
13712 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
13713 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
13714 case FC_RCTL_DD_CMD_STATUS: /* command status */
13715 case FC_RCTL_ELS_REQ: /* extended link services request */
13716 case FC_RCTL_ELS_REP: /* extended link services reply */
13717 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
13718 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
13719 case FC_RCTL_BA_NOP: /* basic link service NOP */
13720 case FC_RCTL_BA_ABTS: /* basic link service abort */
13721 case FC_RCTL_BA_RMC: /* remove connection */
13722 case FC_RCTL_BA_ACC: /* basic accept */
13723 case FC_RCTL_BA_RJT: /* basic reject */
13724 case FC_RCTL_BA_PRMT:
13725 case FC_RCTL_ACK_1: /* acknowledge_1 */
13726 case FC_RCTL_ACK_0: /* acknowledge_0 */
13727 case FC_RCTL_P_RJT: /* port reject */
13728 case FC_RCTL_F_RJT: /* fabric reject */
13729 case FC_RCTL_P_BSY: /* port busy */
13730 case FC_RCTL_F_BSY: /* fabric busy to data frame */
13731 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
13732 case FC_RCTL_LCR: /* link credit reset */
13733 case FC_RCTL_END: /* end */
13734 break;
13735 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
13736 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13737 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13738 return lpfc_fc_frame_check(phba, fc_hdr);
13739 default:
13740 goto drop;
13741 }
13742 switch (fc_hdr->fh_type) {
13743 case FC_TYPE_BLS:
13744 case FC_TYPE_ELS:
13745 case FC_TYPE_FCP:
13746 case FC_TYPE_CT:
13747 break;
13748 case FC_TYPE_IP:
13749 case FC_TYPE_ILS:
13750 default:
13751 goto drop;
13752 }
546fc854 13753
4f774513 13754 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13755 "2538 Received frame rctl:%s type:%s "
13756 "Frame Data:%08x %08x %08x %08x %08x %08x\n",
4f774513 13757 rctl_names[fc_hdr->fh_r_ctl],
13758 type_names[fc_hdr->fh_type],
13759 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13760 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13761 be32_to_cpu(header[4]), be32_to_cpu(header[5]));
13762 return 0;
13763drop:
13764 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13765 "2539 Dropped frame rctl:%s type:%s\n",
13766 rctl_names[fc_hdr->fh_r_ctl],
13767 type_names[fc_hdr->fh_type]);
13768 return 1;
13769}
13770
13771/**
13772 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
13773 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13774 *
13775 * This function processes the FC header to retrieve the VFI from the VF
13776 * header, if one exists. This function will return the VFI if one exists
13777 * or 0 if no VSAN Header exists.
13778 **/
13779static uint32_t
13780lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
13781{
13782 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13783
13784 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
13785 return 0;
13786 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
13787}
13788
13789/**
13790 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
13791 * @phba: Pointer to the HBA structure to search for the vport on
13792 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13793 * @fcfi: The FC Fabric ID that the frame came from
13794 *
13795 * This function searches the @phba for a vport that matches the content of the
13796 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
13797 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
13798 * returns the matching vport pointer or NULL if unable to match frame to a
13799 * vport.
13800 **/
13801static struct lpfc_vport *
13802lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13803 uint16_t fcfi)
13804{
13805 struct lpfc_vport **vports;
13806 struct lpfc_vport *vport = NULL;
13807 int i;
13808 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13809 fc_hdr->fh_d_id[1] << 8 |
13810 fc_hdr->fh_d_id[2]);
939723a4 13811
13812 if (did == Fabric_DID)
13813 return phba->pport;
13814 if ((phba->pport->fc_flag & FC_PT2PT) &&
13815 !(phba->link_state == LPFC_HBA_READY))
13816 return phba->pport;
13817
13818 vports = lpfc_create_vport_work_array(phba);
13819 if (vports != NULL)
13820 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
13821 if (phba->fcf.fcfi == fcfi &&
13822 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
13823 vports[i]->fc_myDID == did) {
13824 vport = vports[i];
13825 break;
13826 }
13827 }
13828 lpfc_destroy_vport_work_array(phba, vports);
13829 return vport;
13830}
13831
13832/**
13833 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
13834 * @vport: The vport to work on.
13835 *
13836 * This function updates the receive sequence time stamp for this vport. The
13837 * receive sequence time stamp indicates the time that the last frame of
13838 * the sequence that has been idle for the longest amount of time was received.
13839 * The driver uses this time stamp to decide if any received sequences have
13840 * timed out.
13841 **/
13842void
13843lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
13844{
13845 struct lpfc_dmabuf *h_buf;
13846 struct hbq_dmabuf *dmabuf = NULL;
13847
13848 /* get the oldest sequence on the rcv list */
13849 h_buf = list_get_first(&vport->rcv_buffer_list,
13850 struct lpfc_dmabuf, list);
13851 if (!h_buf)
13852 return;
13853 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13854 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
13855}
13856
13857/**
13858 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
13859 * @vport: The vport that the received sequences were sent to.
13860 *
13861 * This function cleans up all outstanding received sequences. This is called
13862 * by the driver when a link event or user action invalidates all the received
13863 * sequences.
13864 **/
13865void
13866lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
13867{
13868 struct lpfc_dmabuf *h_buf, *hnext;
13869 struct lpfc_dmabuf *d_buf, *dnext;
13870 struct hbq_dmabuf *dmabuf = NULL;
13871
13872 /* start with the oldest sequence on the rcv list */
13873 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13874 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13875 list_del_init(&dmabuf->hbuf.list);
13876 list_for_each_entry_safe(d_buf, dnext,
13877 &dmabuf->dbuf.list, list) {
13878 list_del_init(&d_buf->list);
13879 lpfc_in_buf_free(vport->phba, d_buf);
13880 }
13881 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13882 }
13883}
13884
13885/**
13886 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
13887 * @vport: The vport that the received sequences were sent to.
13888 *
13889 * This function determines whether any received sequences have timed out by
13890 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
13891 * indicates that there is at least one timed out sequence this routine will
13892 * go through the received sequences one at a time from most inactive to most
13893 * active to determine which ones need to be cleaned up. Once it has determined
13894 * that a sequence needs to be cleaned up it will simply free up the resources
13895 * without sending an abort.
13896 **/
13897void
13898lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
13899{
13900 struct lpfc_dmabuf *h_buf, *hnext;
13901 struct lpfc_dmabuf *d_buf, *dnext;
13902 struct hbq_dmabuf *dmabuf = NULL;
13903 unsigned long timeout;
13904 int abort_count = 0;
13905
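	/* fc_edtov is kept in milliseconds; age it from the oldest sequence's time stamp */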
13906 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13907 vport->rcv_buffer_time_stamp);
13908 if (list_empty(&vport->rcv_buffer_list) ||
13909 time_before(jiffies, timeout))
13910 return;
13911 /* start with the oldest sequence on the rcv list */
13912 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13913 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13914 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13915 dmabuf->time_stamp);
13916 if (time_before(jiffies, timeout))
13917 break;
13918 abort_count++;
13919 list_del_init(&dmabuf->hbuf.list);
13920 list_for_each_entry_safe(d_buf, dnext,
13921 &dmabuf->dbuf.list, list) {
13922 list_del_init(&d_buf->list);
13923 lpfc_in_buf_free(vport->phba, d_buf);
13924 }
13925 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13926 }
13927 if (abort_count)
13928 lpfc_update_rcv_time_stamp(vport);
13929}
13930
13931/**
13932 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
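 * @vport: pointer to the vport that the frame was received on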
13933 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
13934 *
13935 * This function searches through the existing incomplete sequences that have
13936 * been sent to this @vport. If the frame matches one of the incomplete
13937 * sequences then the dbuf in the @dmabuf is added to the list of frames that
13938 * make up that sequence. If no sequence is found that matches this frame then
13939 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
13940 * This function returns a pointer to the first dmabuf in the sequence list that
13941 * the frame was linked to.
13942 **/
13943static struct hbq_dmabuf *
13944lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
13945{
13946 struct fc_frame_header *new_hdr;
13947 struct fc_frame_header *temp_hdr;
13948 struct lpfc_dmabuf *d_buf;
13949 struct lpfc_dmabuf *h_buf;
13950 struct hbq_dmabuf *seq_dmabuf = NULL;
13951 struct hbq_dmabuf *temp_dmabuf = NULL;
13952
4d9ab994 13953 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 13954 dmabuf->time_stamp = jiffies;
13955 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13956 /* Use the hdr_buf to find the sequence that this frame belongs to */
13957 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13958 temp_hdr = (struct fc_frame_header *)h_buf->virt;
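		/* A frame matches a pending sequence when SEQ_ID, OX_ID and S_ID all agree */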
13959 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13960 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13961 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13962 continue;
13963 /* found a pending sequence that matches this frame */
13964 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13965 break;
13966 }
13967 if (!seq_dmabuf) {
13968 /*
13969 * This indicates first frame received for this sequence.
13970 * Queue the buffer on the vport's rcv_buffer_list.
13971 */
13972 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 13973 lpfc_update_rcv_time_stamp(vport);
13974 return dmabuf;
13975 }
13976 temp_hdr = seq_dmabuf->hbuf.virt;
13977 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
13978 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
13979 list_del_init(&seq_dmabuf->hbuf.list);
13980 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13981 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 13982 lpfc_update_rcv_time_stamp(vport);
13983 return dmabuf;
13984 }
13985 /* move this sequence to the tail to indicate a young sequence */
13986 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
13987 seq_dmabuf->time_stamp = jiffies;
13988 lpfc_update_rcv_time_stamp(vport);
13989 if (list_empty(&seq_dmabuf->dbuf.list)) {
13990 temp_hdr = dmabuf->hbuf.virt;
13991 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
13992 return seq_dmabuf;
13993 }
13994 /* find the correct place in the sequence to insert this frame */
13995 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
13996 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13997 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
13998 /*
13999 * If the frame's sequence count is greater than the frame on
14000 * the list then insert the frame right after this frame
14001 */
14002 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14003 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14004 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14005 return seq_dmabuf;
14006 }
14007 }
14008 return NULL;
14009}
14010
14011/**
14012 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14013 * @vport: pointer to a virtual port
14014 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14015 *
14016 * This function tries to abort the partially assembled sequence described
14017 * by the information in the basic abort @dmabuf. It checks whether such a
14018 * partially assembled sequence is held by the driver. If so, it frees up all
14019 * the frames from the partially assembled sequence.
14020 *
14021 * Return
14022 * true -- if a matching partially assembled sequence is present and all
14023 * of its frames have been freed;
14024 * false -- if there is no matching partially assembled sequence, so
14025 * nothing was aborted in the lower layer driver
14026 **/
14027static bool
14028lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14029 struct hbq_dmabuf *dmabuf)
14030{
14031 struct fc_frame_header *new_hdr;
14032 struct fc_frame_header *temp_hdr;
14033 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
14034 struct hbq_dmabuf *seq_dmabuf = NULL;
14035
14036 /* Use the hdr_buf to find the sequence that matches this frame */
14037 INIT_LIST_HEAD(&dmabuf->dbuf.list);
14038 INIT_LIST_HEAD(&dmabuf->hbuf.list);
14039 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14040 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14041 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14042 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14043 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14044 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14045 continue;
14046 /* found a pending sequence that matches this frame */
14047 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14048 break;
14049 }
14050
14051 /* Free up all the frames from the partially assembled sequence */
14052 if (seq_dmabuf) {
14053 list_for_each_entry_safe(d_buf, n_buf,
14054 &seq_dmabuf->dbuf.list, list) {
14055 list_del_init(&d_buf->list);
14056 lpfc_in_buf_free(vport->phba, d_buf);
14057 }
14058 return true;
14059 }
14060 return false;
14061}
14062
14063/**
546fc854 14064 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14065 * @phba: Pointer to HBA context object.
14066 * @cmd_iocbq: pointer to the command iocbq structure.
14067 * @rsp_iocbq: pointer to the response iocbq structure.
14068 *
546fc854 14069 * This function handles the sequence abort response iocb command complete
14070 * event. It properly releases the memory allocated to the sequence abort
14071 * response iocb.
14072 **/
14073static void
546fc854 14074lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14075 struct lpfc_iocbq *cmd_iocbq,
14076 struct lpfc_iocbq *rsp_iocbq)
14077{
14078 if (cmd_iocbq)
14079 lpfc_sli_release_iocbq(phba, cmd_iocbq);
14080
14081 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14082 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
14083 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14084 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
14085 rsp_iocbq->iocb.ulpStatus,
14086 rsp_iocbq->iocb.un.ulpWord[4]);
14087}
14088
14089/**
14090 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
14091 * @phba: Pointer to HBA context object.
14092 * @xri: xri id in transaction.
14093 *
14094 * This function validates that the xri maps to the known range of XRIs
14095 * allocated and used by the driver, returning the matching logical index.
14096 **/
7851fe2c 14097uint16_t
14098lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14099 uint16_t xri)
14100{
14101 int i;
14102
14103 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
14104 if (xri == phba->sli4_hba.xri_ids[i])
14105 return i;
14106 }
14107 return NO_XRI;
14108}
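/*
 * Illustrative use (mirrors the ABTS handling below): translate the
 * exchange's physical XRI into the driver's logical index before acting
 * on it, e.g.
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri, rxid, 0);
 */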
14109
6669f9bb 14110/**
546fc854 14111 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14112 * @phba: Pointer to HBA context object.
14113 * @fc_hdr: pointer to a FC frame header.
14114 *
546fc854 14115 * This function sends a basic response to a previous unsol sequence abort
14116 * event after aborting the sequence handling.
14117 **/
14118static void
546fc854 14119lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14120 struct fc_frame_header *fc_hdr)
14121{
14122 struct lpfc_iocbq *ctiocb = NULL;
14123 struct lpfc_nodelist *ndlp;
ee0f4fe1 14124 uint16_t oxid, rxid, xri, lxri;
5ffc266e 14125 uint32_t sid, fctl;
6669f9bb 14126 IOCB_t *icmd;
546fc854 14127 int rc;
14128
14129 if (!lpfc_is_link_up(phba))
14130 return;
14131
14132 sid = sli4_sid_from_fc_hdr(fc_hdr);
14133 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 14134 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
14135
14136 ndlp = lpfc_findnode_did(phba->pport, sid);
14137 if (!ndlp) {
14138 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14139 "1268 Find ndlp returned NULL for oxid:x%x "
14140 "SID:x%x\n", oxid, sid);
14141 return;
14142 }
14143
546fc854 14144 /* Allocate buffer for rsp iocb */
14145 ctiocb = lpfc_sli_get_iocbq(phba);
14146 if (!ctiocb)
14147 return;
14148
14149 /* Extract the F_CTL field from FC_HDR */
14150 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
14151
6669f9bb 14152 icmd = &ctiocb->iocb;
6669f9bb 14153 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 14154 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
14155 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
14156 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
14157 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
14158
14159 /* Fill in the rest of iocb fields */
14160 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
14161 icmd->ulpBdeCount = 0;
14162 icmd->ulpLe = 1;
14163 icmd->ulpClass = CLASS3;
6d368e53 14164 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
be858b65 14165 ctiocb->context1 = ndlp;
6669f9bb 14166
14167 ctiocb->iocb_cmpl = NULL;
14168 ctiocb->vport = phba->pport;
546fc854 14169 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 14170 ctiocb->sli4_lxritag = NO_XRI;
14171 ctiocb->sli4_xritag = NO_XRI;
14172
14173 if (fctl & FC_FC_EX_CTX)
14174 /* Exchange responder sent the abort so we
14175 * own the oxid.
14176 */
14177 xri = oxid;
14178 else
14179 xri = rxid;
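	/* Translate the ABTS exchange's physical XRI to the driver's logical index */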
14180 lxri = lpfc_sli4_xri_inrange(phba, xri);
14181 if (lxri != NO_XRI)
14182 lpfc_set_rrq_active(phba, ndlp, lxri,
14183 (xri == oxid) ? rxid : oxid, 0);
14184 /* If the oxid maps to the FCP XRI range or if it is out of range,
14185 * send a BLS_RJT. The driver no longer has that exchange.
14186 * Override the IOCB for a BA_RJT.
14187 */
ee0f4fe1 14188 if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
546fc854 14189 phba->sli4_hba.max_cfg_param.xri_base) ||
ee0f4fe1 14190 xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
14191 phba->sli4_hba.max_cfg_param.xri_base)) {
14192 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14193 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14194 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14195 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14196 }
6669f9bb 14197
14198 if (fctl & FC_FC_EX_CTX) {
14199 /* ABTS sent by responder to CT exchange, construction
14200 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
14201 * field and RX_ID from ABTS for RX_ID field.
14202 */
546fc854 14203 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14204 } else {
14205 /* ABTS sent by initiator to CT exchange, construction
14206 * of BA_ACC will need to allocate a new XRI as for the
f09c3acc 14207 * XRI_TAG field.
5ffc266e 14208 */
546fc854 14209 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 14210 }
f09c3acc 14211 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 14212 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 14213
546fc854 14214 /* Xmit CT abts response on exchange <xid> */
6669f9bb 14215 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14216 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14217 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14218
14219 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14220 if (rc == IOCB_ERROR) {
14221 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
14222 "2925 Failed to issue CT ABTS RSP x%x on "
14223 "xri x%x, Data x%x\n",
14224 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14225 phba->link_state);
14226 lpfc_sli_release_iocbq(phba, ctiocb);
14227 }
14228}
14229
14230/**
14231 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14232 * @vport: Pointer to the vport on which this sequence was received
14233 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14234 *
14235 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14236 * receive sequence is only partially assembled by the driver, it shall abort
14237 * the partially assembled frames for the sequence. Otherwise, if the
14238 * unsolicited receive sequence has been completely assembled and passed to
14239 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
14240 * unsolicited sequence as aborted. After that, it will issue a basic
14241 * accept to accept the abort.
14242 **/
14243void
14244lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14245 struct hbq_dmabuf *dmabuf)
14246{
14247 struct lpfc_hba *phba = vport->phba;
14248 struct fc_frame_header fc_hdr;
5ffc266e 14249 uint32_t fctl;
14250 bool abts_par;
14251
14252 /* Make a copy of fc_hdr before the dmabuf being released */
14253 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 14254 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 14255
14256 if (fctl & FC_FC_EX_CTX) {
14257 /*
14258 * ABTS sent by responder to exchange, just free the buffer
14259 */
6669f9bb 14260 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14261 } else {
14262 /*
14263 * ABTS sent by initiator to exchange, need to do cleanup
14264 */
14265 /* Try to abort partially assembled seq */
14266 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14267
14268 /* Send abort to ULP if partially seq abort failed */
14269 if (!abts_par)
14270 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
14271 else
14272 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14273 }
6669f9bb 14274 /* Send basic accept (BA_ACC) to the abort requester */
546fc854 14275 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
14276}
14277
14278/**
14279 * lpfc_seq_complete - Indicates if a sequence is complete
14280 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14281 *
14282 * This function checks the sequence, starting with the frame described by
14283 * @dmabuf, to see if all the frames associated with this sequence are present.
14284 * The frames associated with this sequence are linked to the @dmabuf using
14285 * the dbuf list. This function looks for three major things. 1) That the
14286 * first frame has a sequence count of zero. 2) That there is a frame with the
14287 * last frame of sequence bit set. 3) That there are no holes in the sequence
14288 * count. The function returns 1 when the sequence is complete, otherwise 0.
14289 **/
14290static int
14291lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14292{
14293 struct fc_frame_header *hdr;
14294 struct lpfc_dmabuf *d_buf;
14295 struct hbq_dmabuf *seq_dmabuf;
14296 uint32_t fctl;
14297 int seq_count = 0;
14298
14299 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14300 /* make sure the first frame of the sequence has a sequence count of zero */
14301 if (hdr->fh_seq_cnt != seq_count)
14302 return 0;
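	/* Reassemble the 24-bit F_CTL field from its three header bytes */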
14303 fctl = (hdr->fh_f_ctl[0] << 16 |
14304 hdr->fh_f_ctl[1] << 8 |
14305 hdr->fh_f_ctl[2]);
14306 /* If last frame of sequence we can return success. */
14307 if (fctl & FC_FC_END_SEQ)
14308 return 1;
14309 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14310 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14311 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14312 /* If there is a hole in the sequence count then fail. */
eeead811 14313 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14314 return 0;
14315 fctl = (hdr->fh_f_ctl[0] << 16 |
14316 hdr->fh_f_ctl[1] << 8 |
14317 hdr->fh_f_ctl[2]);
14318 /* If last frame of sequence we can return success. */
14319 if (fctl & FC_FC_END_SEQ)
14320 return 1;
14321 }
14322 return 0;
14323}
14324
14325/**
14326 * lpfc_prep_seq - Prep sequence for ULP processing
14327 * @vport: Pointer to the vport on which this sequence was received
14328 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
14329 *
14330 * This function takes a sequence, described by a list of frames, and creates
14331 * a list of iocbq structures to describe the sequence. This iocbq list will be
14332 * used to issue to the generic unsolicited sequence handler. This routine
14333 * returns a pointer to the first iocbq in the list. If the function is unable
14334 * to allocate an iocbq then it throws out the received frames that could not
14335 * be described and returns a pointer to the first iocbq. If unable to
14336 * allocate any iocbqs (including the first) this function will return NULL.
14337 **/
14338static struct lpfc_iocbq *
14339lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14340{
7851fe2c 14341 struct hbq_dmabuf *hbq_buf;
14342 struct lpfc_dmabuf *d_buf, *n_buf;
14343 struct lpfc_iocbq *first_iocbq, *iocbq;
14344 struct fc_frame_header *fc_hdr;
14345 uint32_t sid;
7851fe2c 14346 uint32_t len, tot_len;
eeead811 14347 struct ulp_bde64 *pbde;
14348
14349 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14350 /* remove from receive buffer list */
14351 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 14352 lpfc_update_rcv_time_stamp(vport);
4f774513 14353 /* get the Remote Port's SID */
6669f9bb 14354 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 14355 tot_len = 0;
14356 /* Get an iocbq struct to fill in. */
14357 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14358 if (first_iocbq) {
14359 /* Initialize the first IOCB. */
8fa38513 14360 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 14361 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14362
14363 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14364 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14365 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14366 first_iocbq->iocb.un.rcvels.parmRo =
14367 sli4_did_from_fc_hdr(fc_hdr);
14368 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14369 } else
14370 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14371 first_iocbq->iocb.ulpContext = NO_XRI;
14372 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14373 be16_to_cpu(fc_hdr->fh_ox_id);
14374 /* iocbq is prepped for internal consumption. Physical vpi. */
14375 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14376 vport->phba->vpi_ids[vport->vpi];
14377 /* put the first buffer into the first IOCBq */
14378 first_iocbq->context2 = &seq_dmabuf->dbuf;
14379 first_iocbq->context3 = NULL;
14380 first_iocbq->iocb.ulpBdeCount = 1;
14381 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14382 LPFC_DATA_BUF_SIZE;
14383 first_iocbq->iocb.un.rcvels.remoteID = sid;
7851fe2c 14384 tot_len = bf_get(lpfc_rcqe_length,
4d9ab994 14385 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
7851fe2c 14386 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14387 }
14388 iocbq = first_iocbq;
14389 /*
14390 * Each IOCBq can have two Buffers assigned, so go through the list
14391 * of buffers for this sequence and save two buffers in each IOCBq
14392 */
14393 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14394 if (!iocbq) {
14395 lpfc_in_buf_free(vport->phba, d_buf);
14396 continue;
14397 }
14398 if (!iocbq->context3) {
14399 iocbq->context3 = d_buf;
14400 iocbq->iocb.ulpBdeCount++;
14401 pbde = (struct ulp_bde64 *)
14402 &iocbq->iocb.unsli3.sli3Words[4];
14403 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14404
14405 /* We need to get the size out of the right CQE */
14406 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14407 len = bf_get(lpfc_rcqe_length,
14408 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14409 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14410 tot_len += len;
14411 } else {
14412 iocbq = lpfc_sli_get_iocbq(vport->phba);
14413 if (!iocbq) {
14414 if (first_iocbq) {
14415 first_iocbq->iocb.ulpStatus =
14416 IOSTAT_FCP_RSP_ERROR;
14417 first_iocbq->iocb.un.ulpWord[4] =
14418 IOERR_NO_RESOURCES;
14419 }
14420 lpfc_in_buf_free(vport->phba, d_buf);
14421 continue;
14422 }
14423 iocbq->context2 = d_buf;
14424 iocbq->context3 = NULL;
14425 iocbq->iocb.ulpBdeCount = 1;
14426 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14427 LPFC_DATA_BUF_SIZE;
14428
14429 /* We need to get the size out of the right CQE */
14430 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14431 len = bf_get(lpfc_rcqe_length,
14432 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14433 tot_len += len;
14434 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14435
14436 iocbq->iocb.un.rcvels.remoteID = sid;
14437 list_add_tail(&iocbq->list, &first_iocbq->list);
14438 }
14439 }
14440 return first_iocbq;
14441}
14442
14443static void
14444lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14445 struct hbq_dmabuf *seq_dmabuf)
14446{
14447 struct fc_frame_header *fc_hdr;
14448 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14449 struct lpfc_hba *phba = vport->phba;
14450
14451 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14452 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14453 if (!iocbq) {
14454 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14455 "2707 Ring %d handler: Failed to allocate "
14456 "iocb Rctl x%x Type x%x received\n",
14457 LPFC_ELS_RING,
14458 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14459 return;
14460 }
14461 if (!lpfc_complete_unsol_iocb(phba,
14462 &phba->sli.ring[LPFC_ELS_RING],
14463 iocbq, fc_hdr->fh_r_ctl,
14464 fc_hdr->fh_type))
6d368e53 14465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14466 "2540 Ring %d handler: unexpected Rctl "
14467 "x%x Type x%x received\n",
14468 LPFC_ELS_RING,
14469 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14470
14471 /* Free iocb created in lpfc_prep_seq */
14472 list_for_each_entry_safe(curr_iocb, next_iocb,
14473 &iocbq->list, list) {
14474 list_del_init(&curr_iocb->list);
14475 lpfc_sli_release_iocbq(phba, curr_iocb);
14476 }
14477 lpfc_sli_release_iocbq(phba, iocbq);
14478}
14479
14480/**
14481 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14482 * @phba: Pointer to HBA context object.
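 * @dmabuf: pointer to the hbq_dmabuf that describes the received frame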
14483 *
14484 * This function is called with no lock held. This function processes all
14485 * the received buffers and gives them to upper layers when a received buffer
14486 * indicates that it is the final frame in the sequence. The interrupt
14487 * service routine processes received buffers at interrupt contexts and adds
14488 * received dma buffers to the rb_pend_list queue and signals the worker thread.
14489 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14490 * appropriate receive function when the final frame in a sequence is received.
14491 **/
14492void
14493lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14494 struct hbq_dmabuf *dmabuf)
4f774513 14495{
4d9ab994 14496 struct hbq_dmabuf *seq_dmabuf;
14497 struct fc_frame_header *fc_hdr;
14498 struct lpfc_vport *vport;
14499 uint32_t fcfi;
939723a4 14500 uint32_t did;
4f774513 14501
4f774513 14502 /* Process each received buffer */
14503 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14504 /* check to see if this a valid type of frame */
14505 if (lpfc_fc_frame_check(phba, fc_hdr)) {
14506 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14507 return;
14508 }
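	/* The fcf_id field sits at a different offset in RECEIVE_V1 CQEs */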
14509 if ((bf_get(lpfc_cqe_code,
14510 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
14511 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14512 &dmabuf->cq_event.cqe.rcqe_cmpl);
14513 else
14514 fcfi = bf_get(lpfc_rcqe_fcf_id,
14515 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 14516
4d9ab994 14517 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
939723a4 14518 if (!vport) {
14519 /* throw out the frame */
14520 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14521 return;
14522 }
14523
14524 /* d_id this frame is directed to */
14525 did = sli4_did_from_fc_hdr(fc_hdr);
14526
14527 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14528 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14529 (did != Fabric_DID)) {
14530 /*
14531 * Throw out the frame if we are not pt2pt.
14532 * The pt2pt protocol allows for discovery frames
14533 * to be received without a registered VPI.
14534 */
14535 if (!(vport->fc_flag & FC_PT2PT) ||
14536 (phba->link_state == LPFC_HBA_READY)) {
14537 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14538 return;
14539 }
14540 }
14541
14542 /* Handle the basic abort sequence (BA_ABTS) event */
14543 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14544 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14545 return;
14546 }
14547
14548 /* Link this frame */
14549 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14550 if (!seq_dmabuf) {
14551 /* unable to add frame to vport - throw it out */
14552 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14553 return;
14554 }
14555 /* If not last frame in sequence continue processing frames. */
def9c7a9 14556 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 14557 return;
def9c7a9 14558
14559 /* Send the complete sequence to the upper layer protocol */
14560 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 14561}
14562
14563/**
14564 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14565 * @phba: pointer to lpfc hba data structure.
14566 *
14567 * This routine is invoked to post rpi header templates to the
14568 * HBA consistent with the SLI-4 interface spec. This routine
14569 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14570 * SLI4_PAGE_SIZE / 64 rpi context headers.
14571 *
14572 * This routine does not require any locks. Its usage is expected
14573 * to be driver load or reset recovery, when driver operation is
14574 * sequential.
14575 *
14576 * Return codes
af901ca1 14577 * 0 - successful
d439d286 14578 * -EIO - The mailbox failed to complete successfully.
14579 * When this error occurs, the driver is not guaranteed
14580 * to have any rpi regions posted to the device and
14581 * must either attempt to repost the regions or take a
14582 * fatal error.
14583 **/
14584int
14585lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14586{
14587 struct lpfc_rpi_hdr *rpi_page;
14588 uint32_t rc = 0;
14589 uint16_t lrpi = 0;
14590
14591 /* SLI4 ports that support extents do not require RPI headers. */
14592 if (!phba->sli4_hba.rpi_hdrs_in_use)
14593 goto exit;
14594 if (phba->sli4_hba.extents_in_use)
14595 return -EIO;
6fb120a7 14596
6fb120a7 14597 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14598 /*
14599 * Assign the rpi headers a physical rpi only if the driver
14600 * has not initialized those resources. A port reset only
14601 * needs the headers posted.
14602 */
14603 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14604 LPFC_RPI_RSRC_RDY)
14605 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14606
14607 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14608 if (rc != MBX_SUCCESS) {
14609 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14610 "2008 Error %d posting all rpi "
14611 "headers\n", rc);
14612 rc = -EIO;
14613 break;
14614 }
14615 }
14616
14617 exit:
14618 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14619 LPFC_RPI_RSRC_RDY);
14620 return rc;
14621}
14622
14623/**
14624 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14625 * @phba: pointer to lpfc hba data structure.
14626 * @rpi_page: pointer to the rpi memory region.
14627 *
14628 * This routine is invoked to post a single rpi header to the
14629 * HBA consistent with the SLI-4 interface spec. This memory region
14630 * maps up to 64 rpi context regions.
14631 *
14632 * Return codes
af901ca1 14633 * 0 - successful
14634 * -ENOMEM - No available memory
14635 * -EIO - The mailbox failed to complete successfully.
14636 **/
14637int
14638lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14639{
14640 LPFC_MBOXQ_t *mboxq;
14641 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14642 uint32_t rc = 0;
14643 uint32_t shdr_status, shdr_add_status;
14644 union lpfc_sli4_cfg_shdr *shdr;
14645
14646 /* SLI4 ports that support extents do not require RPI headers. */
14647 if (!phba->sli4_hba.rpi_hdrs_in_use)
14648 return rc;
14649 if (phba->sli4_hba.extents_in_use)
14650 return -EIO;
14651
14652 /* The port is notified of the header region via a mailbox command. */
14653 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14654 if (!mboxq) {
14655 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14656 "2001 Unable to allocate memory for issuing "
14657 "SLI_CONFIG_SPECIAL mailbox command\n");
14658 return -ENOMEM;
14659 }
14660
14661 /* Post all rpi memory regions to the port. */
14662 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14663 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14664 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14665 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14666 sizeof(struct lpfc_sli4_cfg_mhdr),
14667 LPFC_SLI4_MBX_EMBED);
14668
14669
14670 /* Post the physical rpi to the port for this rpi header. */
14671 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14672 rpi_page->start_rpi);
14673 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14674 hdr_tmpl, rpi_page->page_count);
14675
14676 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14677 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 14678 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14679 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14680 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14681 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
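	/* On MBX_TIMEOUT the mailbox may still complete later, so it is not freed here */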
14682 if (rc != MBX_TIMEOUT)
14683 mempool_free(mboxq, phba->mbox_mem_pool);
14684 if (shdr_status || shdr_add_status || rc) {
14685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14686 "2514 POST_RPI_HDR mailbox failed with "
14687 "status x%x add_status x%x, mbx status x%x\n",
14688 shdr_status, shdr_add_status, rc);
14689 rc = -ENXIO;
14690 }
14691 return rc;
14692}
14693
14694/**
14695 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14696 * @phba: pointer to lpfc hba data structure.
14697 *
14698 * This routine is invoked to allocate the next available rpi from the
14699 * driver's rpi bitmask. When the pool of remaining rpis runs low, it also
14700 * allocates and posts another rpi header region to the port so that more
14701 * rpis become available.
14702 *
14703 * Returns
af901ca1 14704 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
14705 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
14706 **/
14707int
14708lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
14709{
14710 unsigned long rpi;
14711 uint16_t max_rpi, rpi_limit;
14712 uint16_t rpi_remaining, lrpi = 0;
14713 struct lpfc_rpi_hdr *rpi_hdr;
14714
14715 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
14716 rpi_limit = phba->sli4_hba.next_rpi;
14717
14718 /*
14719 * Fetch the next logical rpi. Because this index is logical,
14720 * the driver starts at 0 each time.
14721 */
14722 spin_lock_irq(&phba->hbalock);
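	/* find_next_zero_bit returns rpi_limit when no free rpi is left */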
14723 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
14724 if (rpi >= rpi_limit)
14725 rpi = LPFC_RPI_ALLOC_ERROR;
14726 else {
14727 set_bit(rpi, phba->sli4_hba.rpi_bmask);
14728 phba->sli4_hba.max_cfg_param.rpi_used++;
14729 phba->sli4_hba.rpi_count++;
14730 }
14731
14732 /*
14733 * Don't try to allocate more rpi header regions if the device limit
6d368e53 14734 * has been exhausted.
14735 */
14736 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
14737 (phba->sli4_hba.rpi_count >= max_rpi)) {
14738 spin_unlock_irq(&phba->hbalock);
14739 return rpi;
14740 }
14741
14742 /*
14743 * RPI header postings are not required for SLI4 ports capable of
14744 * extents.
14745 */
14746 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14747 spin_unlock_irq(&phba->hbalock);
14748 return rpi;
14749 }
14750
14751 /*
14752 * If the driver is running low on rpi resources, allocate another
14753 * page now. Note that the next_rpi value is used because
14754 * it represents how many are actually in use whereas max_rpi notes
14755 * how many are supported max by the device.
14756 */
6d368e53 14757 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
14758 spin_unlock_irq(&phba->hbalock);
14759 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
14760 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
14761 if (!rpi_hdr) {
14762 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14763 "2002 Error Could not grow rpi "
14764 "count\n");
14765 } else {
14766 lrpi = rpi_hdr->start_rpi;
14767 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14768 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
14769 }
14770 }
14771
14772 return rpi;
14773}
14774
14775/**
14776 * __lpfc_sli4_free_rpi - Release an rpi for reuse (hbalock held by caller).
14777 * @phba: pointer to lpfc hba data structure.
14778 *
14779 * This routine is invoked to release an rpi to the pool of
14780 * available rpis maintained by the driver.
14781 **/
14782void
14783__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14784{
14785 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
14786 phba->sli4_hba.rpi_count--;
14787 phba->sli4_hba.max_cfg_param.rpi_used--;
14788 }
14789}
14790
14791/**
14792 * lpfc_sli4_free_rpi - Release an rpi for reuse.
14793 * @phba: pointer to lpfc hba data structure.
14794 *
14795 * This routine is invoked to release an rpi to the pool of
14796 * available rpis maintained by the driver.
14797 **/
14798void
14799lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14800{
14801 spin_lock_irq(&phba->hbalock);
d7c47992 14802 __lpfc_sli4_free_rpi(phba, rpi);
14803 spin_unlock_irq(&phba->hbalock);
14804}
14805
14806/**
14807 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
14808 * @phba: pointer to lpfc hba data structure.
14809 *
14810 * This routine is invoked to free the memory that backs the driver's
14811 * rpi bitmask and rpi id bookkeeping.
14812 **/
14813void
14814lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
14815{
14816 kfree(phba->sli4_hba.rpi_bmask);
14817 kfree(phba->sli4_hba.rpi_ids);
14818 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
14819}
14820
14821/**
14822 * lpfc_sli4_resume_rpi - Resume rpi operations for a remote node
14823 * @ndlp: pointer to the node whose rpi is to be resumed.
14824 *
14825 * This routine is invoked to issue a RESUME_RPI mailbox command to the
14826 * port, with an optional completion handler and context argument.
14827 **/
14828int
14829lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
14830 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
14831{
14832 LPFC_MBOXQ_t *mboxq;
14833 struct lpfc_hba *phba = ndlp->phba;
14834 int rc;
14835
14836 /* The port is notified of the header region via a mailbox command. */
14837 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14838 if (!mboxq)
14839 return -ENOMEM;
14840
14841 /* Post all rpi memory regions to the port. */
14842 lpfc_resume_rpi(mboxq, ndlp);
14843 if (cmpl) {
14844 mboxq->mbox_cmpl = cmpl;
14845 mboxq->context1 = arg;
14846 mboxq->context2 = ndlp;
14847 } else
14848 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 14849 mboxq->vport = ndlp->vport;
14850 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14851 if (rc == MBX_NOT_FINISHED) {
14852 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14853 "2010 Resume RPI Mailbox failed "
14854 "status %d, mbxStatus x%x\n", rc,
14855 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14856 mempool_free(mboxq, phba->mbox_mem_pool);
14857 return -EIO;
14858 }
14859 return 0;
14860}
14861
14862/**
14863 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 14864 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 14865 *
76a95d75 14866 * This routine is invoked to activate a vpi with the port.
14867 *
14868 * Returns:
14869 * 0 success
14870 * -Evalue otherwise
14871 **/
14872int
76a95d75 14873lpfc_sli4_init_vpi(struct lpfc_vport *vport)
14874{
14875 LPFC_MBOXQ_t *mboxq;
14876 int rc = 0;
6a9c52cf 14877 int retval = MBX_SUCCESS;
6fb120a7 14878 uint32_t mbox_tmo;
76a95d75 14879 struct lpfc_hba *phba = vport->phba;
14880 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14881 if (!mboxq)
14882 return -ENOMEM;
76a95d75 14883 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 14884 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 14885 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 14886 if (rc != MBX_SUCCESS) {
76a95d75 14887 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
14888 "2022 INIT VPI Mailbox failed "
14889 "status %d, mbxStatus x%x\n", rc,
14890 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 14891 retval = -EIO;
6fb120a7 14892 }
6a9c52cf 14893 if (rc != MBX_TIMEOUT)
76a95d75 14894 mempool_free(mboxq, vport->phba->mbox_mem_pool);
14895
14896 return retval;
14897}
14898
14899/**
14900 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
14901 * @phba: pointer to lpfc hba data structure.
14902 * @mboxq: Pointer to mailbox object.
14903 *
14904 * This routine is invoked to manually add a single FCF record. The caller
14905 * must pass a completely initialized FCF_Record. This routine takes
14906 * care of the nonembedded mailbox operations.
14907 **/
14908static void
14909lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
14910{
14911 void *virt_addr;
14912 union lpfc_sli4_cfg_shdr *shdr;
14913 uint32_t shdr_status, shdr_add_status;
14914
14915 virt_addr = mboxq->sge_array->addr[0];
14916 /* The IOCTL status is embedded in the mailbox subheader. */
14917 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
14918 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14919 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14920
14921 if ((shdr_status || shdr_add_status) &&
14922 (shdr_status != STATUS_FCF_IN_USE))
14923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14924 "2558 ADD_FCF_RECORD mailbox failed with "
14925 "status x%x add_status x%x\n",
14926 shdr_status, shdr_add_status);
14927
14928 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14929}
14930
14931/**
14932 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
14933 * @phba: pointer to lpfc hba data structure.
14934 * @fcf_record: pointer to the initialized fcf record to add.
14935 *
14936 * This routine is invoked to manually add a single FCF record. The caller
14937 * must pass a completely initialized FCF_Record. This routine takes
14938 * care of the nonembedded mailbox operations.
14939 **/
14940int
14941lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
14942{
14943 int rc = 0;
14944 LPFC_MBOXQ_t *mboxq;
14945 uint8_t *bytep;
14946 void *virt_addr;
14947 dma_addr_t phys_addr;
14948 struct lpfc_mbx_sge sge;
14949 uint32_t alloc_len, req_len;
14950 uint32_t fcfindex;
14951
14952 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14953 if (!mboxq) {
14954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14955 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
14956 return -ENOMEM;
14957 }
14958
14959 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
14960 sizeof(uint32_t);
14961
14962 /* Allocate DMA memory and set up the non-embedded mailbox command */
14963 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14964 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
14965 req_len, LPFC_SLI4_MBX_NEMBED);
14966 if (alloc_len < req_len) {
14967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14968 "2523 Allocated DMA memory size (x%x) is "
14969 "less than the requested DMA memory "
14970 "size (x%x)\n", alloc_len, req_len);
14971 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14972 return -ENOMEM;
14973 }
14974
14975 /*
14976 * Get the first SGE entry from the non-embedded DMA memory. This
14977 * routine only uses a single SGE.
14978 */
14979 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
14980 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
14981 virt_addr = mboxq->sge_array->addr[0];
14982 /*
14983 * Configure the FCF record for FCFI 0. This is the driver's
14984 * hardcoded default and gets used in nonFIP mode.
14985 */
14986 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
14987 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
14988 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
14989
14990 /*
14991 * Copy the fcf_index and the FCF Record Data. The data starts after
14992 * the FCoE header plus word10. The data copy needs to be endian
14993 * correct.
14994 */
14995 bytep += sizeof(uint32_t);
14996 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
14997 mboxq->vport = phba->pport;
14998 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
14999 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15000 if (rc == MBX_NOT_FINISHED) {
15001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15002 "2515 ADD_FCF_RECORD mailbox failed with "
15003 "status 0x%x\n", rc);
15004 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15005 rc = -EIO;
15006 } else
15007 rc = 0;
15008
15009 return rc;
15010}
15011
15012/**
15013 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
15014 * @phba: pointer to lpfc hba data structure.
15015 * @fcf_record: pointer to the fcf record to write the default data.
15016 * @fcf_index: FCF table entry index.
15017 *
15018 * This routine is invoked to build the driver's default FCF record. The
15019 * values used are hardcoded. This routine handles memory initialization.
15020 *
15021 **/
15022void
15023lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
15024 struct fcf_record *fcf_record,
15025 uint16_t fcf_index)
15026{
15027 memset(fcf_record, 0, sizeof(struct fcf_record));
15028 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
15029 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
15030 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
15031 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
15032 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
15033 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
15034 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
15035 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
15036 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
15037 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
15038 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
15039 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
15040 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
0c287589 15041 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
15042 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
15043 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
15044 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
15045 /* Set the VLAN bit map */
15046 if (phba->valid_vlan) {
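		/* One bit per VLAN id: byte index vlan_id / 8, bit vlan_id % 8 */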
15047 fcf_record->vlan_bitmap[phba->vlan_id / 8]
15048 = 1 << (phba->vlan_id % 8);
15049 }
15050}
15051
15052/**
0c9ab6f5 15053 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
15054 * @phba: pointer to lpfc hba data structure.
15055 * @fcf_index: FCF table entry offset.
15056 *
15057 * This routine is invoked to scan the entire FCF table by reading FCF
15058 * records and processing them one at a time starting from the @fcf_index
15059 * for initial FCF discovery or fast FCF failover rediscovery.
15060 *
15061 * Return 0 if the mailbox command is submitted successfully, nonzero
0c9ab6f5 15062 * otherwise.
15063 **/
15064int
0c9ab6f5 15065lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15066{
15067 int rc = 0, error;
15068 LPFC_MBOXQ_t *mboxq;
6fb120a7 15069
32b9793f 15070 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
80c17849 15071 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
15072 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15073 if (!mboxq) {
15074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15075 "2000 Failed to allocate mbox for "
15076 "READ_FCF cmd\n");
4d9ab994 15077 error = -ENOMEM;
0c9ab6f5 15078 goto fail_fcf_scan;
6fb120a7 15079 }
ecfd03c6 15080 /* Construct the read FCF record mailbox command */
0c9ab6f5 15081 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15082 if (rc) {
15083 error = -EINVAL;
0c9ab6f5 15084 goto fail_fcf_scan;
6fb120a7 15085 }
ecfd03c6 15086 /* Issue the mailbox command asynchronously */
6fb120a7 15087 mboxq->vport = phba->pport;
0c9ab6f5 15088 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
15089
15090 spin_lock_irq(&phba->hbalock);
15091 phba->hba_flag |= FCF_TS_INPROG;
15092 spin_unlock_irq(&phba->hbalock);
15093
6fb120a7 15094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
ecfd03c6 15095 if (rc == MBX_NOT_FINISHED)
6fb120a7 15096 error = -EIO;
ecfd03c6 15097 else {
15098 /* Reset eligible FCF count for new scan */
15099 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
999d813f 15100 phba->fcf.eligible_fcf_cnt = 0;
6fb120a7 15101 error = 0;
32b9793f 15102 }
0c9ab6f5 15103fail_fcf_scan:
15104 if (error) {
15105 if (mboxq)
15106 lpfc_sli4_mbox_cmd_free(phba, mboxq);
a93ff37a 15107 /* FCF scan failed, clear FCF_TS_INPROG flag */
4d9ab994 15108 spin_lock_irq(&phba->hbalock);
a93ff37a 15109 phba->hba_flag &= ~FCF_TS_INPROG;
15110 spin_unlock_irq(&phba->hbalock);
15111 }
15112 return error;
15113}
a0c87cbd 15114
0c9ab6f5 15115/**
a93ff37a 15116 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
15117 * @phba: pointer to lpfc hba data structure.
15118 * @fcf_index: FCF table entry offset.
15119 *
15120 * This routine is invoked to read an FCF record indicated by @fcf_index
a93ff37a 15121 * and to use it for FLOGI roundrobin FCF failover.
0c9ab6f5 15122 *
15123 * Return 0 if the mailbox command is submitted successfully, nonzero
15124 * otherwise.
15125 **/
15126int
15127lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15128{
15129 int rc = 0, error;
15130 LPFC_MBOXQ_t *mboxq;
15131
15132 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15133 if (!mboxq) {
15134 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15135 "2763 Failed to allocate mbox for "
15136 "READ_FCF cmd\n");
15137 error = -ENOMEM;
15138 goto fail_fcf_read;
15139 }
15140 /* Construct the read FCF record mailbox command */
15141 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15142 if (rc) {
15143 error = -EINVAL;
15144 goto fail_fcf_read;
15145 }
15146 /* Issue the mailbox command asynchronously */
15147 mboxq->vport = phba->pport;
15148 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
15149 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15150 if (rc == MBX_NOT_FINISHED)
15151 error = -EIO;
15152 else
15153 error = 0;
15154
15155fail_fcf_read:
15156 if (error && mboxq)
15157 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15158 return error;
15159}
15160
15161/**
15162 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
15163 * @phba: pointer to lpfc hba data structure.
15164 * @fcf_index: FCF table entry offset.
15165 *
15166 * This routine is invoked to read an FCF record indicated by @fcf_index to
15167 * determine whether it is eligible for the FLOGI roundrobin failover list.
0c9ab6f5 15168 *
15169 * Return 0 if the mailbox command is submitted successfully, nonzero
15170 * otherwise.
15171 **/
15172int
15173lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15174{
15175 int rc = 0, error;
15176 LPFC_MBOXQ_t *mboxq;
15177
15178 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15179 if (!mboxq) {
15180 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15181 "2758 Failed to allocate mbox for "
15182 "READ_FCF cmd\n");
15183 error = -ENOMEM;
15184 goto fail_fcf_read;
15185 }
15186 /* Construct the read FCF record mailbox command */
15187 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15188 if (rc) {
15189 error = -EINVAL;
15190 goto fail_fcf_read;
15191 }
15192 /* Issue the mailbox command asynchronously */
15193 mboxq->vport = phba->pport;
15194 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15195 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15196 if (rc == MBX_NOT_FINISHED)
15197 error = -EIO;
15198 else
15199 error = 0;
15200
15201fail_fcf_read:
15202 if (error && mboxq)
15203 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15204 return error;
15205}
15206
15207/**
15208 * lpfc_check_next_fcf_pri_level
15209 * @phba: pointer to the lpfc_hba struct for this port.
15210 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
15211 * routine when the rr_bmask is empty. The FCF indices are put into the
15212 * rr_bmask based on their priority level, starting from the highest priority
15213 * to the lowest. The most likely FCF candidate will be in the highest
15214 * priority group. When this routine is called it searches the fcf_pri list
15215 * for the next lowest priority group and repopulates the rr_bmask with only
15216 * those fcf_indexes.
15217 * returns:
15218 * 1=success 0=failure
15219 **/
15220int
15221lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15222{
15223 uint16_t next_fcf_pri;
15224 uint16_t last_index;
15225 struct lpfc_fcf_pri *fcf_pri;
15226 int rc;
15227 int ret = 0;
15228
15229 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15230 LPFC_SLI4_FCF_TBL_INDX_MAX);
15231 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15232 "3060 Last IDX %d\n", last_index);
15233 if (list_empty(&phba->fcf.fcf_pri_list)) {
15234 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15235 "3061 Last IDX %d\n", last_index);
15236 return 0; /* Empty rr list */
15237 }
15238 next_fcf_pri = 0;
15239 /*
15240 * Clear the rr_bmask and set all of the bits that are at this
15241 * priority.
15242 */
15243 memset(phba->fcf.fcf_rr_bmask, 0,
15244 sizeof(*phba->fcf.fcf_rr_bmask));
15245 spin_lock_irq(&phba->hbalock);
15246 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15247 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15248 continue;
15249 /*
15250 * The first priority that has not had a FLOGI failure
15251 * will be the highest.
15252 */
15253 if (!next_fcf_pri)
15254 next_fcf_pri = fcf_pri->fcf_rec.priority;
15255 spin_unlock_irq(&phba->hbalock);
15256 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15257 rc = lpfc_sli4_fcf_rr_index_set(phba,
15258 fcf_pri->fcf_rec.fcf_index);
15259 if (rc)
15260 return 0;
15261 }
15262 spin_lock_irq(&phba->hbalock);
15263 }
15264 /*
15265 * If next_fcf_pri was not set above and the list is not empty, then
15266 * we have failed flogis on all of them. So reset the flogi failed flags
15267 * and start at the beginning.
15268 */
15269 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
15270 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15271 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
15272 /*
15273 * The first priority that has not had a FLOGI failure
15274 * will be the highest.
15275 */
15276 if (!next_fcf_pri)
15277 next_fcf_pri = fcf_pri->fcf_rec.priority;
15278 spin_unlock_irq(&phba->hbalock);
15279 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15280 rc = lpfc_sli4_fcf_rr_index_set(phba,
15281 fcf_pri->fcf_rec.fcf_index);
15282 if (rc)
15283 return 0;
15284 }
15285 spin_lock_irq(&phba->hbalock);
15286 }
15287 } else
15288 ret = 1;
15289 spin_unlock_irq(&phba->hbalock);
15290
15291 return ret;
15292}
15293/**
15294 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
15295 * @phba: pointer to lpfc hba data structure.
15296 *
15297 * This routine is to get the next eligible FCF record index in a round
15298 * robin fashion. If the next eligible FCF record index equals the
a93ff37a 15299 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
15300 * shall be returned, otherwise, the next eligible FCF record's index
15301 * shall be returned.
15302 **/
15303uint16_t
15304lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15305{
15306 uint16_t next_fcf_index;
15307
3804dc84 15308 /* Search start from next bit of currently registered FCF index */
7d791df7 15309next_priority:
15310 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
15311 LPFC_SLI4_FCF_TBL_INDX_MAX;
15312 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15313 LPFC_SLI4_FCF_TBL_INDX_MAX,
15314 next_fcf_index);
15315
0c9ab6f5 15316 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
15317 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15318 /*
 15320		 * If the search wrapped past the end of the bmask, rescan
 15321		 * from bit 0; if nothing is found there either, the current
 15322		 * priority level is exhausted and we may need to move on.
15322 */
15323 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15324 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
15325 }
15326
15327
15328 /* Check roundrobin failover list empty condition */
15329 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15330 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15331 /*
 15332		 * If the next FCF index is not found, check if there are lower
 15333		 * priority level FCFs in the fcf_priority list.
 15334		 * Set up the rr_bmask with all of the available FCF bits
15335 * at that level and continue the selection process.
15336 */
15337 if (lpfc_check_next_fcf_pri_level(phba))
15338 goto next_priority;
15339 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15340 "2844 No roundrobin failover FCF available\n");
15341 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15342 return LPFC_FCOE_FCF_NEXT_NONE;
15343 else {
15344 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15345 "3063 Only FCF available idx %d, flag %x\n",
15346 next_fcf_index,
15347 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15348 return next_fcf_index;
15349 }
15350 }
15351
15352 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15353 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15354 LPFC_FCF_FLOGI_FAILED)
15355 goto next_priority;
15356
3804dc84 15357 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15358 "2845 Get next roundrobin failover FCF (x%x)\n",
15359 next_fcf_index);
15360
15361 return next_fcf_index;
15362}
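
/*
 * Editor's note: a simplified model (not driver code) of the wrap-around
 * search above.  rr_next() mirrors the two find_next_bit() calls: scan
 * forward from cur + 1, and on a miss wrap and rescan from bit 0.  A return
 * value of nbits means the bmask is empty, which corresponds to the
 * LPFC_FCOE_FCF_NEXT_NONE case.
 */
static unsigned int rr_next(const unsigned long *bmask, unsigned int nbits,
			    unsigned int cur)
{
	unsigned int next;

	next = find_next_bit(bmask, nbits, (cur + 1) % nbits);
	if (next >= nbits)	/* wrapped: rescan from the beginning */
		next = find_next_bit(bmask, nbits, 0);
	return next;		/* == nbits when no bit is set */
}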
15363
15364/**
15365 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15366 * @phba: pointer to lpfc hba data structure.
15367 *
 15368 * This routine sets the FCF record index into the eligible bmask for
a93ff37a 15369 * roundrobin failover search. It checks to make sure that the index
15370 * does not go beyond the range of the driver allocated bmask dimension
15371 * before setting the bit.
15372 *
15373 * Returns 0 if the index bit successfully set, otherwise, it returns
15374 * -EINVAL.
15375 **/
15376int
15377lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15378{
15379 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15381 "2610 FCF (x%x) reached driver's book "
15382 "keeping dimension:x%x\n",
15383 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15384 return -EINVAL;
15385 }
15386 /* Set the eligible FCF record index bmask */
15387 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15388
3804dc84 15389 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 15390 "2790 Set FCF (x%x) to roundrobin FCF failover "
15391 "bmask\n", fcf_index);
15392
15393 return 0;
15394}
15395
15396/**
3804dc84 15397 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
15398 * @phba: pointer to lpfc hba data structure.
15399 *
15400 * This routine clears the FCF record index from the eligible bmask for
a93ff37a 15401 * roundrobin failover search. It checks to make sure that the index
15402 * does not go beyond the range of the driver allocated bmask dimension
15403 * before clearing the bit.
15404 **/
15405void
15406lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15407{
7d791df7 15408 struct lpfc_fcf_pri *fcf_pri;
15409 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15410 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15411 "2762 FCF (x%x) reached driver's book "
15412 "keeping dimension:x%x\n",
15413 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15414 return;
15415 }
15416 /* Clear the eligible FCF record index bmask */
15417 spin_lock_irq(&phba->hbalock);
15418 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15419 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15420 list_del_init(&fcf_pri->list);
15421 break;
15422 }
15423 }
15424 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 15425 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15426
15427 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 15428 "2791 Clear FCF (x%x) from roundrobin failover "
3804dc84 15429 "bmask\n", fcf_index);
15430}
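
/*
 * Editor's note: a hypothetical sketch of how the bmask helpers above fit
 * together on a failover.  example_fail_over_fcf() is a made-up caller: the
 * failed index is dropped from the rotation, then the next eligible index is
 * fetched in roundrobin order.
 */
static void example_fail_over_fcf(struct lpfc_hba *phba, uint16_t failed_fcf)
{
	uint16_t next;

	lpfc_sli4_fcf_rr_index_clear(phba, failed_fcf);
	next = lpfc_sli4_fcf_rr_next_index_get(phba);
	if (next == LPFC_FCOE_FCF_NEXT_NONE)
		return;	/* no eligible FCF left at any priority level */
	/* ... read and attempt registration of FCF record @next ... */
}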
15431
15432/**
15433 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15434 * @phba: pointer to lpfc hba data structure.
15435 *
15436 * This routine is the completion routine for the rediscover FCF table mailbox
 15437 * command. If the mailbox command returned failure, it falls back to the
 15438 * last-resort recovery path; otherwise it starts the FCF rediscovery wait timer.
15439 **/
15440void
15441lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15442{
15443 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15444 uint32_t shdr_status, shdr_add_status;
15445
15446 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15447
15448 shdr_status = bf_get(lpfc_mbox_hdr_status,
15449 &redisc_fcf->header.cfg_shdr.response);
15450 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15451 &redisc_fcf->header.cfg_shdr.response);
15452 if (shdr_status || shdr_add_status) {
0c9ab6f5 15453 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15454 "2746 Requesting for FCF rediscovery failed "
15455 "status x%x add_status x%x\n",
15456 shdr_status, shdr_add_status);
0c9ab6f5 15457 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
fc2b989b 15458 spin_lock_irq(&phba->hbalock);
0c9ab6f5 15459 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
15460 spin_unlock_irq(&phba->hbalock);
15461 /*
 15462			 * The CVL-event-triggered FCF rediscover request failed;
 15463			 * as a last resort, retry the currently registered FCF entry.
15464 */
15465 lpfc_retry_pport_discovery(phba);
15466 } else {
15467 spin_lock_irq(&phba->hbalock);
0c9ab6f5 15468 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
15469 spin_unlock_irq(&phba->hbalock);
15470 /*
 15471			 * The DEAD-FCF-event-triggered FCF rediscover request
 15472			 * failed; as a last resort, fail over as if a link down
 15473			 * occurred, through to FCF registration.
15474 */
15475 lpfc_sli4_fcf_dead_failthrough(phba);
15476 }
15477 } else {
15478 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 15479 "2775 Start FCF rediscover quiescent timer\n");
15480 /*
 15481		 * Start the FCF rediscovery wait timer for the pending FCF
 15482		 * before rescanning the FCF record table.
15483 */
15484 lpfc_fcf_redisc_wait_start_timer(phba);
0c9ab6f5 15485 }
15486
15487 mempool_free(mbox, phba->mbox_mem_pool);
15488}
15489
15490/**
3804dc84 15491 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
15492 * @phba: pointer to lpfc hba data structure.
15493 *
 15494 * This routine is invoked to request rediscovery of the entire FCF table
15495 * by the port.
15496 **/
15497int
15498lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15499{
15500 LPFC_MBOXQ_t *mbox;
15501 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15502 int rc, length;
15503
 15504	/* Cancel the retry delay timers on all vports before FCF rediscovery */
15505 lpfc_cancel_all_vport_retry_delay_timer(phba);
15506
15507 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15508 if (!mbox) {
15509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15510 "2745 Failed to allocate mbox for "
15511 "requesting FCF rediscover.\n");
15512 return -ENOMEM;
15513 }
15514
15515 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15516 sizeof(struct lpfc_sli4_cfg_mhdr));
15517 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15518 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15519 length, LPFC_SLI4_MBX_EMBED);
15520
15521 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15522 /* Set count to 0 for invalidating the entire FCF database */
15523 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15524
15525 /* Issue the mailbox command asynchronously */
15526 mbox->vport = phba->pport;
15527 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15528 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15529
15530 if (rc == MBX_NOT_FINISHED) {
15531 mempool_free(mbox, phba->mbox_mem_pool);
15532 return -EIO;
15533 }
15534 return 0;
15535}
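
/*
 * Editor's note: a hypothetical caller sketch.  Only the call and the
 * return-code handling are shown; the "xxxx" message number is a
 * placeholder, not a real log ID.
 */
static int example_request_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_sli4_redisc_fcf_table(phba);
	if (rc)	/* -ENOMEM or -EIO: the mailbox could not be issued */
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"xxxx FCF rescan request failed, rc=%d\n", rc);
	return rc;
}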
15536
15537/**
15538 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15539 * @phba: pointer to lpfc hba data structure.
15540 *
15541 * This function is the failover routine as a last resort to the FCF DEAD
 15542 * event when the driver has failed to perform a fast FCF failover.
15543 **/
15544void
15545lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15546{
15547 uint32_t link_state;
15548
15549 /*
15550 * Last resort as FCF DEAD event failover will treat this as
15551 * a link down, but save the link state because we don't want
15552 * it to be changed to Link Down unless it is already down.
15553 */
15554 link_state = phba->link_state;
15555 lpfc_linkdown(phba);
15556 phba->link_state = link_state;
15557
15558 /* Unregister FCF if no devices connected to it */
15559 lpfc_unregister_unused_fcf(phba);
15560}
15561
a0c87cbd 15562/**
026abb87 15563 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
a0c87cbd 15564 * @phba: pointer to lpfc hba data structure.
026abb87 15565 * @rgn23_data: pointer to configure region 23 data.
a0c87cbd 15566 *
15567 * This function gets SLI3 port configure region 23 data through memory dump
15568 * mailbox command. When it successfully retrieves data, the size of the data
15569 * will be returned, otherwise, 0 will be returned.
a0c87cbd 15570 **/
15571static uint32_t
15572lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15573{
15574 LPFC_MBOXQ_t *pmb = NULL;
15575 MAILBOX_t *mb;
026abb87 15576 uint32_t offset = 0;
15577 int rc;
15578
15579 if (!rgn23_data)
15580 return 0;
15581
15582 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15583 if (!pmb) {
15584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15585 "2600 failed to allocate mailbox memory\n");
15586 return 0;
15587 }
15588 mb = &pmb->u.mb;
15589
15590 do {
15591 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15592 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15593
15594 if (rc != MBX_SUCCESS) {
15595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15596 "2601 failed to read config "
15597 "region 23, rc 0x%x Status 0x%x\n",
15598 rc, mb->mbxStatus);
15599 mb->un.varDmp.word_cnt = 0;
15600 }
15601 /*
 15602		 * The dump may return a zero word count when finished, or we
 15603		 * got a mailbox error; either way we are done.
15604 */
15605 if (mb->un.varDmp.word_cnt == 0)
15606 break;
15607 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15608 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15609
15610 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15611 rgn23_data + offset,
15612 mb->un.varDmp.word_cnt);
15613 offset += mb->un.varDmp.word_cnt;
15614 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15615
15616 mempool_free(pmb, phba->mbox_mem_pool);
15617 return offset;
15618}
15619
15620/**
15621 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15622 * @phba: pointer to lpfc hba data structure.
15623 * @rgn23_data: pointer to configure region 23 data.
15624 *
15625 * This function gets SLI4 port configure region 23 data through memory dump
15626 * mailbox command. When it successfully retrieves data, the size of the data
15627 * will be returned, otherwise, 0 will be returned.
15628 **/
15629static uint32_t
15630lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15631{
15632 LPFC_MBOXQ_t *mboxq = NULL;
15633 struct lpfc_dmabuf *mp = NULL;
15634 struct lpfc_mqe *mqe;
15635 uint32_t data_length = 0;
15636 int rc;
15637
15638 if (!rgn23_data)
15639 return 0;
15640
15641 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15642 if (!mboxq) {
15643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15644 "3105 failed to allocate mailbox memory\n");
15645 return 0;
15646 }
15647
15648 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15649 goto out;
15650 mqe = &mboxq->u.mqe;
15651 mp = (struct lpfc_dmabuf *) mboxq->context1;
15652 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15653 if (rc)
15654 goto out;
15655 data_length = mqe->un.mb_words[5];
15656 if (data_length == 0)
15657 goto out;
15658 if (data_length > DMP_RGN23_SIZE) {
15659 data_length = 0;
15660 goto out;
15661 }
15662 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15663out:
15664 mempool_free(mboxq, phba->mbox_mem_pool);
15665 if (mp) {
15666 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15667 kfree(mp);
15668 }
15669 return data_length;
15670}
15671
15672/**
15673 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15674 * @phba: pointer to lpfc hba data structure.
15675 *
 15676 * This function reads region 23 and parses the TLVs for port status to
 15677 * decide whether the user disabled the port. If the TLV indicates the
15678 * port is disabled, the hba_flag is set accordingly.
15679 **/
15680void
15681lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15682{
15683 uint8_t *rgn23_data = NULL;
15684 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15685 uint32_t offset = 0;
15686
15687 /* Get adapter Region 23 data */
15688 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15689 if (!rgn23_data)
15690 goto out;
15691
15692 if (phba->sli_rev < LPFC_SLI_REV4)
15693 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
15694 else {
15695 if_type = bf_get(lpfc_sli_intf_if_type,
15696 &phba->sli4_hba.sli_intf);
15697 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
15698 goto out;
15699 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
15700 }
15701
15702 if (!data_size)
15703 goto out;
15704
15705 /* Check the region signature first */
15706 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
15707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15708 "2619 Config region 23 has bad signature\n");
15709 goto out;
15710 }
15711 offset += 4;
15712
15713 /* Check the data structure version */
15714 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
15715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15716 "2620 Config region 23 has bad version\n");
15717 goto out;
15718 }
15719 offset += 4;
15720
15721 /* Parse TLV entries in the region */
15722 while (offset < data_size) {
15723 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
15724 break;
15725 /*
 15726		 * If the TLV is not a driver-specific TLV or the driver id is
 15727		 * not the Linux driver id, skip the record.
15728 */
15729 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
15730 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
15731 (rgn23_data[offset + 3] != 0)) {
15732 offset += rgn23_data[offset + 1] * 4 + 4;
15733 continue;
15734 }
15735
15736 /* Driver found a driver specific TLV in the config region */
15737 sub_tlv_len = rgn23_data[offset + 1] * 4;
15738 offset += 4;
15739 tlv_offset = 0;
15740
15741 /*
15742 * Search for configured port state sub-TLV.
15743 */
15744 while ((offset < data_size) &&
15745 (tlv_offset < sub_tlv_len)) {
15746 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
15747 offset += 4;
15748 tlv_offset += 4;
15749 break;
15750 }
15751 if (rgn23_data[offset] != PORT_STE_TYPE) {
 15752				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
 15753				offset += rgn23_data[offset + 1] * 4 + 4;
15754 continue;
15755 }
15756
15757 /* This HBA contains PORT_STE configured */
15758 if (!rgn23_data[offset + 2])
15759 phba->hba_flag |= LINK_DISABLED;
15760
15761 goto out;
15762 }
15763 }
026abb87 15764
a0c87cbd 15765out:
15766 kfree(rgn23_data);
15767 return;
15768}
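
/*
 * Editor's note: a simplified model (not driver code) of the region 23
 * record layout walked above.  Each record is a 4-byte header (byte 0 =
 * type, byte 1 = payload length in 32-bit words) followed by the payload;
 * a record of type LPFC_REGION23_LAST_REC terminates the list.
 */
static const uint8_t *example_rgn23_next_rec(const uint8_t *rec)
{
	return rec + rec[1] * 4 + 4;	/* header (4 bytes) + payload */
}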
695a814e 15769
15770/**
15771 * lpfc_wr_object - write an object to the firmware
15772 * @phba: HBA structure that indicates port to create a queue on.
15773 * @dmabuf_list: list of dmabufs to write to the port.
15774 * @size: the total byte value of the objects to write to the port.
15775 * @offset: the current offset to be used to start the transfer.
15776 *
15777 * This routine will create a wr_object mailbox command to send to the port.
 15778 * The mailbox command will be constructed using the dma buffers described in
 15779 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 15780 * BDEs as the embedded mailbox can support. The @offset variable will be
15781 * used to indicate the starting offset of the transfer and will also return
15782 * the offset after the write object mailbox has completed. @size is used to
15783 * determine the end of the object and whether the eof bit should be set.
15784 *
 15785 * Returns 0 if successful, and @offset will contain the new offset to use
 15786 * for the next write.
 15787 * Returns a negative value for error cases.
15788 **/
15789int
15790lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
15791 uint32_t size, uint32_t *offset)
15792{
15793 struct lpfc_mbx_wr_object *wr_object;
15794 LPFC_MBOXQ_t *mbox;
15795 int rc = 0, i = 0;
15796 uint32_t shdr_status, shdr_add_status;
15797 uint32_t mbox_tmo;
15798 union lpfc_sli4_cfg_shdr *shdr;
15799 struct lpfc_dmabuf *dmabuf;
15800 uint32_t written = 0;
15801
15802 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15803 if (!mbox)
15804 return -ENOMEM;
15805
15806 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15807 LPFC_MBOX_OPCODE_WRITE_OBJECT,
15808 sizeof(struct lpfc_mbx_wr_object) -
15809 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
15810
15811 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
15812 wr_object->u.request.write_offset = *offset;
15813 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
15814 wr_object->u.request.object_name[0] =
15815 cpu_to_le32(wr_object->u.request.object_name[0]);
15816 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
15817 list_for_each_entry(dmabuf, dmabuf_list, list) {
15818 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
15819 break;
15820 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
15821 wr_object->u.request.bde[i].addrHigh =
15822 putPaddrHigh(dmabuf->phys);
15823 if (written + SLI4_PAGE_SIZE >= size) {
15824 wr_object->u.request.bde[i].tus.f.bdeSize =
15825 (size - written);
15826 written += (size - written);
15827 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
15828 } else {
15829 wr_object->u.request.bde[i].tus.f.bdeSize =
15830 SLI4_PAGE_SIZE;
15831 written += SLI4_PAGE_SIZE;
15832 }
15833 i++;
15834 }
15835 wr_object->u.request.bde_count = i;
15836 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
15837 if (!phba->sli4_hba.intr_enable)
15838 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15839 else {
a183a15f 15840 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15841 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15842 }
15843 /* The IOCTL status is embedded in the mailbox subheader. */
15844 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
15845 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15846 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15847 if (rc != MBX_TIMEOUT)
15848 mempool_free(mbox, phba->mbox_mem_pool);
15849 if (shdr_status || shdr_add_status || rc) {
15850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15851 "3025 Write Object mailbox failed with "
15852 "status x%x add_status x%x, mbx status x%x\n",
15853 shdr_status, shdr_add_status, rc);
15854 rc = -ENXIO;
15855 } else
15856 *offset += wr_object->u.response.actual_write_length;
15857 return rc;
15858}
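
/*
 * Editor's note: a hypothetical caller sketch for the chunked write above.
 * @offset is fed back in unchanged between calls so the port resumes where
 * the previous mailbox left off.  example_map_chunk() is a made-up helper
 * standing in for the code that refills the dmabuf list for the current
 * offset; it is declared here only to keep the sketch self-contained.
 */
static int example_map_chunk(struct lpfc_hba *phba, struct list_head *list,
			     const uint8_t *image, uint32_t offset);

static int example_write_object(struct lpfc_hba *phba, const uint8_t *image,
				uint32_t size)
{
	LIST_HEAD(dmabuf_list);
	uint32_t offset = 0;
	int rc = 0;

	while (offset < size && !rc) {
		if (example_map_chunk(phba, &dmabuf_list, image, offset))
			return -ENOMEM;	/* hypothetical helper failed */
		rc = lpfc_wr_object(phba, &dmabuf_list, size, &offset);
	}
	return rc;
}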
15859
15860/**
15861 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
15862 * @vport: pointer to vport data structure.
15863 *
 15864 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 15865 * and REG_VPI mailbox commands associated with the vport. This function
 15866 * is called when the driver wants to restart discovery of the vport due to
 15867 * a Clear Virtual Link event.
15868 **/
15869void
15870lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
15871{
15872 struct lpfc_hba *phba = vport->phba;
15873 LPFC_MBOXQ_t *mb, *nextmb;
15874 struct lpfc_dmabuf *mp;
78730cfe 15875 struct lpfc_nodelist *ndlp;
d439d286 15876 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 15877 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 15878 LIST_HEAD(mbox_cmd_list);
63e801ce 15879 uint8_t restart_loop;
695a814e 15880
d439d286 15881 /* Clean up internally queued mailbox commands with the vport */
15882 spin_lock_irq(&phba->hbalock);
15883 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
15884 if (mb->vport != vport)
15885 continue;
15886
15887 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
15888 (mb->u.mb.mbxCommand != MBX_REG_VPI))
15889 continue;
15890
15891 list_del(&mb->list);
15892 list_add_tail(&mb->list, &mbox_cmd_list);
15893 }
15894 /* Clean up active mailbox command with the vport */
15895 mb = phba->sli.mbox_active;
15896 if (mb && (mb->vport == vport)) {
15897 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
15898 (mb->u.mb.mbxCommand == MBX_REG_VPI))
15899 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15900 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15901 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
15902 /* Put reference count for delayed processing */
15903 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
15904 /* Unregister the RPI when mailbox complete */
15905 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
15906 }
15907 }
15908 /* Cleanup any mailbox completions which are not yet processed */
15909 do {
15910 restart_loop = 0;
15911 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
15912 /*
 15913			 * If this mailbox is already processed or it is
 15914			 * for another vport, ignore it.
15915 */
15916 if ((mb->vport != vport) ||
15917 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
15918 continue;
15919
15920 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
15921 (mb->u.mb.mbxCommand != MBX_REG_VPI))
15922 continue;
15923
15924 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15925 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15926 ndlp = (struct lpfc_nodelist *)mb->context2;
15927 /* Unregister the RPI when mailbox complete */
15928 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
15929 restart_loop = 1;
15930 spin_unlock_irq(&phba->hbalock);
15931 spin_lock(shost->host_lock);
15932 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15933 spin_unlock(shost->host_lock);
15934 spin_lock_irq(&phba->hbalock);
15935 break;
15936 }
15937 }
15938 } while (restart_loop);
15939
15940 spin_unlock_irq(&phba->hbalock);
15941
15942 /* Release the cleaned-up mailbox commands */
15943 while (!list_empty(&mbox_cmd_list)) {
15944 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
15945 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
15946 mp = (struct lpfc_dmabuf *) (mb->context1);
15947 if (mp) {
15948 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
15949 kfree(mp);
15950 }
78730cfe 15951 ndlp = (struct lpfc_nodelist *) mb->context2;
d439d286 15952 mb->context2 = NULL;
78730cfe 15953 if (ndlp) {
ec21b3b0 15954 spin_lock(shost->host_lock);
589a52d6 15955 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 15956 spin_unlock(shost->host_lock);
78730cfe 15957 lpfc_nlp_put(ndlp);
78730cfe 15958 }
695a814e 15959 }
15960 mempool_free(mb, phba->mbox_mem_pool);
15961 }
15962
15963 /* Release the ndlp with the cleaned-up active mailbox command */
15964 if (act_mbx_ndlp) {
15965 spin_lock(shost->host_lock);
15966 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
15967 spin_unlock(shost->host_lock);
15968 lpfc_nlp_put(act_mbx_ndlp);
695a814e 15969 }
15970}
15971
15972/**
15973 * lpfc_drain_txq - Drain the txq
15974 * @phba: Pointer to HBA context object.
15975 *
 15976 * This function attempts to submit IOCBs on the txq
 15977 * to the adapter. For SLI4 adapters, the txq contains
 15978 * ELS IOCBs that have been deferred because there
 15979 * are no SGLs available. This congestion can occur with large
15980 * vport counts during node discovery.
15981 **/
15982
15983uint32_t
15984lpfc_drain_txq(struct lpfc_hba *phba)
15985{
15986 LIST_HEAD(completions);
15987 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 15988	struct lpfc_iocbq *piocbq = NULL;
15989 unsigned long iflags = 0;
15990 char *fail_msg = NULL;
15991 struct lpfc_sglq *sglq;
15992 union lpfc_wqe wqe;
15993
15994 spin_lock_irqsave(&phba->hbalock, iflags);
15995 if (pring->txq_cnt > pring->txq_max)
15996 pring->txq_max = pring->txq_cnt;
15997
15998 spin_unlock_irqrestore(&phba->hbalock, iflags);
15999
16000 while (pring->txq_cnt) {
16001 spin_lock_irqsave(&phba->hbalock, iflags);
16002
19ca7609 16003 piocbq = lpfc_sli_ringtx_get(phba, pring);
16004 if (!piocbq) {
16005 spin_unlock_irqrestore(&phba->hbalock, iflags);
16006 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16007 "2823 txq empty and txq_cnt is %d\n ",
16008 pring->txq_cnt);
16009 break;
16010 }
19ca7609 16011 sglq = __lpfc_sli_get_sglq(phba, piocbq);
2a9bf3d0 16012 if (!sglq) {
19ca7609 16013 __lpfc_sli_ringtx_put(phba, pring, piocbq);
16014 spin_unlock_irqrestore(&phba->hbalock, iflags);
16015 break;
16016 }
16017
 16018		/* The xri and iocb resources are secured;
 16019		 * attempt to issue the request.
16020 */
6d368e53 16021 piocbq->sli4_lxritag = sglq->sli4_lxritag;
16022 piocbq->sli4_xritag = sglq->sli4_xritag;
16023 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
16024 fail_msg = "to convert bpl to sgl";
16025 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
16026 fail_msg = "to convert iocb to wqe";
16027 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
16028 fail_msg = " - Wq is full";
16029 else
16030 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
16031
16032 if (fail_msg) {
16033 /* Failed means we can't issue and need to cancel */
16034 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16035 "2822 IOCB failed %s iotag 0x%x "
16036 "xri 0x%x\n",
16037 fail_msg,
16038 piocbq->iotag, piocbq->sli4_xritag);
16039 list_add_tail(&piocbq->list, &completions);
16040 }
16041 spin_unlock_irqrestore(&phba->hbalock, iflags);
16042 }
16043
16044 /* Cancel all the IOCBs that cannot be issued */
16045 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16046 IOERR_SLI_ABORTED);
16047
16048 return pring->txq_cnt;
16049}