[SCSI] lpfc 8.3.39: Fix driver issues with large s/g lists for BlockGuard
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
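
/*
 * Illustrative sketch only (not part of the driver): one way a caller
 * might post a WQE while holding the hbalock, as lpfc_sli4_wq_put()
 * requires. The choice of the ELS work queue here is an assumption for
 * the example.
 *
 *	union lpfc_wqe wqe;
 *	unsigned long iflags;
 *	int rc;
 *
 *	memset(&wqe, 0, sizeof(wqe));
 *	// ... build the WQE ...
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -ENOMEM)
 *		// queue full: host index caught up with hba_index
 */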

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
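
/*
 * Worked example of the circular-index arithmetic above (illustrative):
 * with entry_count = 256, hba_index = 250 and index = 3, the loop steps
 * hba_index through 251, 252, 253, 254, 255, 0, 1, 2, 3 and returns 9.
 */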

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
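
/*
 * Illustrative sketch only (not part of the driver): the typical
 * consume-then-rearm pattern built from lpfc_sli4_eq_get() and
 * lpfc_sli4_eq_release(). The per-EQE handler named here is hypothetical.
 *
 *	struct lpfc_eqe *eqe;
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		handle_one_eqe(phba, eqe);	// hypothetical handler
 *	// clear valid bits for everything processed and rearm the EQ
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */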

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
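
/*
 * Illustrative sketch only (not part of the driver): draining a CQ with
 * lpfc_sli4_cq_get() and then releasing the consumed entries. The
 * per-CQE handler named here is hypothetical.
 *
 *	struct lpfc_cqe *cqe;
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL)
 *		handle_one_cqe(phba, cq, cqe);	// hypothetical handler
 *	// hand the consumed entries back and rearm the CQ
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */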

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful; -ENOMEM if either queue is missing, -EINVAL on a queue type or
 * index mismatch, and -EBUSY if the queue is full.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}
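
/*
 * Illustrative sketch only (not part of the driver): posting a paired
 * header/data buffer, under the hbalock, with lpfc_sli4_rq_put(). The
 * hdr_rq/dat_rq queue names and the dma_phys_* addresses are assumptions
 * for the example.
 *
 *	struct lpfc_rqe hrqe, drqe;
 *	int put_index;
 *
 *	hrqe.address_lo = putPaddrLow(dma_phys_hdr);
 *	hrqe.address_hi = putPaddrHigh(dma_phys_hdr);
 *	drqe.address_lo = putPaddrLow(dma_phys_dat);
 *	drqe.address_hi = putPaddrHigh(dma_phys_dat);
 *	put_index = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq,
 *				     phba->sli4_hba.dat_rq, &hrqe, &drqe);
 *	if (put_index < 0)
 *		// -EBUSY when full, -EINVAL on a queue mismatch
 */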

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect consumption
 * of one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. It checks if stop_time
 * (ratov from setting rrq active) has been reached; if it has and
 * the send_rrq flag is set then it will call lpfc_send_rrq. If the
 * send_rrq flag is not set then it will just call the routine to
 * clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did){
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}


/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
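
/*
 * Illustrative sketch only (not part of the driver): the intended
 * pairing of the RRQ helpers. After an exchange is aborted, the XRI is
 * marked active so it is not reused for this target until RATOV expires;
 * allocation paths then test the bit before reusing the XRI. The values
 * below are hypothetical.
 *
 *	// quarantine the xri for this node, optionally sending an RRQ ELS
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
 *
 *	// later, before reusing an xri for this node:
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		// pick a different xri
 */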

/**
 * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty the allocation is successful and it returns a pointer
 * to the newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
		 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
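
/*
 * Illustrative sketch only (not part of the driver): allocating an iocb
 * from the pool, using it, and returning it. lpfc_sli_get_iocbq() and
 * lpfc_sli_release_iocbq() take the hbalock internally, so no lock is
 * held by the caller here.
 *
 *	struct lpfc_iocbq *iocbq;
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return -ENOMEM;
 *	// ... build and issue the iocb; on error, give it back:
 *	lpfc_sli_release_iocbq(phba, iocbq);
 */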

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	/*
	** This should have been removed from the txcmplq before calling
	** iocbq_release. The normal completion
	** path should have already done the list_del_init.
	*/
	if (unlikely(!list_empty(&iocbq->list))) {
		if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
			iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_del_init(&iocbq->list);
	}

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	** This should have been removed from the txcmplq before calling
	** iocbq_release. The normal completion
	** path should have already done the list_del_init.
	*/
	if (unlikely(!list_empty(&iocbq->list)))
		list_del_init(&iocbq->list);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
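
/*
 * Illustrative sketch only (not part of the driver): failing a batch of
 * queued IOCBs, e.g. when tearing a ring down. Splicing the txq onto a
 * local list under the hbalock is an assumption for the example; the
 * status/word4 pair shown is the usual local-reject encoding.
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */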

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
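
/*
 * Illustrative sketch only (not part of the driver): how a ring event
 * handler might dispatch on the type returned above. The handler actions
 * in the branches are hypothetical.
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:
 *		// match against the txcmplq and run the completion
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		// hand the sequence to the upper-layer protocol
 *		break;
 *	case LPFC_ABORT_IOCB:
 *	case LPFC_UNKNOWN_IOCB:
 *	default:
 *		break;
 *	}
 */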

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1557
e59058c4 1558/**
3621a710 1559 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1560 * @phba: Pointer to HBA context object.
1561 * @pring: Pointer to driver SLI ring object.
1562 *
1563 * The caller is not required to hold any lock for calling this function.
1564 * This function updates the chip attention bits for the ring to inform firmware
1565 * that there is pending work to be done for this ring and requests an
1566 * interrupt when there is space available in the ring. This function is
1567 * called when the driver is unable to post more iocbs to the ring due
1568 * to unavailability of space in the ring.
1569 **/
dea3101e 1570static void
2e0fef85 1571lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1572{
1573 int ringno = pring->ringno;
1574
1575 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1576
1577 wmb();
1578
1579 /*
1580 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1581 * The HBA will tell us when an IOCB entry is available.
1582 */
1583 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1584 readl(phba->CAregaddr); /* flush */
1585
1586 pring->stats.iocb_cmd_full++;
1587}
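/*
 * Illustrative sketch: the Chip Attention register carves out a 4-bit
 * field per ring, so the per-ring attention bits above are shifted by
 * (ringno * 4). "example_ca_bits" is a hypothetical helper, not a
 * driver function.
 */
static inline uint32_t example_ca_bits(int ringno, uint32_t bits)
{
	return bits << (ringno * 4);	/* e.g. CA_R0ATT|CA_R0CE_REQ for ring 0 */
}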
1588
e59058c4 1589/**
3621a710 1590 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1591 * @phba: Pointer to HBA context object.
1592 * @pring: Pointer to driver SLI ring object.
1593 *
1594 * This function updates the chip attention register bit for the
1595 * given ring to inform HBA that there is more work to be done
1596 * in this ring. The caller is not required to hold any lock.
1597 **/
dea3101e 1598static void
2e0fef85 1599lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1600{
1601 int ringno = pring->ringno;
1602
1603 /*
1604 * Tell the HBA that there is work to do in this ring.
1605 */
34b02dcd
JS
1606 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1607 wmb();
1608 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1609 readl(phba->CAregaddr); /* flush */
1610 }
dea3101e 1611}
1612
e59058c4 1613/**
3621a710 1614 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1615 * @phba: Pointer to HBA context object.
1616 * @pring: Pointer to driver SLI ring object.
1617 *
1618 * This function is called with hbalock held to post pending iocbs
1619 * in the txq to the firmware. This function is called when driver
1620 * detects space available in the ring.
1621 **/
dea3101e 1622static void
2e0fef85 1623lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1624{
1625 IOCB_t *iocb;
1626 struct lpfc_iocbq *nextiocb;
1627
1628 /*
1629 * Check to see if:
1630 * (a) there is anything on the txq to send
1631 * (b) link is up
1632 * (c) link attention events can be processed (fcp ring only)
1633 * (d) IOCB processing is not blocked by the outstanding mbox command.
1634 */
0e9bb8d7
JS
1635
1636 if (lpfc_is_link_up(phba) &&
1637 (!list_empty(&pring->txq)) &&
dea3101e 1638 (pring->ringno != phba->sli.fcp_ring ||
0b727fea 1639 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1640
1641 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1642 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1643 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1644
1645 if (iocb)
1646 lpfc_sli_update_ring(phba, pring);
1647 else
1648 lpfc_sli_update_full_ring(phba, pring);
1649 }
1650
1651 return;
1652}
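/*
 * Illustrative sketch of a hypothetical caller: lpfc_sli_resume_iocb()
 * expects the hbalock to be held, so a caller running in process
 * context would wrap it as shown here ("example_kick_txq" is not a
 * real driver function).
 */
static void example_kick_txq(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	lpfc_sli_resume_iocb(phba, pring);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}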
1653
e59058c4 1654/**
3621a710 1655 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1656 * @phba: Pointer to HBA context object.
1657 * @hbqno: HBQ number.
1658 *
1659 * This function is called with hbalock held to get the next
 1660 * available slot for the given HBQ. If there is a free slot
 1661 * available for the HBQ it will return a pointer to the next available
 1662 * HBQ entry, otherwise it will return NULL.
1663 **/
a6ababd2 1664static struct lpfc_hbq_entry *
ed957684
JS
1665lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1666{
1667 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1668
1669 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1670 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1671 hbqp->next_hbqPutIdx = 0;
1672
1673 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1674 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1675 uint32_t getidx = le32_to_cpu(raw_index);
1676
1677 hbqp->local_hbqGetIdx = getidx;
1678
1679 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1680 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1681 LOG_SLI | LOG_VPORT,
e8b62011 1682 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1683 "%u is > than hbqp->entry_count %u\n",
e8b62011 1684 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1685 hbqp->entry_count);
1686
1687 phba->link_state = LPFC_HBA_ERROR;
1688 return NULL;
1689 }
1690
1691 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1692 return NULL;
1693 }
1694
51ef4c26
JS
1695 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1696 hbqp->hbqPutIdx;
ed957684
JS
1697}
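/*
 * Illustrative sketch: the HBQ put index wraps modulo entry_count, so
 * for entry_count = 4 successive put indices run 1, 2, 3, 0, 1, ...
 * The queue is treated as full when the next put index would catch up
 * with local_hbqGetIdx. "example_next_put" is a hypothetical helper
 * mirroring the wrap logic above.
 */
static inline uint32_t example_next_put(uint32_t putidx, uint32_t entry_count)
{
	return (putidx + 1 >= entry_count) ? 0 : putidx + 1;
}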
1698
e59058c4 1699/**
3621a710 1700 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1701 * @phba: Pointer to HBA context object.
1702 *
1703 * This function is called with no lock held to free all the
1704 * hbq buffers while uninitializing the SLI interface. It also
1705 * frees the HBQ buffers returned by the firmware but not yet
1706 * processed by the upper layers.
1707 **/
ed957684
JS
1708void
1709lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1710{
92d7f7b0
JS
1711 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1712 struct hbq_dmabuf *hbq_buf;
3163f725 1713 unsigned long flags;
51ef4c26 1714 int i, hbq_count;
3163f725 1715 uint32_t hbqno;
ed957684 1716
51ef4c26 1717 hbq_count = lpfc_sli_hbq_count();
ed957684 1718 /* Return all memory used by all HBQs */
3163f725 1719 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1720 for (i = 0; i < hbq_count; ++i) {
1721 list_for_each_entry_safe(dmabuf, next_dmabuf,
1722 &phba->hbqs[i].hbq_buffer_list, list) {
1723 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1724 list_del(&hbq_buf->dbuf.list);
1725 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1726 }
a8adb832 1727 phba->hbqs[i].buffer_count = 0;
ed957684 1728 }
3163f725 1729 	/* Return all HBQ buffers that are in flight */
3772a991
JS
1730 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1731 list) {
3163f725
JS
1732 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1733 list_del(&hbq_buf->dbuf.list);
1734 if (hbq_buf->tag == -1) {
1735 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1736 (phba, hbq_buf);
1737 } else {
1738 hbqno = hbq_buf->tag >> 16;
1739 if (hbqno >= LPFC_MAX_HBQS)
1740 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1741 (phba, hbq_buf);
1742 else
1743 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1744 hbq_buf);
1745 }
1746 }
1747
1748 /* Mark the HBQs not in use */
1749 phba->hbq_in_use = 0;
1750 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
1751}
1752
e59058c4 1753/**
3621a710 1754 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
1755 * @phba: Pointer to HBA context object.
1756 * @hbqno: HBQ number.
1757 * @hbq_buf: Pointer to HBQ buffer.
1758 *
1759 * This function is called with the hbalock held to post a
1760 * hbq buffer to the firmware. If the function finds an empty
 1761 * slot in the HBQ, it will post the buffer. The function will return
 1762 * zero if it successfully posts the buffer, otherwise it will
 1763 * return an error.
1764 **/
3772a991 1765static int
ed957684 1766lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 1767 struct hbq_dmabuf *hbq_buf)
3772a991
JS
1768{
1769 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1770}
1771
1772/**
1773 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1774 * @phba: Pointer to HBA context object.
1775 * @hbqno: HBQ number.
1776 * @hbq_buf: Pointer to HBQ buffer.
1777 *
1778 * This function is called with the hbalock held to post a hbq buffer to the
1779 * firmware. If the function finds an empty slot in the HBQ, it will post the
1780 * buffer and place it on the hbq_buffer_list. The function will return zero if
 1781 * it successfully posts the buffer, otherwise it will return an error.
1782 **/
1783static int
1784lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1785 struct hbq_dmabuf *hbq_buf)
ed957684
JS
1786{
1787 struct lpfc_hbq_entry *hbqe;
92d7f7b0 1788 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684
JS
1789
1790 /* Get next HBQ entry slot to use */
1791 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1792 if (hbqe) {
1793 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1794
92d7f7b0
JS
1795 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1796 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
51ef4c26 1797 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
ed957684 1798 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
1799 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1800 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1801 /* Sync SLIM */
ed957684
JS
1802 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1803 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 1804 /* flush */
ed957684 1805 readl(phba->hbq_put + hbqno);
51ef4c26 1806 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
1807 return 0;
1808 } else
1809 return -ENOMEM;
ed957684
JS
1810}
1811
4f774513
JS
1812/**
1813 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1814 * @phba: Pointer to HBA context object.
1815 * @hbqno: HBQ number.
1816 * @hbq_buf: Pointer to HBQ buffer.
1817 *
1818 * This function is called with the hbalock held to post an RQE to the SLI4
1819 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1820 * the hbq_buffer_list and return zero, otherwise it will return an error.
1821 **/
1822static int
1823lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1824 struct hbq_dmabuf *hbq_buf)
1825{
1826 int rc;
1827 struct lpfc_rqe hrqe;
1828 struct lpfc_rqe drqe;
1829
1830 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1831 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1832 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1833 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1834 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1835 &hrqe, &drqe);
1836 if (rc < 0)
1837 return rc;
1838 hbq_buf->tag = rc;
1839 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1840 return 0;
1841}
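/*
 * Illustrative sketch: an hbq buffer tag packs the HBQ number into the
 * upper 16 bits and a per-buffer index into the lower 16 bits, matching
 * how lpfc_sli_hbqbuf_fill_hbqs() builds tags and how
 * lpfc_sli_hbqbuf_find() decodes them below. These helpers are
 * hypothetical and for illustration only.
 */
static inline uint32_t example_make_hbq_tag(uint32_t hbqno, uint32_t index)
{
	return (hbqno << 16) | (index & 0xffff);
}

static inline uint32_t example_hbqno_from_tag(uint32_t tag)
{
	return tag >> 16;
}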
1842
e59058c4 1843/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
1844static struct lpfc_hbq_init lpfc_els_hbq = {
1845 .rn = 1,
def9c7a9 1846 .entry_count = 256,
92d7f7b0
JS
1847 .mask_count = 0,
1848 .profile = 0,
51ef4c26 1849 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 1850 .buffer_count = 0,
a257bf90
JS
1851 .init_count = 40,
1852 .add_count = 40,
92d7f7b0 1853};
ed957684 1854
e59058c4 1855/* HBQ for the extra ring if needed */
51ef4c26
JS
1856static struct lpfc_hbq_init lpfc_extra_hbq = {
1857 .rn = 1,
1858 .entry_count = 200,
1859 .mask_count = 0,
1860 .profile = 0,
1861 .ring_mask = (1 << LPFC_EXTRA_RING),
1862 .buffer_count = 0,
1863 .init_count = 0,
1864 .add_count = 5,
1865};
1866
e59058c4 1867/* Array of HBQs */
78b2d852 1868struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0 1869 &lpfc_els_hbq,
51ef4c26 1870 &lpfc_extra_hbq,
92d7f7b0 1871};
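/*
 * Illustrative sketch: lpfc_hbq_defs[] is indexed by HBQ number, so the
 * tuning parameters for a given hbqno are reached as in this
 * hypothetical helper.
 */
static inline uint32_t example_hbq_add_count(uint32_t hbqno)
{
	return lpfc_hbq_defs[hbqno]->add_count;	/* 40 for the ELS HBQ above */
}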
ed957684 1872
e59058c4 1873/**
3621a710 1874 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
1875 * @phba: Pointer to HBA context object.
1876 * @hbqno: HBQ number.
1877 * @count: Number of HBQ buffers to be posted.
1878 *
d7c255b2
JS
1879 * This function is called with no lock held to post more hbq buffers to the
1880 * given HBQ. The function returns the number of HBQ buffers successfully
1881 * posted.
e59058c4 1882 **/
311464ec 1883static int
92d7f7b0 1884lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 1885{
d7c255b2 1886 uint32_t i, posted = 0;
3163f725 1887 unsigned long flags;
92d7f7b0 1888 struct hbq_dmabuf *hbq_buffer;
d7c255b2 1889 LIST_HEAD(hbq_buf_list);
eafe1df9 1890 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 1891 return 0;
51ef4c26 1892
d7c255b2
JS
1893 if ((phba->hbqs[hbqno].buffer_count + count) >
1894 lpfc_hbq_defs[hbqno]->entry_count)
1895 count = lpfc_hbq_defs[hbqno]->entry_count -
1896 phba->hbqs[hbqno].buffer_count;
1897 if (!count)
1898 return 0;
1899 /* Allocate HBQ entries */
1900 for (i = 0; i < count; i++) {
1901 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1902 if (!hbq_buffer)
1903 break;
1904 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1905 }
3163f725
JS
1906 /* Check whether HBQ is still in use */
1907 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 1908 if (!phba->hbq_in_use)
d7c255b2
JS
1909 goto err;
1910 while (!list_empty(&hbq_buf_list)) {
1911 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1912 dbuf.list);
1913 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1914 (hbqno << 16));
3772a991 1915 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 1916 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
1917 posted++;
1918 } else
51ef4c26 1919 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 1920 }
3163f725 1921 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1922 return posted;
1923err:
eafe1df9 1924 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
1925 while (!list_empty(&hbq_buf_list)) {
1926 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1927 dbuf.list);
1928 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1929 }
1930 return 0;
ed957684
JS
1931}
1932
e59058c4 1933/**
3621a710 1934 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
1935 * @phba: Pointer to HBA context object.
1936 * @qno: HBQ number.
1937 *
1938 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
1939 * is called with no lock held. The function returns the number of HBQ entries
1940 * successfully allocated.
e59058c4 1941 **/
92d7f7b0
JS
1942int
1943lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 1944{
def9c7a9
JS
1945 if (phba->sli_rev == LPFC_SLI_REV4)
1946 return 0;
1947 else
1948 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1949 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 1950}
ed957684 1951
e59058c4 1952/**
3621a710 1953 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
1954 * @phba: Pointer to HBA context object.
1955 * @qno: HBQ queue number.
1956 *
1957 * This function is called from SLI initialization code path with
1958 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 1959 * function returns the number of HBQ entries successfully allocated.
e59058c4 1960 **/
a6ababd2 1961static int
92d7f7b0
JS
1962lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1963{
def9c7a9
JS
1964 if (phba->sli_rev == LPFC_SLI_REV4)
1965 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 1966 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
1967 else
1968 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1969 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
1970}
1971
3772a991
JS
1972/**
1973 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 1974 * @rb_list: Pointer to the driver hbq buffer list.
1976 *
1977 * This function removes the first hbq buffer on an hbq list and returns a
1978 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1979 **/
1980static struct hbq_dmabuf *
1981lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1982{
1983 struct lpfc_dmabuf *d_buf;
1984
1985 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1986 if (!d_buf)
1987 return NULL;
1988 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1989}
1990
e59058c4 1991/**
3621a710 1992 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
1993 * @phba: Pointer to HBA context object.
1994 * @tag: Tag of the hbq buffer.
1995 *
1996 * This function is called with hbalock held. This function searches
1997 * for the hbq buffer associated with the given tag in the hbq buffer
 1998 * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1999 * it returns NULL.
2000 **/
a6ababd2 2001static struct hbq_dmabuf *
92d7f7b0 2002lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 2003{
92d7f7b0
JS
2004 struct lpfc_dmabuf *d_buf;
2005 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
2006 uint32_t hbqno;
2007
2008 hbqno = tag >> 16;
a0a74e45 2009 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 2010 return NULL;
ed957684 2011
3772a991 2012 spin_lock_irq(&phba->hbalock);
51ef4c26 2013 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 2014 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 2015 if (hbq_buf->tag == tag) {
3772a991 2016 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2017 return hbq_buf;
ed957684
JS
2018 }
2019 }
3772a991 2020 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2021 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2022 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2023 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2024 return NULL;
ed957684
JS
2025}
2026
e59058c4 2027/**
3621a710 2028 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2029 * @phba: Pointer to HBA context object.
2030 * @hbq_buffer: Pointer to HBQ buffer.
2031 *
 2032 * This function is called with the hbalock held. This function gives back
2033 * the hbq buffer to firmware. If the HBQ does not have space to
2034 * post the buffer, it will free the buffer.
2035 **/
ed957684 2036void
51ef4c26 2037lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2038{
2039 uint32_t hbqno;
2040
51ef4c26
JS
2041 if (hbq_buffer) {
2042 hbqno = hbq_buffer->tag >> 16;
3772a991 2043 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2044 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2045 }
2046}
2047
e59058c4 2048/**
3621a710 2049 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2050 * @mbxCommand: mailbox command code.
2051 *
2052 * This function is called by the mailbox event handler function to verify
2053 * that the completed mailbox command is a legitimate mailbox command. If the
2054 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2055 * and the mailbox event handler will take the HBA offline.
2056 **/
dea3101e 2057static int
2058lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2059{
2060 uint8_t ret;
2061
2062 switch (mbxCommand) {
2063 case MBX_LOAD_SM:
2064 case MBX_READ_NV:
2065 case MBX_WRITE_NV:
a8adb832 2066 case MBX_WRITE_VPARMS:
dea3101e 2067 case MBX_RUN_BIU_DIAG:
2068 case MBX_INIT_LINK:
2069 case MBX_DOWN_LINK:
2070 case MBX_CONFIG_LINK:
2071 case MBX_CONFIG_RING:
2072 case MBX_RESET_RING:
2073 case MBX_READ_CONFIG:
2074 case MBX_READ_RCONFIG:
2075 case MBX_READ_SPARM:
2076 case MBX_READ_STATUS:
2077 case MBX_READ_RPI:
2078 case MBX_READ_XRI:
2079 case MBX_READ_REV:
2080 case MBX_READ_LNK_STAT:
2081 case MBX_REG_LOGIN:
2082 case MBX_UNREG_LOGIN:
dea3101e 2083 case MBX_CLEAR_LA:
2084 case MBX_DUMP_MEMORY:
2085 case MBX_DUMP_CONTEXT:
2086 case MBX_RUN_DIAGS:
2087 case MBX_RESTART:
2088 case MBX_UPDATE_CFG:
2089 case MBX_DOWN_LOAD:
2090 case MBX_DEL_LD_ENTRY:
2091 case MBX_RUN_PROGRAM:
2092 case MBX_SET_MASK:
09372820 2093 case MBX_SET_VARIABLE:
dea3101e 2094 case MBX_UNREG_D_ID:
41415862 2095 case MBX_KILL_BOARD:
dea3101e 2096 case MBX_CONFIG_FARP:
41415862 2097 case MBX_BEACON:
dea3101e 2098 case MBX_LOAD_AREA:
2099 case MBX_RUN_BIU_DIAG64:
2100 case MBX_CONFIG_PORT:
2101 case MBX_READ_SPARM64:
2102 case MBX_READ_RPI64:
2103 case MBX_REG_LOGIN64:
76a95d75 2104 case MBX_READ_TOPOLOGY:
09372820 2105 case MBX_WRITE_WWN:
dea3101e 2106 case MBX_SET_DEBUG:
2107 case MBX_LOAD_EXP_ROM:
57127f15 2108 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2109 case MBX_REG_VPI:
2110 case MBX_UNREG_VPI:
858c9f6c 2111 case MBX_HEARTBEAT:
84774a4d
JS
2112 case MBX_PORT_CAPABILITIES:
2113 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2114 case MBX_SLI4_CONFIG:
2115 case MBX_SLI4_REQ_FTRS:
2116 case MBX_REG_FCFI:
2117 case MBX_UNREG_FCFI:
2118 case MBX_REG_VFI:
2119 case MBX_UNREG_VFI:
2120 case MBX_INIT_VPI:
2121 case MBX_INIT_VFI:
2122 case MBX_RESUME_RPI:
c7495937
JS
2123 case MBX_READ_EVENT_LOG_STATUS:
2124 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2125 case MBX_SECURITY_MGMT:
2126 case MBX_AUTH_PORT:
940eb687 2127 case MBX_ACCESS_VDATA:
dea3101e 2128 ret = mbxCommand;
2129 break;
2130 default:
2131 ret = MBX_SHUTDOWN;
2132 break;
2133 }
2e0fef85 2134 return ret;
dea3101e 2135}
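/*
 * Illustrative sketch: callers treat an MBX_SHUTDOWN return from
 * lpfc_sli_chk_mbx_command() as fatal, as lpfc_sli_handle_mb_event()
 * does below. "example_mbx_is_known" is a hypothetical helper.
 */
static inline int example_mbx_is_known(uint8_t mbxCommand)
{
	return lpfc_sli_chk_mbx_command(mbxCommand) != MBX_SHUTDOWN;
}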
e59058c4
JS
2136
2137/**
3621a710 2138 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2139 * @phba: Pointer to HBA context object.
2140 * @pmboxq: Pointer to mailbox command.
2141 *
2142 * This is completion handler function for mailbox commands issued from
2143 * lpfc_sli_issue_mbox_wait function. This function is called by the
2144 * mailbox event handler function with no lock held. This function
2145 * will wake up thread waiting on the wait queue pointed by context1
2146 * of the mailbox.
2147 **/
04c68496 2148void
2e0fef85 2149lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2150{
2151 wait_queue_head_t *pdone_q;
858c9f6c 2152 unsigned long drvr_flag;
dea3101e 2153
2154 /*
2155 * If pdone_q is empty, the driver thread gave up waiting and
2156 * continued running.
2157 */
7054a606 2158 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2159 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e 2160 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2161 if (pdone_q)
2162 wake_up_interruptible(pdone_q);
858c9f6c 2163 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2164 return;
2165}
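/*
 * Illustrative sketch of the waiting side this handler pairs with: a
 * simplified view of what lpfc_sli_issue_mbox_wait() does. The real
 * routine also applies a timeout and additional locking; this is an
 * assumption-laden reduction, not driver code.
 */
static int example_issue_mbox_and_wait(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *pmboxq)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	pmboxq->context1 = &done_q;
	if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
		return MBX_NOT_FINISHED;
	/* sleep until the completion handler above sets LPFC_MBX_WAKE */
	wait_event_interruptible(done_q,
				 pmboxq->mbox_flag & LPFC_MBX_WAKE);
	return pmboxq->u.mb.mbxStatus;
}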
2166
e59058c4
JS
2167
2168/**
3621a710 2169 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2170 * @phba: Pointer to HBA context object.
2171 * @pmb: Pointer to mailbox object.
2172 *
2173 * This function is the default mailbox completion handler. It
2174 * frees the memory resources associated with the completed mailbox
2175 * command. If the completed command is a REG_LOGIN mailbox command,
 2176 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2177 **/
dea3101e 2178void
2e0fef85 2179lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2180{
d439d286 2181 struct lpfc_vport *vport = pmb->vport;
dea3101e 2182 struct lpfc_dmabuf *mp;
d439d286 2183 struct lpfc_nodelist *ndlp;
5af5eee7 2184 struct Scsi_Host *shost;
04c68496 2185 uint16_t rpi, vpi;
7054a606
JS
2186 int rc;
2187
dea3101e 2188 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 2189
dea3101e 2190 if (mp) {
2191 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2192 kfree(mp);
2193 }
7054a606
JS
2194
2195 /*
2196 * If a REG_LOGIN succeeded after node is destroyed or node
2197 * is in re-discovery driver need to cleanup the RPI.
2198 */
2e0fef85 2199 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2200 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2201 !pmb->u.mb.mbxStatus) {
2202 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2203 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2204 lpfc_unreg_login(phba, vpi, rpi, pmb);
92d7f7b0 2205 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2206 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2207 if (rc != MBX_NOT_FINISHED)
2208 return;
2209 }
2210
695a814e
JS
2211 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2212 !(phba->pport->load_flag & FC_UNLOADING) &&
2213 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2214 shost = lpfc_shost_from_vport(vport);
2215 spin_lock_irq(shost->host_lock);
2216 vport->vpi_state |= LPFC_VPI_REGISTERED;
2217 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2218 spin_unlock_irq(shost->host_lock);
695a814e
JS
2219 }
2220
d439d286
JS
2221 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2222 ndlp = (struct lpfc_nodelist *)pmb->context2;
2223 lpfc_nlp_put(ndlp);
2224 pmb->context2 = NULL;
2225 }
2226
dcf2a4e0
JS
2227 /* Check security permission status on INIT_LINK mailbox command */
2228 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2229 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2230 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2231 "2860 SLI authentication is required "
2232 "for INIT_LINK but has not done yet\n");
2233
04c68496
JS
2234 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2235 lpfc_sli4_mbox_cmd_free(phba, pmb);
2236 else
2237 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2238}
2239
e59058c4 2240/**
3621a710 2241 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2242 * @phba: Pointer to HBA context object.
2243 *
2244 * This function is called with no lock held. This function processes all
 2245 * the completed mailbox commands and gives them to the upper layers. The interrupt
2246 * service routine processes mailbox completion interrupt and adds completed
2247 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 2248 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2249 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2250 * function returns the mailbox commands to the upper layer by calling the
2251 * completion handler function of each mailbox.
2252 **/
dea3101e 2253int
2e0fef85 2254lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2255{
92d7f7b0 2256 MAILBOX_t *pmbox;
dea3101e 2257 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2258 int rc;
2259 LIST_HEAD(cmplq);
dea3101e 2260
2261 phba->sli.slistat.mbox_event++;
2262
92d7f7b0
JS
 2263 	/* Get all completed mailbox buffers into the cmplq */
2264 spin_lock_irq(&phba->hbalock);
2265 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2266 spin_unlock_irq(&phba->hbalock);
dea3101e 2267
92d7f7b0
JS
2268 /* Get a Mailbox buffer to setup mailbox commands for callback */
2269 do {
2270 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2271 if (pmb == NULL)
2272 break;
2e0fef85 2273
04c68496 2274 pmbox = &pmb->u.mb;
dea3101e 2275
858c9f6c
JS
2276 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2277 if (pmb->vport) {
2278 lpfc_debugfs_disc_trc(pmb->vport,
2279 LPFC_DISC_TRC_MBOX_VPORT,
2280 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2281 (uint32_t)pmbox->mbxCommand,
2282 pmbox->un.varWords[0],
2283 pmbox->un.varWords[1]);
2284 }
2285 else {
2286 lpfc_debugfs_disc_trc(phba->pport,
2287 LPFC_DISC_TRC_MBOX,
2288 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2289 (uint32_t)pmbox->mbxCommand,
2290 pmbox->un.varWords[0],
2291 pmbox->un.varWords[1]);
2292 }
2293 }
2294
dea3101e 2295 /*
 2296 		 * It is a fatal error if an unknown mbox command completes.
2297 */
2298 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2299 MBX_SHUTDOWN) {
af901ca1 2300 /* Unknown mailbox command compl */
92d7f7b0 2301 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2302 "(%d):0323 Unknown Mailbox command "
a183a15f 2303 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2304 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2305 pmbox->mbxCommand,
a183a15f
JS
2306 lpfc_sli_config_mbox_subsys_get(phba,
2307 pmb),
2308 lpfc_sli_config_mbox_opcode_get(phba,
2309 pmb));
2e0fef85 2310 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2311 phba->work_hs = HS_FFER3;
2312 lpfc_handle_eratt(phba);
92d7f7b0 2313 continue;
dea3101e 2314 }
2315
dea3101e 2316 if (pmbox->mbxStatus) {
2317 phba->sli.slistat.mbox_stat_err++;
2318 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2319 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2320 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2321 LOG_MBOX | LOG_SLI,
2322 "(%d):0305 Mbox cmd cmpl "
2323 "error - RETRYing Data: x%x "
2324 "(x%x/x%x) x%x x%x x%x\n",
2325 pmb->vport ? pmb->vport->vpi : 0,
2326 pmbox->mbxCommand,
2327 lpfc_sli_config_mbox_subsys_get(phba,
2328 pmb),
2329 lpfc_sli_config_mbox_opcode_get(phba,
2330 pmb),
2331 pmbox->mbxStatus,
2332 pmbox->un.varWords[0],
2333 pmb->vport->port_state);
dea3101e 2334 pmbox->mbxStatus = 0;
2335 pmbox->mbxOwner = OWN_HOST;
dea3101e 2336 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2337 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2338 continue;
dea3101e 2339 }
2340 }
2341
2342 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2343 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 2344 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
e74c03c8
JS
2345 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2346 "x%x x%x x%x\n",
92d7f7b0 2347 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2348 pmbox->mbxCommand,
a183a15f
JS
2349 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2350 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2351 pmb->mbox_cmpl,
2352 *((uint32_t *) pmbox),
2353 pmbox->un.varWords[0],
2354 pmbox->un.varWords[1],
2355 pmbox->un.varWords[2],
2356 pmbox->un.varWords[3],
2357 pmbox->un.varWords[4],
2358 pmbox->un.varWords[5],
2359 pmbox->un.varWords[6],
e74c03c8
JS
2360 pmbox->un.varWords[7],
2361 pmbox->un.varWords[8],
2362 pmbox->un.varWords[9],
2363 pmbox->un.varWords[10]);
dea3101e 2364
92d7f7b0 2365 if (pmb->mbox_cmpl)
dea3101e 2366 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2367 } while (1);
2368 return 0;
2369}
dea3101e 2370
e59058c4 2371/**
3621a710 2372 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2373 * @phba: Pointer to HBA context object.
2374 * @pring: Pointer to driver SLI ring object.
2375 * @tag: buffer tag.
2376 *
 2377 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 2378 * is set in the tag, the buffer was posted for a particular exchange and
 2379 * the function will return the buffer without replacing it.
2380 * If the buffer is for unsolicited ELS or CT traffic, this function
2381 * returns the buffer and also posts another buffer to the firmware.
2382 **/
76bb24ef
JS
2383static struct lpfc_dmabuf *
2384lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2385 struct lpfc_sli_ring *pring,
2386 uint32_t tag)
76bb24ef 2387{
9f1e1b50
JS
2388 struct hbq_dmabuf *hbq_entry;
2389
76bb24ef
JS
2390 if (tag & QUE_BUFTAG_BIT)
2391 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2392 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2393 if (!hbq_entry)
2394 return NULL;
2395 return &hbq_entry->dbuf;
76bb24ef 2396}
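/*
 * Illustrative sketch: tags with QUE_BUFTAG_BIT set identify ring-posted
 * buffers, and everything else is an HBQ tag, mirroring the test at the
 * top of lpfc_sli_get_buff(). Hypothetical helper, for illustration.
 */
static inline int example_is_que_buftag(uint32_t tag)
{
	return (tag & QUE_BUFTAG_BIT) != 0;
}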
57127f15 2397
3772a991
JS
2398/**
2399 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2400 * @phba: Pointer to HBA context object.
2401 * @pring: Pointer to driver SLI ring object.
2402 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2403 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2404 * @fch_type: the type for the first frame of the sequence.
2405 *
2406 * This function is called with no lock held. This function uses the r_ctl and
2407 * type of the received sequence to find the correct callback function to call
2408 * to process the sequence.
2409 **/
2410static int
2411lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2412 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2413 uint32_t fch_type)
2414{
2415 int i;
2416
2417 /* unSolicited Responses */
2418 if (pring->prt[0].profile) {
2419 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2420 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2421 saveq);
2422 return 1;
2423 }
2424 /* We must search, based on rctl / type
2425 for the right routine */
2426 for (i = 0; i < pring->num_mask; i++) {
2427 if ((pring->prt[i].rctl == fch_r_ctl) &&
2428 (pring->prt[i].type == fch_type)) {
2429 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2430 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2431 (phba, pring, saveq);
2432 return 1;
2433 }
2434 }
2435 return 0;
2436}
e59058c4
JS
2437
2438/**
3621a710 2439 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2440 * @phba: Pointer to HBA context object.
2441 * @pring: Pointer to driver SLI ring object.
2442 * @saveq: Pointer to the unsolicited iocb.
2443 *
2444 * This function is called with no lock held by the ring event handler
2445 * when there is an unsolicited iocb posted to the response ring by the
2446 * firmware. This function gets the buffer associated with the iocbs
2447 * and calls the event handler for the ring. This function handles both
2448 * qring buffers and hbq buffers.
 2449 * When the function returns 1 the caller can free the iocb object, otherwise
 2450 * upper layer functions will free the iocb objects.
2451 **/
dea3101e 2452static int
2453lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2454 struct lpfc_iocbq *saveq)
2455{
2456 IOCB_t * irsp;
2457 WORD5 * w5p;
2458 uint32_t Rctl, Type;
3772a991 2459 uint32_t match;
76bb24ef 2460 struct lpfc_iocbq *iocbq;
3163f725 2461 struct lpfc_dmabuf *dmzbuf;
dea3101e 2462
2463 match = 0;
2464 irsp = &(saveq->iocb);
57127f15
JS
2465
2466 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2467 if (pring->lpfc_sli_rcv_async_status)
2468 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2469 else
2470 lpfc_printf_log(phba,
2471 KERN_WARNING,
2472 LOG_SLI,
2473 "0316 Ring %d handler: unexpected "
2474 "ASYNC_STATUS iocb received evt_code "
2475 "0x%x\n",
2476 pring->ringno,
2477 irsp->un.asyncstat.evt_code);
2478 return 1;
2479 }
2480
3163f725
JS
2481 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2482 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2483 if (irsp->ulpBdeCount > 0) {
2484 dmzbuf = lpfc_sli_get_buff(phba, pring,
2485 irsp->un.ulpWord[3]);
2486 lpfc_in_buf_free(phba, dmzbuf);
2487 }
2488
2489 if (irsp->ulpBdeCount > 1) {
2490 dmzbuf = lpfc_sli_get_buff(phba, pring,
2491 irsp->unsli3.sli3Words[3]);
2492 lpfc_in_buf_free(phba, dmzbuf);
2493 }
2494
2495 if (irsp->ulpBdeCount > 2) {
2496 dmzbuf = lpfc_sli_get_buff(phba, pring,
2497 irsp->unsli3.sli3Words[7]);
2498 lpfc_in_buf_free(phba, dmzbuf);
2499 }
2500
2501 return 1;
2502 }
2503
92d7f7b0 2504 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2505 if (irsp->ulpBdeCount != 0) {
2506 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2507 irsp->un.ulpWord[3]);
2508 if (!saveq->context2)
2509 lpfc_printf_log(phba,
2510 KERN_ERR,
2511 LOG_SLI,
2512 "0341 Ring %d Cannot find buffer for "
2513 "an unsolicited iocb. tag 0x%x\n",
2514 pring->ringno,
2515 irsp->un.ulpWord[3]);
76bb24ef
JS
2516 }
2517 if (irsp->ulpBdeCount == 2) {
2518 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2519 irsp->unsli3.sli3Words[7]);
2520 if (!saveq->context3)
2521 lpfc_printf_log(phba,
2522 KERN_ERR,
2523 LOG_SLI,
2524 "0342 Ring %d Cannot find buffer for an"
2525 " unsolicited iocb. tag 0x%x\n",
2526 pring->ringno,
2527 irsp->unsli3.sli3Words[7]);
2528 }
2529 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2530 irsp = &(iocbq->iocb);
76bb24ef
JS
2531 if (irsp->ulpBdeCount != 0) {
2532 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2533 irsp->un.ulpWord[3]);
9c2face6 2534 if (!iocbq->context2)
76bb24ef
JS
2535 lpfc_printf_log(phba,
2536 KERN_ERR,
2537 LOG_SLI,
2538 "0343 Ring %d Cannot find "
2539 "buffer for an unsolicited iocb"
2540 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2541 irsp->un.ulpWord[3]);
76bb24ef
JS
2542 }
2543 if (irsp->ulpBdeCount == 2) {
2544 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2545 irsp->unsli3.sli3Words[7]);
9c2face6 2546 if (!iocbq->context3)
76bb24ef
JS
2547 lpfc_printf_log(phba,
2548 KERN_ERR,
2549 LOG_SLI,
2550 "0344 Ring %d Cannot find "
2551 "buffer for an unsolicited "
2552 "iocb. tag 0x%x\n",
2553 pring->ringno,
2554 irsp->unsli3.sli3Words[7]);
2555 }
2556 }
92d7f7b0 2557 }
9c2face6
JS
2558 if (irsp->ulpBdeCount != 0 &&
2559 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2560 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2561 int found = 0;
2562
2563 /* search continue save q for same XRI */
2564 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2565 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2566 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2567 list_add_tail(&saveq->list, &iocbq->list);
2568 found = 1;
2569 break;
2570 }
2571 }
2572 if (!found)
2573 list_add_tail(&saveq->clist,
2574 &pring->iocb_continue_saveq);
2575 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2576 list_del_init(&iocbq->clist);
2577 saveq = iocbq;
2578 irsp = &(saveq->iocb);
2579 } else
2580 return 0;
2581 }
2582 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2583 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2584 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2585 Rctl = FC_RCTL_ELS_REQ;
2586 Type = FC_TYPE_ELS;
9c2face6
JS
2587 } else {
2588 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2589 Rctl = w5p->hcsw.Rctl;
2590 Type = w5p->hcsw.Type;
2591
2592 /* Firmware Workaround */
2593 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2594 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2595 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2596 Rctl = FC_RCTL_ELS_REQ;
2597 Type = FC_TYPE_ELS;
9c2face6
JS
2598 w5p->hcsw.Rctl = Rctl;
2599 w5p->hcsw.Type = Type;
2600 }
2601 }
92d7f7b0 2602
3772a991 2603 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2604 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2605 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2606 "Type x%x received\n",
e8b62011 2607 pring->ringno, Rctl, Type);
3772a991 2608
92d7f7b0 2609 return 1;
dea3101e 2610}
2611
e59058c4 2612/**
3621a710 2613 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2614 * @phba: Pointer to HBA context object.
2615 * @pring: Pointer to driver SLI ring object.
2616 * @prspiocb: Pointer to response iocb object.
2617 *
2618 * This function looks up the iocb_lookup table to get the command iocb
2619 * corresponding to the given response iocb using the iotag of the
2620 * response iocb. This function is called with the hbalock held.
2621 * This function returns the command iocb object if it finds the command
2622 * iocb else returns NULL.
2623 **/
dea3101e 2624static struct lpfc_iocbq *
2e0fef85
JS
2625lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2626 struct lpfc_sli_ring *pring,
2627 struct lpfc_iocbq *prspiocb)
dea3101e 2628{
dea3101e 2629 struct lpfc_iocbq *cmd_iocb = NULL;
2630 uint16_t iotag;
2631
604a3e30
JB
2632 iotag = prspiocb->iocb.ulpIoTag;
2633
2634 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2635 cmd_iocb = phba->sli.iocbq_lookup[iotag];
92d7f7b0 2636 list_del_init(&cmd_iocb->list);
4f2e66c6 2637 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
4f2e66c6 2638 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2a9bf3d0 2639 }
604a3e30 2640 return cmd_iocb;
dea3101e 2641 }
2642
dea3101e 2643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2644 "0317 iotag x%x is out off "
604a3e30 2645 "range: max iotag x%x wd0 x%x\n",
e8b62011 2646 iotag, phba->sli.last_iotag,
604a3e30 2647 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 2648 return NULL;
2649}
2650
3772a991
JS
2651/**
2652 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2653 * @phba: Pointer to HBA context object.
2654 * @pring: Pointer to driver SLI ring object.
2655 * @iotag: IOCB tag.
2656 *
2657 * This function looks up the iocb_lookup table to get the command iocb
2658 * corresponding to the given iotag. This function is called with the
2659 * hbalock held.
2660 * This function returns the command iocb object if it finds the command
2661 * iocb else returns NULL.
2662 **/
2663static struct lpfc_iocbq *
2664lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2665 struct lpfc_sli_ring *pring, uint16_t iotag)
2666{
2667 struct lpfc_iocbq *cmd_iocb;
2668
2669 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2670 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
2671 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2672 /* remove from txcmpl queue list */
2673 list_del_init(&cmd_iocb->list);
2674 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4f2e66c6 2675 return cmd_iocb;
2a9bf3d0 2676 }
3772a991 2677 }
3772a991
JS
2678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2679 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2680 iotag, phba->sli.last_iotag);
2681 return NULL;
2682}
2683
e59058c4 2684/**
3621a710 2685 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
2686 * @phba: Pointer to HBA context object.
2687 * @pring: Pointer to driver SLI ring object.
2688 * @saveq: Pointer to the response iocb to be processed.
2689 *
2690 * This function is called by the ring event handler for non-fcp
2691 * rings when there is a new response iocb in the response ring.
2692 * The caller is not required to hold any locks. This function
2693 * gets the command iocb associated with the response iocb and
2694 * calls the completion handler for the command iocb. If there
2695 * is no completion handler, the function will free the resources
2696 * associated with command iocb. If the response iocb is for
2697 * an already aborted command iocb, the status of the completion
2698 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2699 * This function always returns 1.
2700 **/
dea3101e 2701static int
2e0fef85 2702lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 2703 struct lpfc_iocbq *saveq)
2704{
2e0fef85 2705 struct lpfc_iocbq *cmdiocbp;
dea3101e 2706 int rc = 1;
2707 unsigned long iflag;
2708
2709 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 2710 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 2711 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2e0fef85
JS
2712 spin_unlock_irqrestore(&phba->hbalock, iflag);
2713
dea3101e 2714 if (cmdiocbp) {
2715 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
2716 /*
2717 * If an ELS command failed send an event to mgmt
2718 * application.
2719 */
2720 if (saveq->iocb.ulpStatus &&
2721 (pring->ringno == LPFC_ELS_RING) &&
2722 (cmdiocbp->iocb.ulpCommand ==
2723 CMD_ELS_REQUEST64_CR))
2724 lpfc_send_els_failure_event(phba,
2725 cmdiocbp, saveq);
2726
dea3101e 2727 /*
2728 * Post all ELS completions to the worker thread.
2729 * All other are passed to the completion callback.
2730 */
2731 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
2732 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2733 (cmdiocbp->iocb_flag &
2734 LPFC_DRIVER_ABORTED)) {
2735 spin_lock_irqsave(&phba->hbalock,
2736 iflag);
07951076
JS
2737 cmdiocbp->iocb_flag &=
2738 ~LPFC_DRIVER_ABORTED;
341af102
JS
2739 spin_unlock_irqrestore(&phba->hbalock,
2740 iflag);
07951076
JS
2741 saveq->iocb.ulpStatus =
2742 IOSTAT_LOCAL_REJECT;
2743 saveq->iocb.un.ulpWord[4] =
2744 IOERR_SLI_ABORTED;
0ff10d46
JS
2745
2746 /* Firmware could still be in progress
2747 * of DMAing payload, so don't free data
2748 * buffer till after a hbeat.
2749 */
341af102
JS
2750 spin_lock_irqsave(&phba->hbalock,
2751 iflag);
0ff10d46 2752 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
2753 spin_unlock_irqrestore(&phba->hbalock,
2754 iflag);
2755 }
0f65ff68
JS
2756 if (phba->sli_rev == LPFC_SLI_REV4) {
2757 if (saveq->iocb_flag &
2758 LPFC_EXCHANGE_BUSY) {
2759 /* Set cmdiocb flag for the
2760 * exchange busy so sgl (xri)
2761 * will not be released until
2762 * the abort xri is received
2763 * from hba.
2764 */
2765 spin_lock_irqsave(
2766 &phba->hbalock, iflag);
2767 cmdiocbp->iocb_flag |=
2768 LPFC_EXCHANGE_BUSY;
2769 spin_unlock_irqrestore(
2770 &phba->hbalock, iflag);
2771 }
2772 if (cmdiocbp->iocb_flag &
2773 LPFC_DRIVER_ABORTED) {
2774 /*
2775 * Clear LPFC_DRIVER_ABORTED
2776 * bit in case it was driver
2777 * initiated abort.
2778 */
2779 spin_lock_irqsave(
2780 &phba->hbalock, iflag);
2781 cmdiocbp->iocb_flag &=
2782 ~LPFC_DRIVER_ABORTED;
2783 spin_unlock_irqrestore(
2784 &phba->hbalock, iflag);
2785 cmdiocbp->iocb.ulpStatus =
2786 IOSTAT_LOCAL_REJECT;
2787 cmdiocbp->iocb.un.ulpWord[4] =
2788 IOERR_ABORT_REQUESTED;
2789 /*
2790 * For SLI4, irsiocb contains
2791 * NO_XRI in sli_xritag, it
2792 * shall not affect releasing
2793 * sgl (xri) process.
2794 */
2795 saveq->iocb.ulpStatus =
2796 IOSTAT_LOCAL_REJECT;
2797 saveq->iocb.un.ulpWord[4] =
2798 IOERR_SLI_ABORTED;
2799 spin_lock_irqsave(
2800 &phba->hbalock, iflag);
2801 saveq->iocb_flag |=
2802 LPFC_DELAY_MEM_FREE;
2803 spin_unlock_irqrestore(
2804 &phba->hbalock, iflag);
2805 }
07951076 2806 }
dea3101e 2807 }
2e0fef85 2808 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
2809 } else
2810 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 2811 } else {
2812 /*
2813 * Unknown initiating command based on the response iotag.
2814 * This could be the case on the ELS ring because of
2815 * lpfc_els_abort().
2816 */
2817 if (pring->ringno != LPFC_ELS_RING) {
2818 /*
2819 * Ring <ringno> handler: unexpected completion IoTag
2820 * <IoTag>
2821 */
a257bf90 2822 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
2823 "0322 Ring %d handler: "
2824 "unexpected completion IoTag x%x "
2825 "Data: x%x x%x x%x x%x\n",
2826 pring->ringno,
2827 saveq->iocb.ulpIoTag,
2828 saveq->iocb.ulpStatus,
2829 saveq->iocb.un.ulpWord[4],
2830 saveq->iocb.ulpCommand,
2831 saveq->iocb.ulpContext);
dea3101e 2832 }
2833 }
68876920 2834
dea3101e 2835 return rc;
2836}
2837
e59058c4 2838/**
3621a710 2839 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
2840 * @phba: Pointer to HBA context object.
2841 * @pring: Pointer to driver SLI ring object.
2842 *
2843 * This function is called from the iocb ring event handlers when
 2844 * put pointer is ahead of the get pointer for a ring. This function signals
2845 * an error attention condition to the worker thread and the worker
2846 * thread will transition the HBA to offline state.
2847 **/
2e0fef85
JS
2848static void
2849lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 2850{
34b02dcd 2851 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 2852 /*
025dfdaf 2853 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
2854 * rsp ring <portRspMax>
2855 */
2856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2857 "0312 Ring %d handler: portRspPut %d "
025dfdaf 2858 "is bigger than rsp ring %d\n",
e8b62011 2859 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 2860 pring->sli.sli3.numRiocb);
875fbdfe 2861
2e0fef85 2862 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
2863
2864 /*
2865 * All error attention handlers are posted to
2866 * worker thread
2867 */
2868 phba->work_ha |= HA_ERATT;
2869 phba->work_hs = HS_FFER3;
92d7f7b0 2870
5e9d9b82 2871 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
2872
2873 return;
2874}
2875
9399627f 2876/**
3621a710 2877 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
2878 * @ptr: Pointer to address of HBA context object.
2879 *
2880 * This function is invoked by the Error Attention polling timer when the
2881 * timer times out. It will check the SLI Error Attention register for
2882 * possible attention events. If so, it will post an Error Attention event
2883 * and wake up worker thread to process it. Otherwise, it will set up the
2884 * Error Attention polling timer for the next poll.
2885 **/
2886void lpfc_poll_eratt(unsigned long ptr)
2887{
2888 struct lpfc_hba *phba;
aa6fbb75
JS
2889 uint32_t eratt = 0, rem;
2890 uint64_t sli_intr, cnt;
9399627f
JS
2891
2892 phba = (struct lpfc_hba *)ptr;
2893
aa6fbb75
JS
 2894 	/* Here we will also keep track of interrupts per second for the hba */
2895 sli_intr = phba->sli.slistat.sli_intr;
2896
2897 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2898 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2899 sli_intr);
2900 else
2901 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2902
2903 /* 64-bit integer division not supporte on 32-bit x86 - use do_div */
2904 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2905 phba->sli.slistat.sli_ips = cnt;
2906
2907 phba->sli.slistat.sli_prev_intr = sli_intr;
2908
9399627f
JS
2909 /* Check chip HA register for error event */
2910 eratt = lpfc_sli_check_eratt(phba);
2911
2912 if (eratt)
2913 /* Tell the worker thread there is work to do */
2914 lpfc_worker_wake_up(phba);
2915 else
2916 /* Restart the timer for next eratt poll */
256ec0d0
JS
2917 mod_timer(&phba->eratt_poll,
2918 jiffies +
2919 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
9399627f
JS
2920 return;
2921}
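/*
 * Illustrative sketch of the interrupt-rate arithmetic used above: the
 * 64-bit interrupt delta (with wrap-around handling) is divided by the
 * poll interval with do_div(), which divides in place and returns the
 * remainder. "example_ints_per_sec" is a hypothetical helper.
 */
static inline uint64_t example_ints_per_sec(uint64_t prev, uint64_t now,
					    uint32_t interval)
{
	uint64_t cnt;

	if (prev > now)		/* the counter wrapped */
		cnt = ((uint64_t)(-1) - prev) + now;
	else
		cnt = now - prev;
	do_div(cnt, interval);
	return cnt;
}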
2922
875fbdfe 2923
e59058c4 2924/**
3621a710 2925 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
2926 * @phba: Pointer to HBA context object.
2927 * @pring: Pointer to driver SLI ring object.
2928 * @mask: Host attention register mask for this ring.
2929 *
2930 * This function is called from the interrupt context when there is a ring
2931 * event for the fcp ring. The caller does not hold any lock.
 2932 * The function processes each response iocb in the response ring until it
25985edc 2933 * finds an iocb with the LE bit set, chaining all the preceding iocbs to
e59058c4
JS
 2934 * that iocb. The function will call the completion handler of the command iocb
2935 * if the response iocb indicates a completion for a command iocb or it is
2936 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2937 * function if this is an unsolicited iocb.
dea3101e 2938 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
2939 * to check it explicitly.
2940 */
2941int
2e0fef85
JS
2942lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2943 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 2944{
34b02dcd 2945 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 2946 IOCB_t *irsp = NULL;
87f6eaff 2947 IOCB_t *entry = NULL;
dea3101e 2948 struct lpfc_iocbq *cmdiocbq = NULL;
2949 struct lpfc_iocbq rspiocbq;
dea3101e 2950 uint32_t status;
2951 uint32_t portRspPut, portRspMax;
2952 int rc = 1;
2953 lpfc_iocb_type type;
2954 unsigned long iflag;
2955 uint32_t rsp_cmpl = 0;
dea3101e 2956
2e0fef85 2957 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 2958 pring->stats.iocb_event++;
2959
dea3101e 2960 /*
2961 * The next available response entry should never exceed the maximum
2962 * entries. If it does, treat it as an adapter hardware error.
2963 */
7e56aa25 2964 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 2965 portRspPut = le32_to_cpu(pgp->rspPutInx);
2966 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 2967 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 2968 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 2969 return 1;
2970 }
45ed1190
JS
2971 if (phba->fcp_ring_in_use) {
2972 spin_unlock_irqrestore(&phba->hbalock, iflag);
2973 return 1;
2974 } else
2975 phba->fcp_ring_in_use = 1;
dea3101e 2976
2977 rmb();
7e56aa25 2978 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
2979 /*
2980 * Fetch an entry off the ring and copy it into a local data
2981 * structure. The copy involves a byte-swap since the
2982 * network byte order and pci byte orders are different.
2983 */
ed957684 2984 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 2985 phba->last_completion_time = jiffies;
875fbdfe 2986
7e56aa25
JS
2987 if (++pring->sli.sli3.rspidx >= portRspMax)
2988 pring->sli.sli3.rspidx = 0;
875fbdfe 2989
87f6eaff
JSEC
2990 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2991 (uint32_t *) &rspiocbq.iocb,
ed957684 2992 phba->iocb_rsp_size);
a4bc3379 2993 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
2994 irsp = &rspiocbq.iocb;
2995
dea3101e 2996 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2997 pring->stats.iocb_rsp++;
2998 rsp_cmpl++;
2999
3000 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3001 /*
3002 * If resource errors reported from HBA, reduce
3003 * queuedepths of the SCSI device.
3004 */
3005 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3006 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3007 IOERR_NO_RESOURCES)) {
92d7f7b0 3008 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3009 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3010 spin_lock_irqsave(&phba->hbalock, iflag);
3011 }
3012
dea3101e 3013 /* Rsp ring <ringno> error: IOCB */
3014 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3015 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3016 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3017 pring->ringno,
92d7f7b0
JS
3018 irsp->un.ulpWord[0],
3019 irsp->un.ulpWord[1],
3020 irsp->un.ulpWord[2],
3021 irsp->un.ulpWord[3],
3022 irsp->un.ulpWord[4],
3023 irsp->un.ulpWord[5],
d7c255b2
JS
3024 *(uint32_t *)&irsp->un1,
3025 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3026 }
3027
3028 switch (type) {
3029 case LPFC_ABORT_IOCB:
3030 case LPFC_SOL_IOCB:
3031 /*
3032 * Idle exchange closed via ABTS from port. No iocb
3033 * resources need to be recovered.
3034 */
3035 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3036 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3037 "0333 IOCB cmd 0x%x"
dca9479b 3038 " processed. Skipping"
92d7f7b0 3039 " completion\n",
dca9479b 3040 irsp->ulpCommand);
dea3101e 3041 break;
3042 }
3043
604a3e30
JB
3044 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3045 &rspiocbq);
0f65ff68
JS
3046 if (unlikely(!cmdiocbq))
3047 break;
3048 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3049 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3050 if (cmdiocbq->iocb_cmpl) {
3051 spin_unlock_irqrestore(&phba->hbalock, iflag);
3052 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3053 &rspiocbq);
3054 spin_lock_irqsave(&phba->hbalock, iflag);
3055 }
dea3101e 3056 break;
a4bc3379 3057 case LPFC_UNSOL_IOCB:
2e0fef85 3058 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3059 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3060 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3061 break;
dea3101e 3062 default:
3063 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3064 char adaptermsg[LPFC_MAX_ADPTMSG];
3065 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3066 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3067 MAX_MSG_DATA);
898eb71c
JP
3068 dev_warn(&((phba->pcidev)->dev),
3069 "lpfc%d: %s\n",
dea3101e 3070 phba->brd_no, adaptermsg);
3071 } else {
3072 /* Unknown IOCB command */
3073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3074 "0334 Unknown IOCB command "
92d7f7b0 3075 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3076 type, irsp->ulpCommand,
92d7f7b0
JS
3077 irsp->ulpStatus,
3078 irsp->ulpIoTag,
3079 irsp->ulpContext);
dea3101e 3080 }
3081 break;
3082 }
3083
3084 /*
3085 * The response IOCB has been processed. Update the ring
3086 * pointer in SLIM. If the port response put pointer has not
3087 * been updated, sync the pgp->rspPutInx and fetch the new port
3088 * response put pointer.
3089 */
7e56aa25
JS
3090 writel(pring->sli.sli3.rspidx,
3091 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3092
7e56aa25 3093 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3094 portRspPut = le32_to_cpu(pgp->rspPutInx);
3095 }
3096
3097 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3098 pring->stats.iocb_rsp_full++;
3099 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3100 writel(status, phba->CAregaddr);
3101 readl(phba->CAregaddr);
3102 }
3103 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3104 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3105 pring->stats.iocb_cmd_empty++;
3106
3107 /* Force update of the local copy of cmdGetInx */
7e56aa25 3108 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3109 lpfc_sli_resume_iocb(phba, pring);
3110
3111 if ((pring->lpfc_sli_cmd_available))
3112 (pring->lpfc_sli_cmd_available) (phba, pring);
3113
3114 }
3115
45ed1190 3116 phba->fcp_ring_in_use = 0;
2e0fef85 3117 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3118 return rc;
3119}
3120
e59058c4 3121/**
3772a991
JS
3122 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3123 * @phba: Pointer to HBA context object.
3124 * @pring: Pointer to driver SLI ring object.
3125 * @rspiocbp: Pointer to driver response IOCB object.
3126 *
3127 * This function is called from the worker thread when there is a slow-path
3128 * response IOCB to process. This function chains all the response iocbs until
3129 * seeing the iocb with the LE bit set. The function will call
3130 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3131 * completion of a command iocb. The function will call the
3132 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3133 * The function frees the resources or calls the completion handler if this
3134 * iocb is an abort completion. The function returns NULL when the response
3135 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3136 * this function shall chain the iocb on to the iocb_continueq and return the
3137 * response iocb passed in.
3138 **/
3139static struct lpfc_iocbq *
3140lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3141 struct lpfc_iocbq *rspiocbp)
3142{
3143 struct lpfc_iocbq *saveq;
3144 struct lpfc_iocbq *cmdiocbp;
3145 struct lpfc_iocbq *next_iocb;
3146 IOCB_t *irsp = NULL;
3147 uint32_t free_saveq;
3148 uint8_t iocb_cmd_type;
3149 lpfc_iocb_type type;
3150 unsigned long iflag;
3151 int rc;
3152
3153 spin_lock_irqsave(&phba->hbalock, iflag);
3154 	/* First add the response iocb to the continueq list */
3155 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3156 pring->iocb_continueq_cnt++;
3157
70f23fd6 3158 /* Now, determine whether the list is completed for processing */
3772a991
JS
3159 irsp = &rspiocbp->iocb;
3160 if (irsp->ulpLe) {
3161 /*
3162 * By default, the driver expects to free all resources
3163 * associated with this iocb completion.
3164 */
3165 free_saveq = 1;
3166 saveq = list_get_first(&pring->iocb_continueq,
3167 struct lpfc_iocbq, list);
3168 irsp = &(saveq->iocb);
3169 list_del_init(&pring->iocb_continueq);
3170 pring->iocb_continueq_cnt = 0;
3171
3172 pring->stats.iocb_rsp++;
3173
3174 /*
3175 * If resource errors reported from HBA, reduce
3176 * queuedepths of the SCSI device.
3177 */
3178 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3179 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3180 IOERR_NO_RESOURCES)) {
3772a991
JS
3181 spin_unlock_irqrestore(&phba->hbalock, iflag);
3182 phba->lpfc_rampdown_queue_depth(phba);
3183 spin_lock_irqsave(&phba->hbalock, iflag);
3184 }
3185
3186 if (irsp->ulpStatus) {
3187 /* Rsp ring <ringno> error: IOCB */
3188 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3189 "0328 Rsp Ring %d error: "
3190 "IOCB Data: "
3191 "x%x x%x x%x x%x "
3192 "x%x x%x x%x x%x "
3193 "x%x x%x x%x x%x "
3194 "x%x x%x x%x x%x\n",
3195 pring->ringno,
3196 irsp->un.ulpWord[0],
3197 irsp->un.ulpWord[1],
3198 irsp->un.ulpWord[2],
3199 irsp->un.ulpWord[3],
3200 irsp->un.ulpWord[4],
3201 irsp->un.ulpWord[5],
3202 *(((uint32_t *) irsp) + 6),
3203 *(((uint32_t *) irsp) + 7),
3204 *(((uint32_t *) irsp) + 8),
3205 *(((uint32_t *) irsp) + 9),
3206 *(((uint32_t *) irsp) + 10),
3207 *(((uint32_t *) irsp) + 11),
3208 *(((uint32_t *) irsp) + 12),
3209 *(((uint32_t *) irsp) + 13),
3210 *(((uint32_t *) irsp) + 14),
3211 *(((uint32_t *) irsp) + 15));
3212 }
3213
3214 /*
3215 * Fetch the IOCB command type and call the correct completion
3216 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3217 * get freed back to the lpfc_iocb_list by the discovery
3218 * kernel thread.
3219 */
3220 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3221 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3222 switch (type) {
3223 case LPFC_SOL_IOCB:
3224 spin_unlock_irqrestore(&phba->hbalock, iflag);
3225 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3226 spin_lock_irqsave(&phba->hbalock, iflag);
3227 break;
3228
3229 case LPFC_UNSOL_IOCB:
3230 spin_unlock_irqrestore(&phba->hbalock, iflag);
3231 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3232 spin_lock_irqsave(&phba->hbalock, iflag);
3233 if (!rc)
3234 free_saveq = 0;
3235 break;
3236
3237 case LPFC_ABORT_IOCB:
3238 cmdiocbp = NULL;
3239 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3240 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3241 saveq);
3242 if (cmdiocbp) {
3243 /* Call the specified completion routine */
3244 if (cmdiocbp->iocb_cmpl) {
3245 spin_unlock_irqrestore(&phba->hbalock,
3246 iflag);
3247 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3248 saveq);
3249 spin_lock_irqsave(&phba->hbalock,
3250 iflag);
3251 } else
3252 __lpfc_sli_release_iocbq(phba,
3253 cmdiocbp);
3254 }
3255 break;
3256
3257 case LPFC_UNKNOWN_IOCB:
3258 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3259 char adaptermsg[LPFC_MAX_ADPTMSG];
3260 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3261 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3262 MAX_MSG_DATA);
3263 dev_warn(&((phba->pcidev)->dev),
3264 "lpfc%d: %s\n",
3265 phba->brd_no, adaptermsg);
3266 } else {
3267 /* Unknown IOCB command */
3268 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3269 "0335 Unknown IOCB "
3270 "command Data: x%x "
3271 "x%x x%x x%x\n",
3272 irsp->ulpCommand,
3273 irsp->ulpStatus,
3274 irsp->ulpIoTag,
3275 irsp->ulpContext);
3276 }
3277 break;
3278 }
3279
3280 if (free_saveq) {
3281 list_for_each_entry_safe(rspiocbp, next_iocb,
3282 &saveq->list, list) {
3283 list_del(&rspiocbp->list);
3284 __lpfc_sli_release_iocbq(phba, rspiocbp);
3285 }
3286 __lpfc_sli_release_iocbq(phba, saveq);
3287 }
3288 rspiocbp = NULL;
3289 }
3290 spin_unlock_irqrestore(&phba->hbalock, iflag);
3291 return rspiocbp;
3292}
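/*
 * Usage sketch (hypothetical, not part of the driver): a caller feeds
 * each slow-path response iocb to lpfc_sli_sp_handle_rspiocb() and
 * keys off the return value. NULL means the LE-terminated chain was
 * completed and its resources handled; non-NULL means the iocb was
 * parked on iocb_continueq to await the entry with ulpLe set.
 */
static struct lpfc_iocbq * __maybe_unused
lpfc_sp_handle_rspiocb_sketch(struct lpfc_hba *phba,
			      struct lpfc_sli_ring *pring,
			      struct lpfc_iocbq *rspiocbp)
{
	/* NULL: LE chain completed and freed; non-NULL: still chaining */
	return lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
}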
3293
3294/**
3295 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3296 * @phba: Pointer to HBA context object.
3297 * @pring: Pointer to driver SLI ring object.
3298 * @mask: Host attention register mask for this ring.
3299 *
3772a991
JS
3300 * This routine wraps the actual slow_ring event process routine from the
3301 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3302 **/
3772a991 3303void
2e0fef85
JS
3304lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3305 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3306{
3307 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3308}
3309
3310/**
3311 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3312 * @phba: Pointer to HBA context object.
3313 * @pring: Pointer to driver SLI ring object.
3314 * @mask: Host attention register mask for this ring.
3315 *
3316 * This function is called from the worker thread when there is a ring event
 3317 * for non-fcp rings. The caller does not hold any lock. The function
 3318 * removes each response iocb from the response ring and calls the handle
 3319 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3320 **/
3321static void
3322lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3323 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3324{
34b02dcd 3325 struct lpfc_pgp *pgp;
dea3101e 3326 IOCB_t *entry;
3327 IOCB_t *irsp = NULL;
3328 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3329 uint32_t portRspPut, portRspMax;
dea3101e 3330 unsigned long iflag;
3772a991 3331 uint32_t status;
dea3101e 3332
34b02dcd 3333 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3334 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3335 pring->stats.iocb_event++;
3336
dea3101e 3337 /*
3338 * The next available response entry should never exceed the maximum
3339 * entries. If it does, treat it as an adapter hardware error.
3340 */
7e56aa25 3341 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3342 portRspPut = le32_to_cpu(pgp->rspPutInx);
3343 if (portRspPut >= portRspMax) {
3344 /*
025dfdaf 3345 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3346 * rsp ring <portRspMax>
3347 */
ed957684 3348 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3349 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3350 "is bigger than rsp ring %d\n",
e8b62011 3351 pring->ringno, portRspPut, portRspMax);
dea3101e 3352
2e0fef85
JS
3353 phba->link_state = LPFC_HBA_ERROR;
3354 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3355
3356 phba->work_hs = HS_FFER3;
3357 lpfc_handle_eratt(phba);
3358
3772a991 3359 return;
dea3101e 3360 }
3361
3362 rmb();
7e56aa25 3363 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3364 /*
3365 * Build a completion list and call the appropriate handler.
3366 * The process is to get the next available response iocb, get
3367 * a free iocb from the list, copy the response data into the
3368 * free iocb, insert to the continuation list, and update the
3369 * next response index to slim. This process makes response
 3371		 * iocbs in the ring available to DMA as fast as possible but
3371 * pays a penalty for a copy operation. Since the iocb is
3372 * only 32 bytes, this penalty is considered small relative to
3373 * the PCI reads for register values and a slim write. When
 3374		 * the ulpLe field is set, the entire command has been
3375 * received.
3376 */
ed957684
JS
3377 entry = lpfc_resp_iocb(phba, pring);
3378
858c9f6c 3379 phba->last_completion_time = jiffies;
2e0fef85 3380 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3381 if (rspiocbp == NULL) {
3382 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3383 "completion.\n", __func__);
dea3101e 3384 break;
3385 }
3386
ed957684
JS
3387 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3388 phba->iocb_rsp_size);
dea3101e 3389 irsp = &rspiocbp->iocb;
3390
7e56aa25
JS
3391 if (++pring->sli.sli3.rspidx >= portRspMax)
3392 pring->sli.sli3.rspidx = 0;
dea3101e 3393
a58cbd52
JS
3394 if (pring->ringno == LPFC_ELS_RING) {
3395 lpfc_debugfs_slow_ring_trc(phba,
3396 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3397 *(((uint32_t *) irsp) + 4),
3398 *(((uint32_t *) irsp) + 6),
3399 *(((uint32_t *) irsp) + 7));
3400 }
3401
7e56aa25
JS
3402 writel(pring->sli.sli3.rspidx,
3403 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3404
3772a991
JS
3405 spin_unlock_irqrestore(&phba->hbalock, iflag);
3406 /* Handle the response IOCB */
3407 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3408 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3409
3410 /*
3411 * If the port response put pointer has not been updated, sync
 3412		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3413 * response put pointer.
3414 */
7e56aa25 3415 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3416 portRspPut = le32_to_cpu(pgp->rspPutInx);
3417 }
7e56aa25 3418 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3419
92d7f7b0 3420 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3421 /* At least one response entry has been freed */
3422 pring->stats.iocb_rsp_full++;
3423 /* SET RxRE_RSP in Chip Att register */
3424 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3425 writel(status, phba->CAregaddr);
3426 readl(phba->CAregaddr); /* flush */
3427 }
3428 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3429 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3430 pring->stats.iocb_cmd_empty++;
3431
3432 /* Force update of the local copy of cmdGetInx */
7e56aa25 3433 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3434 lpfc_sli_resume_iocb(phba, pring);
3435
3436 if ((pring->lpfc_sli_cmd_available))
3437 (pring->lpfc_sli_cmd_available) (phba, pring);
3438
3439 }
3440
2e0fef85 3441 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3442 return;
dea3101e 3443}
3444
4f774513
JS
3445/**
3446 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3447 * @phba: Pointer to HBA context object.
3448 * @pring: Pointer to driver SLI ring object.
3449 * @mask: Host attention register mask for this ring.
3450 *
3451 * This function is called from the worker thread when there is a pending
3452 * ELS response iocb on the driver internal slow-path response iocb worker
 3453 * queue. The caller does not hold any lock. The function removes each
 3454 * response iocb from the response worker queue and calls the handle
3455 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3456 **/
3457static void
3458lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3459 struct lpfc_sli_ring *pring, uint32_t mask)
3460{
3461 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3462 struct hbq_dmabuf *dmabuf;
3463 struct lpfc_cq_event *cq_event;
4f774513
JS
3464 unsigned long iflag;
3465
45ed1190
JS
3466 spin_lock_irqsave(&phba->hbalock, iflag);
3467 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3468 spin_unlock_irqrestore(&phba->hbalock, iflag);
3469 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3470 /* Get the response iocb from the head of work queue */
3471 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3472 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3473 cq_event, struct lpfc_cq_event, list);
4f774513 3474 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3475
3476 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3477 case CQE_CODE_COMPL_WQE:
3478 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3479 cq_event);
45ed1190
JS
3480 /* Translate ELS WCQE to response IOCBQ */
3481 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3482 irspiocbq);
3483 if (irspiocbq)
3484 lpfc_sli_sp_handle_rspiocb(phba, pring,
3485 irspiocbq);
4d9ab994
JS
3486 break;
3487 case CQE_CODE_RECEIVE:
7851fe2c 3488 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3489 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3490 cq_event);
3491 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3492 break;
3493 default:
3494 break;
3495 }
4f774513
JS
3496 }
3497}
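/*
 * Sketch (assumed placement; the driver binds these pointers in its
 * API jump-table setup based on the detected SLI revision) of how the
 * lpfc_sli_handle_slow_ring_event() wrapper above resolves to the _s3
 * or _s4 handler defined in this file:
 */
static void __maybe_unused
lpfc_bind_slow_ring_handler_sketch(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
	else
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
}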
3498
e59058c4 3499/**
3621a710 3500 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3501 * @phba: Pointer to HBA context object.
3502 * @pring: Pointer to driver SLI ring object.
3503 *
3504 * This function aborts all iocbs in the given ring and frees all the iocb
3505 * objects in txq. This function issues an abort iocb for all the iocb commands
 3506 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3507 * the return of this function. The caller is not required to hold any locks.
3508 **/
2e0fef85 3509void
dea3101e 3510lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3511{
2534ba75 3512 LIST_HEAD(completions);
dea3101e 3513 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3514
92d7f7b0
JS
3515 if (pring->ringno == LPFC_ELS_RING) {
3516 lpfc_fabric_abort_hba(phba);
3517 }
3518
dea3101e 3519 /* Error everything on txq and txcmplq
3520 * First do the txq.
3521 */
2e0fef85 3522 spin_lock_irq(&phba->hbalock);
2534ba75 3523 list_splice_init(&pring->txq, &completions);
dea3101e 3524
3525 /* Next issue ABTS for everything on the txcmplq */
2534ba75
JS
3526 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3527 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 3528
2e0fef85 3529 spin_unlock_irq(&phba->hbalock);
dea3101e 3530
a257bf90
JS
3531 /* Cancel all the IOCBs from the completions list */
3532 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3533 IOERR_SLI_ABORTED);
dea3101e 3534}
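/*
 * Minimal sketch of the cancellation step above (assumption: this
 * approximates what lpfc_sli_cancel_iocbs() does elsewhere in this
 * file): each iocb pulled off the spliced list is either released
 * back to the pool or failed up to its owner with the given status.
 */
static void __maybe_unused
lpfc_cancel_iocbs_sketch(struct lpfc_hba *phba, struct list_head *iocblist,
			 uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb, *next_iocb;

	list_for_each_entry_safe(piocb, next_iocb, iocblist, list) {
		list_del_init(&piocb->list);
		if (!piocb->iocb_cmpl) {
			lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
}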
3535
a8e497d5 3536/**
3621a710 3537 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
3538 * @phba: Pointer to HBA context object.
3539 *
3540 * This function flushes all iocbs in the fcp ring and frees all the iocb
3541 * objects in txq and txcmplq. This function will not issue abort iocbs
 3542 * for the iocb commands in txcmplq; they are simply returned with
 3543 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3544 * slot has been permanently disabled.
3545 **/
3546void
3547lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3548{
3549 LIST_HEAD(txq);
3550 LIST_HEAD(txcmplq);
a8e497d5
JS
3551 struct lpfc_sli *psli = &phba->sli;
3552 struct lpfc_sli_ring *pring;
3553
3554 /* Currently, only one fcp ring */
3555 pring = &psli->ring[psli->fcp_ring];
3556
3557 spin_lock_irq(&phba->hbalock);
3558 /* Retrieve everything on txq */
3559 list_splice_init(&pring->txq, &txq);
a8e497d5
JS
3560
3561 /* Retrieve everything on the txcmplq */
3562 list_splice_init(&pring->txcmplq, &txcmplq);
4f2e66c6
JS
3563
3564 /* Indicate the I/O queues are flushed */
3565 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
3566 spin_unlock_irq(&phba->hbalock);
3567
3568 /* Flush the txq */
a257bf90
JS
3569 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3570 IOERR_SLI_DOWN);
a8e497d5
JS
3571
3572 /* Flush the txcmpq */
a257bf90
JS
3573 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3574 IOERR_SLI_DOWN);
a8e497d5
JS
3575}
3576
e59058c4 3577/**
3772a991 3578 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
3579 * @phba: Pointer to HBA context object.
3580 * @mask: Bit mask to be checked.
3581 *
 3582 * This function reads the host status register and compares it
 3583 * with the provided bit mask to check if the HBA completed
3584 * the restart. This function will wait in a loop for the
3585 * HBA to complete restart. If the HBA does not restart within
3586 * 15 iterations, the function will reset the HBA again. The
 3587 * function returns 1 when the HBA fails to restart, otherwise returns
 3588 * zero.
3589 **/
3772a991
JS
3590static int
3591lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 3592{
41415862
JW
3593 uint32_t status;
3594 int i = 0;
3595 int retval = 0;
dea3101e 3596
41415862 3597 /* Read the HBA Host Status Register */
9940b97b
JS
3598 if (lpfc_readl(phba->HSregaddr, &status))
3599 return 1;
dea3101e 3600
41415862
JW
3601 /*
3602 * Check status register every 100ms for 5 retries, then every
3603 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3604 * every 2.5 sec for 4.
 3605 * Break out of the loop if errors occurred during init.
3606 */
3607 while (((status & mask) != mask) &&
3608 !(status & HS_FFERM) &&
3609 i++ < 20) {
dea3101e 3610
41415862
JW
3611 if (i <= 5)
3612 msleep(10);
3613 else if (i <= 10)
3614 msleep(500);
3615 else
3616 msleep(2500);
dea3101e 3617
41415862 3618 if (i == 15) {
2e0fef85 3619 /* Do post */
92d7f7b0 3620 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
3621 lpfc_sli_brdrestart(phba);
3622 }
3623 /* Read the HBA Host Status Register */
9940b97b
JS
3624 if (lpfc_readl(phba->HSregaddr, &status)) {
3625 retval = 1;
3626 break;
3627 }
41415862 3628 }
dea3101e 3629
41415862
JW
3630 /* Check to see if any errors occurred during init */
3631 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
3632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3633 "2751 Adapter failed to restart, "
3634 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3635 status,
3636 readl(phba->MBslimaddr + 0xa8),
3637 readl(phba->MBslimaddr + 0xac));
2e0fef85 3638 phba->link_state = LPFC_HBA_ERROR;
41415862 3639 retval = 1;
dea3101e 3640 }
dea3101e 3641
41415862
JW
3642 return retval;
3643}
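/*
 * Worked example of the polling budget above: iterations 1-5 sleep
 * 10ms, 6-10 sleep 500ms, 11-20 sleep 2500ms, and a board restart is
 * attempted at iteration 15, so the worst-case wait is roughly
 * 5*10ms + 5*500ms + 10*2500ms ~= 27.5 seconds before the routine
 * reports failure.
 */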
dea3101e 3644
da0436e9
JS
3645/**
3646 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3647 * @phba: Pointer to HBA context object.
3648 * @mask: Bit mask to be checked.
3649 *
 3650 * This function checks the host status register to see if the HBA is
 3651 * ready. This function will wait in a loop for the HBA to be ready.
 3652 * If the HBA is not ready, the function will reset the HBA PCI
 3653 * function again. The function returns 1 when the HBA fails to become
 3654 * ready, otherwise returns zero.
3655 **/
3656static int
3657lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3658{
3659 uint32_t status;
3660 int retval = 0;
3661
3662 /* Read the HBA Host Status Register */
3663 status = lpfc_sli4_post_status_check(phba);
3664
3665 if (status) {
3666 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3667 lpfc_sli_brdrestart(phba);
3668 status = lpfc_sli4_post_status_check(phba);
3669 }
3670
3671 /* Check to see if any errors occurred during init */
3672 if (status) {
3673 phba->link_state = LPFC_HBA_ERROR;
3674 retval = 1;
3675 } else
3676 phba->sli4_hba.intr_enable = 0;
3677
3678 return retval;
3679}
3680
3681/**
 3682 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3683 * @phba: Pointer to HBA context object.
3684 * @mask: Bit mask to be checked.
3685 *
 3686 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3687 * from the API jump table function pointer from the lpfc_hba struct.
3688 **/
3689int
3690lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3691{
3692 return phba->lpfc_sli_brdready(phba, mask);
3693}
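/*
 * Usage sketch (hypothetical call site): init paths typically poll
 * with the same firmware/mailbox-ready mask that
 * lpfc_sli_chipset_init() below checks in the host status register.
 */
static int __maybe_unused
lpfc_wait_board_ready_sketch(struct lpfc_hba *phba)
{
	/* returns 0 once HS_FFRDY and HS_MBRDY are both set, else 1 */
	return lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
}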
3694
9290831f
JS
3695#define BARRIER_TEST_PATTERN (0xdeadbeef)
3696
e59058c4 3697/**
3621a710 3698 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
3699 * @phba: Pointer to HBA context object.
3700 *
1b51197d
JS
3701 * This function is called before resetting an HBA. This function is called
3702 * with hbalock held and requests HBA to quiesce DMAs before a reset.
e59058c4 3703 **/
2e0fef85 3704void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 3705{
65a29c16
JS
3706 uint32_t __iomem *resp_buf;
3707 uint32_t __iomem *mbox_buf;
9290831f 3708 volatile uint32_t mbox;
9940b97b 3709 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
3710 int i;
3711 uint8_t hdrtype;
3712
3713 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3714 if (hdrtype != 0x80 ||
3715 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3716 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3717 return;
3718
3719 /*
3720 * Tell the other part of the chip to suspend temporarily all
3721 * its DMA activity.
3722 */
65a29c16 3723 resp_buf = phba->MBslimaddr;
9290831f
JS
3724
3725 /* Disable the error attention */
9940b97b
JS
3726 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3727 return;
9290831f
JS
3728 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3729 readl(phba->HCregaddr); /* flush */
2e0fef85 3730 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 3731
9940b97b
JS
3732 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3733 return;
3734 if (ha_copy & HA_ERATT) {
9290831f
JS
3735 /* Clear Chip error bit */
3736 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3737 phba->pport->stopped = 1;
9290831f
JS
3738 }
3739
3740 mbox = 0;
3741 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3742 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3743
3744 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 3745 mbox_buf = phba->MBslimaddr;
9290831f
JS
3746 writel(mbox, mbox_buf);
3747
9940b97b
JS
3748 for (i = 0; i < 50; i++) {
3749 if (lpfc_readl((resp_buf + 1), &resp_data))
3750 return;
3751 if (resp_data != ~(BARRIER_TEST_PATTERN))
3752 mdelay(1);
3753 else
3754 break;
3755 }
3756 resp_data = 0;
3757 if (lpfc_readl((resp_buf + 1), &resp_data))
3758 return;
3759 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 3760 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 3761 phba->pport->stopped)
9290831f
JS
3762 goto restore_hc;
3763 else
3764 goto clear_errat;
3765 }
3766
3767 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
3768 resp_data = 0;
3769 for (i = 0; i < 500; i++) {
3770 if (lpfc_readl(resp_buf, &resp_data))
3771 return;
3772 if (resp_data != mbox)
3773 mdelay(1);
3774 else
3775 break;
3776 }
9290831f
JS
3777
3778clear_errat:
3779
9940b97b
JS
3780 while (++i < 500) {
3781 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3782 return;
3783 if (!(ha_copy & HA_ERATT))
3784 mdelay(1);
3785 else
3786 break;
3787 }
9290831f
JS
3788
3789 if (readl(phba->HAregaddr) & HA_ERATT) {
3790 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3791 phba->pport->stopped = 1;
9290831f
JS
3792 }
3793
3794restore_hc:
2e0fef85 3795 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
3796 writel(hc_copy, phba->HCregaddr);
3797 readl(phba->HCregaddr); /* flush */
3798}
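/*
 * Summary of the barrier handshake above (Helios/Thor parts only):
 * (1) mask error attention and set LS_IGNORE_ERATT, (2) post a
 * chip-owned MBX_KILL_BOARD word to SLIM with BARRIER_TEST_PATTERN
 * in the following word, (3) poll up to ~50ms for the chip to write
 * back the pattern's complement, signalling its DMA has quiesced,
 * (4) poll for the mailbox word to become host-owned again, then
 * (5) clear any resulting ERATT and restore the saved HC register.
 */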
3799
e59058c4 3800/**
3621a710 3801 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
3802 * @phba: Pointer to HBA context object.
3803 *
3804 * This function issues a kill_board mailbox command and waits for
3805 * the error attention interrupt. This function is called for stopping
3806 * the firmware processing. The caller is not required to hold any
3807 * locks. This function calls lpfc_hba_down_post function to free
3808 * any pending commands after the kill. The function will return 1 when it
 3809 * fails to kill the board, else will return 0.
3810 **/
41415862 3811int
2e0fef85 3812lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
3813{
3814 struct lpfc_sli *psli;
3815 LPFC_MBOXQ_t *pmb;
3816 uint32_t status;
3817 uint32_t ha_copy;
3818 int retval;
3819 int i = 0;
dea3101e 3820
41415862 3821 psli = &phba->sli;
dea3101e 3822
41415862 3823 /* Kill HBA */
ed957684 3824 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
3825 "0329 Kill HBA Data: x%x x%x\n",
3826 phba->pport->port_state, psli->sli_flag);
41415862 3827
98c9ea5c
JS
3828 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3829 if (!pmb)
41415862 3830 return 1;
41415862
JW
3831
3832 /* Disable the error attention */
2e0fef85 3833 spin_lock_irq(&phba->hbalock);
9940b97b
JS
3834 if (lpfc_readl(phba->HCregaddr, &status)) {
3835 spin_unlock_irq(&phba->hbalock);
3836 mempool_free(pmb, phba->mbox_mem_pool);
3837 return 1;
3838 }
41415862
JW
3839 status &= ~HC_ERINT_ENA;
3840 writel(status, phba->HCregaddr);
3841 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
3842 phba->link_flag |= LS_IGNORE_ERATT;
3843 spin_unlock_irq(&phba->hbalock);
41415862
JW
3844
3845 lpfc_kill_board(phba, pmb);
3846 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3847 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3848
3849 if (retval != MBX_SUCCESS) {
3850 if (retval != MBX_BUSY)
3851 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
3852 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3853 "2752 KILL_BOARD command failed retval %d\n",
3854 retval);
2e0fef85
JS
3855 spin_lock_irq(&phba->hbalock);
3856 phba->link_flag &= ~LS_IGNORE_ERATT;
3857 spin_unlock_irq(&phba->hbalock);
41415862
JW
3858 return 1;
3859 }
3860
f4b4c68f
JS
3861 spin_lock_irq(&phba->hbalock);
3862 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3863 spin_unlock_irq(&phba->hbalock);
9290831f 3864
41415862
JW
3865 mempool_free(pmb, phba->mbox_mem_pool);
3866
3867 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3868 * attention every 100ms for 3 seconds. If we don't get ERATT after
3869 * 3 seconds we still set HBA_ERROR state because the status of the
3870 * board is now undefined.
3871 */
9940b97b
JS
3872 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3873 return 1;
41415862
JW
3874 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3875 mdelay(100);
9940b97b
JS
3876 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3877 return 1;
41415862
JW
3878 }
3879
3880 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
3881 if (ha_copy & HA_ERATT) {
3882 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3883 phba->pport->stopped = 1;
9290831f 3884 }
2e0fef85 3885 spin_lock_irq(&phba->hbalock);
41415862 3886 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 3887 psli->mbox_active = NULL;
2e0fef85
JS
3888 phba->link_flag &= ~LS_IGNORE_ERATT;
3889 spin_unlock_irq(&phba->hbalock);
41415862 3890
41415862 3891 lpfc_hba_down_post(phba);
2e0fef85 3892 phba->link_state = LPFC_HBA_ERROR;
41415862 3893
2e0fef85 3894 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 3895}
3896
e59058c4 3897/**
3772a991 3898 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
3899 * @phba: Pointer to HBA context object.
3900 *
3901 * This function resets the HBA by writing HC_INITFF to the control
3902 * register. After the HBA resets, this function resets all the iocb ring
3903 * indices. This function disables PCI layer parity checking during
3904 * the reset.
3905 * This function returns 0 always.
3906 * The caller is not required to hold any locks.
3907 **/
41415862 3908int
2e0fef85 3909lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 3910{
41415862 3911 struct lpfc_sli *psli;
dea3101e 3912 struct lpfc_sli_ring *pring;
41415862 3913 uint16_t cfg_value;
dea3101e 3914 int i;
dea3101e 3915
41415862 3916 psli = &phba->sli;
dea3101e 3917
41415862
JW
3918 /* Reset HBA */
3919 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3920 "0325 Reset HBA Data: x%x x%x\n",
2e0fef85 3921 phba->pport->port_state, psli->sli_flag);
dea3101e 3922
3923 /* perform board reset */
3924 phba->fc_eventTag = 0;
4d9ab994 3925 phba->link_events = 0;
2e0fef85
JS
3926 phba->pport->fc_myDID = 0;
3927 phba->pport->fc_prevDID = 0;
dea3101e 3928
41415862
JW
3929 /* Turn off parity checking and serr during the physical reset */
3930 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3931 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3932 (cfg_value &
3933 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3934
3772a991
JS
3935 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3936
41415862
JW
3937 /* Now toggle INITFF bit in the Host Control Register */
3938 writel(HC_INITFF, phba->HCregaddr);
3939 mdelay(1);
3940 readl(phba->HCregaddr); /* flush */
3941 writel(0, phba->HCregaddr);
3942 readl(phba->HCregaddr); /* flush */
3943
3944 /* Restore PCI cmd register */
3945 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 3946
3947 /* Initialize relevant SLI info */
41415862
JW
3948 for (i = 0; i < psli->num_rings; i++) {
3949 pring = &psli->ring[i];
dea3101e 3950 pring->flag = 0;
7e56aa25
JS
3951 pring->sli.sli3.rspidx = 0;
3952 pring->sli.sli3.next_cmdidx = 0;
3953 pring->sli.sli3.local_getidx = 0;
3954 pring->sli.sli3.cmdidx = 0;
dea3101e 3955 pring->missbufcnt = 0;
3956 }
dea3101e 3957
2e0fef85 3958 phba->link_state = LPFC_WARM_START;
41415862
JW
3959 return 0;
3960}
3961
e59058c4 3962/**
da0436e9
JS
3963 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3964 * @phba: Pointer to HBA context object.
3965 *
 3966 * This function resets a SLI4 HBA. This function disables PCI layer parity
 3967 * checking while resetting the device. The caller is not required to hold
3968 * any locks.
3969 *
3970 * This function returns 0 always.
3971 **/
3972int
3973lpfc_sli4_brdreset(struct lpfc_hba *phba)
3974{
3975 struct lpfc_sli *psli = &phba->sli;
3976 uint16_t cfg_value;
27b01b82 3977 int rc;
da0436e9
JS
3978
3979 /* Reset HBA */
3980 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3981 "0295 Reset HBA Data: x%x x%x\n",
3982 phba->pport->port_state, psli->sli_flag);
3983
3984 /* perform board reset */
3985 phba->fc_eventTag = 0;
4d9ab994 3986 phba->link_events = 0;
da0436e9
JS
3987 phba->pport->fc_myDID = 0;
3988 phba->pport->fc_prevDID = 0;
3989
da0436e9
JS
3990 spin_lock_irq(&phba->hbalock);
3991 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3992 phba->fcf.fcf_flag = 0;
da0436e9
JS
3993 spin_unlock_irq(&phba->hbalock);
3994
3995 /* Now physically reset the device */
3996 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3997 "0389 Performing PCI function reset!\n");
be858b65
JS
3998
3999 /* Turn off parity checking and serr during the physical reset */
4000 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4001 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4002 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4003
88318816 4004 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4005 rc = lpfc_pci_function_reset(phba);
88318816 4006 lpfc_sli4_queue_destroy(phba);
da0436e9 4007
be858b65
JS
4008 /* Restore PCI cmd register */
4009 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4010
27b01b82 4011 return rc;
da0436e9
JS
4012}
4013
4014/**
4015 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4016 * @phba: Pointer to HBA context object.
4017 *
4018 * This function is called in the SLI initialization code path to
4019 * restart the HBA. The caller is not required to hold any lock.
4020 * This function writes MBX_RESTART mailbox command to the SLIM and
4021 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4022 * function to free any pending commands. The function enables
4023 * POST only during the first initialization. The function returns zero.
4024 * The function does not guarantee completion of MBX_RESTART mailbox
4025 * command before the return of this function.
4026 **/
da0436e9
JS
4027static int
4028lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4029{
4030 MAILBOX_t *mb;
4031 struct lpfc_sli *psli;
41415862
JW
4032 volatile uint32_t word0;
4033 void __iomem *to_slim;
0d878419 4034 uint32_t hba_aer_enabled;
41415862 4035
2e0fef85 4036 spin_lock_irq(&phba->hbalock);
41415862 4037
0d878419
JS
4038 /* Take PCIe device Advanced Error Reporting (AER) state */
4039 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4040
41415862
JW
4041 psli = &phba->sli;
4042
4043 /* Restart HBA */
4044 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4045 "0337 Restart HBA Data: x%x x%x\n",
2e0fef85 4046 phba->pport->port_state, psli->sli_flag);
41415862
JW
4047
4048 word0 = 0;
4049 mb = (MAILBOX_t *) &word0;
4050 mb->mbxCommand = MBX_RESTART;
4051 mb->mbxHc = 1;
4052
9290831f
JS
4053 lpfc_reset_barrier(phba);
4054
41415862
JW
4055 to_slim = phba->MBslimaddr;
4056 writel(*(uint32_t *) mb, to_slim);
4057 readl(to_slim); /* flush */
4058
4059 /* Only skip post after fc_ffinit is completed */
eaf15d5b 4060 if (phba->pport->port_state)
41415862 4061 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4062 else
41415862 4063 word0 = 0; /* This is really setting up word1 */
65a29c16 4064 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4065 writel(*(uint32_t *) mb, to_slim);
4066 readl(to_slim); /* flush */
dea3101e 4067
41415862 4068 lpfc_sli_brdreset(phba);
2e0fef85
JS
4069 phba->pport->stopped = 0;
4070 phba->link_state = LPFC_INIT_START;
da0436e9 4071 phba->hba_flag = 0;
2e0fef85 4072 spin_unlock_irq(&phba->hbalock);
41415862 4073
64ba8818
JS
4074 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4075 psli->stats_start = get_seconds();
4076
eaf15d5b
JS
4077 /* Give the INITFF and Post time to settle. */
4078 mdelay(100);
41415862 4079
0d878419
JS
4080 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4081 if (hba_aer_enabled)
4082 pci_disable_pcie_error_reporting(phba->pcidev);
4083
41415862 4084 lpfc_hba_down_post(phba);
dea3101e 4085
4086 return 0;
4087}
4088
da0436e9
JS
4089/**
4090 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4091 * @phba: Pointer to HBA context object.
4092 *
4093 * This function is called in the SLI initialization code path to restart
4094 * a SLI4 HBA. The caller is not required to hold any lock.
4095 * At the end of the function, it calls lpfc_hba_down_post function to
4096 * free any pending commands.
4097 **/
4098static int
4099lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4100{
4101 struct lpfc_sli *psli = &phba->sli;
75baf696 4102 uint32_t hba_aer_enabled;
27b01b82 4103 int rc;
da0436e9
JS
4104
4105 /* Restart HBA */
4106 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4107 "0296 Restart HBA Data: x%x x%x\n",
4108 phba->pport->port_state, psli->sli_flag);
4109
75baf696
JS
4110 /* Take PCIe device Advanced Error Reporting (AER) state */
4111 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4112
27b01b82 4113 rc = lpfc_sli4_brdreset(phba);
da0436e9
JS
4114
4115 spin_lock_irq(&phba->hbalock);
4116 phba->pport->stopped = 0;
4117 phba->link_state = LPFC_INIT_START;
4118 phba->hba_flag = 0;
4119 spin_unlock_irq(&phba->hbalock);
4120
4121 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4122 psli->stats_start = get_seconds();
4123
75baf696
JS
4124 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4125 if (hba_aer_enabled)
4126 pci_disable_pcie_error_reporting(phba->pcidev);
4127
da0436e9
JS
4128 lpfc_hba_down_post(phba);
4129
27b01b82 4130 return rc;
da0436e9
JS
4131}
4132
4133/**
4134 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4135 * @phba: Pointer to HBA context object.
4136 *
4137 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4138 * API jump table function pointer from the lpfc_hba struct.
4139**/
4140int
4141lpfc_sli_brdrestart(struct lpfc_hba *phba)
4142{
4143 return phba->lpfc_sli_brdrestart(phba);
4144}
4145
e59058c4 4146/**
3621a710 4147 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
4148 * @phba: Pointer to HBA context object.
4149 *
 4150 * This function is called after an HBA restart to wait for successful
 4151 * restart of the HBA. Successful restart of the HBA is indicated by
 4152 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
 4153 * iterations, the function will restart the HBA again. The function returns
 4154 * zero if the HBA successfully restarts, else returns a negative error code.
4155 **/
dea3101e 4156static int
4157lpfc_sli_chipset_init(struct lpfc_hba *phba)
4158{
4159 uint32_t status, i = 0;
4160
4161 /* Read the HBA Host Status Register */
9940b97b
JS
4162 if (lpfc_readl(phba->HSregaddr, &status))
4163 return -EIO;
dea3101e 4164
4165 /* Check status register to see what current state is */
4166 i = 0;
4167 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4168
dcf2a4e0
JS
 4169		/* Check every 10ms for 10 retries, then every 100ms for 90
 4170		 * retries, then every 1 sec for 50 retries, for a total of
 4171		 * ~60 seconds before resetting the board again, after which we
 4172		 * check every 1 sec for 50 more retries. The up-to-60-second
 4173		 * wait before the board is ready is required for Falcon FIPS
 4174		 * zeroization to complete; any board reset in between would
 4175		 * restart zeroization and further delay board readiness.
dea3101e 4176 */
dcf2a4e0 4177 if (i++ >= 200) {
dea3101e 4178 /* Adapter failed to init, timeout, status reg
4179 <status> */
ed957684 4180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4181 "0436 Adapter failed to init, "
09372820
JS
4182 "timeout, status reg x%x, "
4183 "FW Data: A8 x%x AC x%x\n", status,
4184 readl(phba->MBslimaddr + 0xa8),
4185 readl(phba->MBslimaddr + 0xac));
2e0fef85 4186 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4187 return -ETIMEDOUT;
4188 }
4189
4190 /* Check to see if any errors occurred during init */
4191 if (status & HS_FFERM) {
4192 /* ERROR: During chipset initialization */
4193 /* Adapter failed to init, chipset, status reg
4194 <status> */
ed957684 4195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4196 "0437 Adapter failed to init, "
09372820
JS
4197 "chipset, status reg x%x, "
4198 "FW Data: A8 x%x AC x%x\n", status,
4199 readl(phba->MBslimaddr + 0xa8),
4200 readl(phba->MBslimaddr + 0xac));
2e0fef85 4201 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4202 return -EIO;
4203 }
4204
dcf2a4e0 4205 if (i <= 10)
dea3101e 4206 msleep(10);
dcf2a4e0
JS
4207 else if (i <= 100)
4208 msleep(100);
4209 else
4210 msleep(1000);
dea3101e 4211
dcf2a4e0
JS
4212 if (i == 150) {
4213 /* Do post */
92d7f7b0 4214 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4215 lpfc_sli_brdrestart(phba);
dea3101e 4216 }
4217 /* Read the HBA Host Status Register */
9940b97b
JS
4218 if (lpfc_readl(phba->HSregaddr, &status))
4219 return -EIO;
dea3101e 4220 }
4221
4222 /* Check to see if any errors occurred during init */
4223 if (status & HS_FFERM) {
4224 /* ERROR: During chipset initialization */
4225 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4227 "0438 Adapter failed to init, chipset, "
09372820
JS
4228 "status reg x%x, "
4229 "FW Data: A8 x%x AC x%x\n", status,
4230 readl(phba->MBslimaddr + 0xa8),
4231 readl(phba->MBslimaddr + 0xac));
2e0fef85 4232 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4233 return -EIO;
4234 }
4235
4236 /* Clear all interrupt enable conditions */
4237 writel(0, phba->HCregaddr);
4238 readl(phba->HCregaddr); /* flush */
4239
4240 /* setup host attn register */
4241 writel(0xffffffff, phba->HAregaddr);
4242 readl(phba->HAregaddr); /* flush */
4243 return 0;
4244}
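/*
 * Worked example of the wait budget above: iterations 1-10 sleep
 * 10ms, 11-100 sleep 100ms, and later iterations sleep 1s, so the
 * board restart at i == 150 lands after roughly
 * 10*10ms + 90*100ms + 50*1s ~= 59 seconds (the ~60s FIPS
 * zeroization window), and the loop times out at i == 200 after
 * about 50 further 1-second polls.
 */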
4245
e59058c4 4246/**
3621a710 4247 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
4248 *
4249 * This function calculates and returns the number of HBQs required to be
4250 * configured.
4251 **/
78b2d852 4252int
ed957684
JS
4253lpfc_sli_hbq_count(void)
4254{
92d7f7b0 4255 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
4256}
4257
e59058c4 4258/**
3621a710 4259 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
4260 *
4261 * This function adds the number of hbq entries in every HBQ to get
4262 * the total number of hbq entries required for the HBA and returns
4263 * the total count.
4264 **/
ed957684
JS
4265static int
4266lpfc_sli_hbq_entry_count(void)
4267{
4268 int hbq_count = lpfc_sli_hbq_count();
4269 int count = 0;
4270 int i;
4271
4272 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4273 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
4274 return count;
4275}
4276
e59058c4 4277/**
3621a710 4278 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
4279 *
4280 * This function calculates amount of memory required for all hbq entries
4281 * to be configured and returns the total memory required.
4282 **/
dea3101e 4283int
ed957684
JS
4284lpfc_sli_hbq_size(void)
4285{
4286 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4287}
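/*
 * Worked example (illustrative entry counts, not the driver's real
 * configuration): with two HBQ definitions of 256 and 128 entries,
 * lpfc_sli_hbq_entry_count() returns 384 and lpfc_sli_hbq_size()
 * returns 384 * sizeof(struct lpfc_hbq_entry) bytes for all HBQ
 * entries to be configured.
 */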
4288
e59058c4 4289/**
3621a710 4290 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
4291 * @phba: Pointer to HBA context object.
4292 *
4293 * This function is called during the SLI initialization to configure
4294 * all the HBQs and post buffers to the HBQ. The caller is not
4295 * required to hold any locks. This function will return zero if successful
4296 * else it will return negative error code.
4297 **/
ed957684
JS
4298static int
4299lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4300{
4301 int hbq_count = lpfc_sli_hbq_count();
4302 LPFC_MBOXQ_t *pmb;
4303 MAILBOX_t *pmbox;
4304 uint32_t hbqno;
4305 uint32_t hbq_entry_index;
ed957684 4306
92d7f7b0
JS
4307 /* Get a Mailbox buffer to setup mailbox
4308 * commands for HBA initialization
4309 */
ed957684
JS
4310 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4311
4312 if (!pmb)
4313 return -ENOMEM;
4314
04c68496 4315 pmbox = &pmb->u.mb;
ed957684
JS
4316
4317 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4318 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4319 phba->hbq_in_use = 1;
ed957684
JS
4320
4321 hbq_entry_index = 0;
4322 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4323 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4324 phba->hbqs[hbqno].hbqPutIdx = 0;
4325 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4326 phba->hbqs[hbqno].entry_count =
92d7f7b0 4327 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
4328 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4329 hbq_entry_index, pmb);
ed957684
JS
4330 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4331
4332 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4333 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4334 mbxStatus <status>, ring <num> */
4335
4336 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4337 LOG_SLI | LOG_VPORT,
e8b62011 4338 "1805 Adapter failed to init. "
ed957684 4339 "Data: x%x x%x x%x\n",
e8b62011 4340 pmbox->mbxCommand,
ed957684
JS
4341 pmbox->mbxStatus, hbqno);
4342
4343 phba->link_state = LPFC_HBA_ERROR;
4344 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 4345 return -ENXIO;
ed957684
JS
4346 }
4347 }
4348 phba->hbq_count = hbq_count;
4349
ed957684
JS
4350 mempool_free(pmb, phba->mbox_mem_pool);
4351
92d7f7b0 4352 /* Initially populate or replenish the HBQs */
d7c255b2
JS
4353 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4354 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
4355 return 0;
4356}
4357
4f774513
JS
4358/**
4359 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4360 * @phba: Pointer to HBA context object.
4361 *
4362 * This function is called during the SLI initialization to configure
4363 * all the HBQs and post buffers to the HBQ. The caller is not
4364 * required to hold any locks. This function will return zero if successful
4365 * else it will return negative error code.
4366 **/
4367static int
4368lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4369{
4370 phba->hbq_in_use = 1;
4371 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4372 phba->hbq_count = 1;
4373 /* Initially populate or replenish the HBQs */
4374 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4375 return 0;
4376}
4377
e59058c4 4378/**
3621a710 4379 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
4380 * @phba: Pointer to HBA context object.
4381 * @sli_mode: sli mode - 2/3
4382 *
 4383 * This function is called by the sli initialization code path
4384 * to issue config_port mailbox command. This function restarts the
4385 * HBA firmware and issues a config_port mailbox command to configure
4386 * the SLI interface in the sli mode specified by sli_mode
4387 * variable. The caller is not required to hold any locks.
4388 * The function returns 0 if successful, else returns negative error
4389 * code.
4390 **/
9399627f
JS
4391int
4392lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e 4393{
4394 LPFC_MBOXQ_t *pmb;
4395 uint32_t resetcount = 0, rc = 0, done = 0;
4396
4397 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4398 if (!pmb) {
2e0fef85 4399 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4400 return -ENOMEM;
4401 }
4402
ed957684 4403 phba->sli_rev = sli_mode;
dea3101e 4404 while (resetcount < 2 && !done) {
2e0fef85 4405 spin_lock_irq(&phba->hbalock);
1c067a42 4406 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4407 spin_unlock_irq(&phba->hbalock);
92d7f7b0 4408 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4409 lpfc_sli_brdrestart(phba);
dea3101e 4410 rc = lpfc_sli_chipset_init(phba);
4411 if (rc)
4412 break;
4413
2e0fef85 4414 spin_lock_irq(&phba->hbalock);
1c067a42 4415 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4416 spin_unlock_irq(&phba->hbalock);
dea3101e 4417 resetcount++;
4418
ed957684
JS
4419 /* Call pre CONFIG_PORT mailbox command initialization. A
4420 * value of 0 means the call was successful. Any other
4421 * nonzero value is a failure, but if ERESTART is returned,
4422 * the driver may reset the HBA and try again.
4423 */
dea3101e 4424 rc = lpfc_config_port_prep(phba);
4425 if (rc == -ERESTART) {
ed957684 4426 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 4427 continue;
34b02dcd 4428 } else if (rc)
dea3101e 4429 break;
6d368e53 4430
2e0fef85 4431 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 4432 lpfc_config_port(phba, pmb);
4433 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
34b02dcd
JS
4434 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4435 LPFC_SLI3_HBQ_ENABLED |
4436 LPFC_SLI3_CRP_ENABLED |
bc73905a
JS
4437 LPFC_SLI3_BG_ENABLED |
4438 LPFC_SLI3_DSS_ENABLED);
ed957684 4439 if (rc != MBX_SUCCESS) {
dea3101e 4440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4441 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 4442 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 4443 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 4444 spin_lock_irq(&phba->hbalock);
04c68496 4445 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
2e0fef85
JS
4446 spin_unlock_irq(&phba->hbalock);
4447 rc = -ENXIO;
04c68496
JS
4448 } else {
4449 /* Allow asynchronous mailbox command to go through */
4450 spin_lock_irq(&phba->hbalock);
4451 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4452 spin_unlock_irq(&phba->hbalock);
ed957684 4453 done = 1;
cb69f7de
JS
4454
4455 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4456 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4457 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4458 "3110 Port did not grant ASABT\n");
04c68496 4459 }
dea3101e 4460 }
ed957684
JS
4461 if (!done) {
4462 rc = -EINVAL;
4463 goto do_prep_failed;
4464 }
04c68496
JS
4465 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4466 if (!pmb->u.mb.un.varCfgPort.cMA) {
34b02dcd
JS
4467 rc = -ENXIO;
4468 goto do_prep_failed;
4469 }
04c68496 4470 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 4471 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
04c68496
JS
4472 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4473 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4474 phba->max_vpi : phba->max_vports;
4475
34b02dcd
JS
4476 } else
4477 phba->max_vpi = 0;
bc73905a
JS
4478 phba->fips_level = 0;
4479 phba->fips_spec_rev = 0;
4480 if (pmb->u.mb.un.varCfgPort.gdss) {
04c68496 4481 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
bc73905a
JS
4482 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4483 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4484 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4485 "2850 Security Crypto Active. FIPS x%d "
4486 "(Spec Rev: x%d)",
4487 phba->fips_level, phba->fips_spec_rev);
4488 }
4489 if (pmb->u.mb.un.varCfgPort.sec_err) {
4490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4491 "2856 Config Port Security Crypto "
4492 "Error: x%x ",
4493 pmb->u.mb.un.varCfgPort.sec_err);
4494 }
04c68496 4495 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 4496 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 4497 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 4498 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
6e7288d9
JS
4499
4500 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4501 phba->port_gp = phba->mbox->us.s3_pgp.port;
e2a0a9d6
JS
4502
4503 if (phba->cfg_enable_bg) {
04c68496 4504 if (pmb->u.mb.un.varCfgPort.gbg)
e2a0a9d6
JS
4505 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4506 else
4507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4508 "0443 Adapter did not grant "
4509 "BlockGuard\n");
4510 }
34b02dcd 4511 } else {
8f34f4ce 4512 phba->hbq_get = NULL;
34b02dcd 4513 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 4514 phba->max_vpi = 0;
ed957684 4515 }
92d7f7b0 4516do_prep_failed:
ed957684
JS
4517 mempool_free(pmb, phba->mbox_mem_pool);
4518 return rc;
4519}
4520
e59058c4
JS
4521
4522/**
3621a710 4523 * lpfc_sli_hba_setup - SLI initialization function
e59058c4
JS
4524 * @phba: Pointer to HBA context object.
4525 *
 4526 * This function is the main SLI initialization function. This function
 4527 * is called by the HBA initialization code, HBA reset code and HBA
4528 * error attention handler code. Caller is not required to hold any
4529 * locks. This function issues config_port mailbox command to configure
4530 * the SLI, setup iocb rings and HBQ rings. In the end the function
4531 * calls the config_port_post function to issue init_link mailbox
4532 * command and to start the discovery. The function will return zero
4533 * if successful, else it will return negative error code.
4534 **/
ed957684
JS
4535int
4536lpfc_sli_hba_setup(struct lpfc_hba *phba)
4537{
4538 uint32_t rc;
6d368e53
JS
4539 int mode = 3, i;
4540 int longs;
ed957684
JS
4541
4542 switch (lpfc_sli_mode) {
4543 case 2:
78b2d852 4544 if (phba->cfg_enable_npiv) {
92d7f7b0 4545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011 4546 "1824 NPIV enabled: Override lpfc_sli_mode "
92d7f7b0 4547 "parameter (%d) to auto (0).\n",
e8b62011 4548 lpfc_sli_mode);
92d7f7b0
JS
4549 break;
4550 }
ed957684
JS
4551 mode = 2;
4552 break;
4553 case 0:
4554 case 3:
4555 break;
4556 default:
92d7f7b0 4557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4558 "1819 Unrecognized lpfc_sli_mode "
4559 "parameter: %d.\n", lpfc_sli_mode);
ed957684
JS
4560
4561 break;
4562 }
4563
9399627f
JS
4564 rc = lpfc_sli_config_port(phba, mode);
4565
ed957684 4566 if (rc && lpfc_sli_mode == 3)
92d7f7b0 4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
4568 "1820 Unable to select SLI-3. "
4569 "Not supported by adapter.\n");
ed957684 4570 if (rc && mode != 2)
9399627f 4571 rc = lpfc_sli_config_port(phba, 2);
ed957684 4572 if (rc)
dea3101e 4573 goto lpfc_sli_hba_setup_error;
4574
0d878419
JS
4575 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4576 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4577 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4578 if (!rc) {
4579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4580 "2709 This device supports "
4581 "Advanced Error Reporting (AER)\n");
4582 spin_lock_irq(&phba->hbalock);
4583 phba->hba_flag |= HBA_AER_ENABLED;
4584 spin_unlock_irq(&phba->hbalock);
4585 } else {
4586 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4587 "2708 This device does not support "
4588 "Advanced Error Reporting (AER)\n");
4589 phba->cfg_aer_support = 0;
4590 }
4591 }
4592
ed957684
JS
4593 if (phba->sli_rev == 3) {
4594 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4595 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
ed957684
JS
4596 } else {
4597 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4598 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 4599 phba->sli3_options = 0;
ed957684
JS
4600 }
4601
4602 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4603 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4604 phba->sli_rev, phba->max_vpi);
ed957684 4605 rc = lpfc_sli_ring_map(phba);
dea3101e 4606
4607 if (rc)
4608 goto lpfc_sli_hba_setup_error;
4609
6d368e53
JS
4610 /* Initialize VPIs. */
4611 if (phba->sli_rev == LPFC_SLI_REV3) {
4612 /*
4613 * The VPI bitmask and physical ID array are allocated
4614 * and initialized once only - at driver load. A port
4615 * reset doesn't need to reinitialize this memory.
4616 */
4617 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4618 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4619 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4620 GFP_KERNEL);
4621 if (!phba->vpi_bmask) {
4622 rc = -ENOMEM;
4623 goto lpfc_sli_hba_setup_error;
4624 }
4625
4626 phba->vpi_ids = kzalloc(
4627 (phba->max_vpi+1) * sizeof(uint16_t),
4628 GFP_KERNEL);
4629 if (!phba->vpi_ids) {
4630 kfree(phba->vpi_bmask);
4631 rc = -ENOMEM;
4632 goto lpfc_sli_hba_setup_error;
4633 }
4634 for (i = 0; i < phba->max_vpi; i++)
4635 phba->vpi_ids[i] = i;
4636 }
4637 }
4638
9399627f 4639 /* Init HBQs */
ed957684
JS
4640 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4641 rc = lpfc_sli_hbq_setup(phba);
4642 if (rc)
4643 goto lpfc_sli_hba_setup_error;
4644 }
04c68496 4645 spin_lock_irq(&phba->hbalock);
dea3101e 4646 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 4647 spin_unlock_irq(&phba->hbalock);
dea3101e 4648
4649 rc = lpfc_config_port_post(phba);
4650 if (rc)
4651 goto lpfc_sli_hba_setup_error;
4652
ed957684
JS
4653 return rc;
4654
92d7f7b0 4655lpfc_sli_hba_setup_error:
2e0fef85 4656 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 4657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4658 "0445 Firmware initialization failed\n");
dea3101e 4659 return rc;
4660}
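/*
 * Worked example of the SLI-3 VPI bookkeeping sized above
 * (illustrative value): with max_vpi == 100 and BITS_PER_LONG == 64,
 * longs = (100 + 64) / 64 = 2, so vpi_bmask spans two unsigned longs
 * (128 bits) and vpi_ids holds max_vpi + 1 == 101 uint16_t entries.
 */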
4661
e59058c4 4662/**
da0436e9
JS
4663 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4664 * @phba: Pointer to HBA context object.
 4665 *
 4666 * This function issues a dump mailbox command to read config region
 4667 * 23, parses the records in the region, and populates the driver
 4668 * data structure.
e59058c4 4669 **/
da0436e9 4670static int
ff78d8f9 4671lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 4672{
ff78d8f9 4673 LPFC_MBOXQ_t *mboxq;
da0436e9
JS
4674 struct lpfc_dmabuf *mp;
4675 struct lpfc_mqe *mqe;
4676 uint32_t data_length;
4677 int rc;
dea3101e 4678
da0436e9
JS
4679 /* Program the default value of vlan_id and fc_map */
4680 phba->valid_vlan = 0;
4681 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4682 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4683 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 4684
ff78d8f9
JS
4685 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4686 if (!mboxq)
da0436e9
JS
4687 return -ENOMEM;
4688
ff78d8f9
JS
4689 mqe = &mboxq->u.mqe;
4690 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4691 rc = -ENOMEM;
4692 goto out_free_mboxq;
4693 }
4694
da0436e9
JS
4695 mp = (struct lpfc_dmabuf *) mboxq->context1;
4696 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4697
4698 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4699 "(%d):2571 Mailbox cmd x%x Status x%x "
4700 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4701 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4702 "CQ: x%x x%x x%x x%x\n",
4703 mboxq->vport ? mboxq->vport->vpi : 0,
4704 bf_get(lpfc_mqe_command, mqe),
4705 bf_get(lpfc_mqe_status, mqe),
4706 mqe->un.mb_words[0], mqe->un.mb_words[1],
4707 mqe->un.mb_words[2], mqe->un.mb_words[3],
4708 mqe->un.mb_words[4], mqe->un.mb_words[5],
4709 mqe->un.mb_words[6], mqe->un.mb_words[7],
4710 mqe->un.mb_words[8], mqe->un.mb_words[9],
4711 mqe->un.mb_words[10], mqe->un.mb_words[11],
4712 mqe->un.mb_words[12], mqe->un.mb_words[13],
4713 mqe->un.mb_words[14], mqe->un.mb_words[15],
4714 mqe->un.mb_words[16], mqe->un.mb_words[50],
4715 mboxq->mcqe.word0,
4716 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4717 mboxq->mcqe.trailer);
4718
4719 if (rc) {
4720 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4721 kfree(mp);
ff78d8f9
JS
4722 rc = -EIO;
4723 goto out_free_mboxq;
da0436e9
JS
4724 }
4725 data_length = mqe->un.mb_words[5];
a0c87cbd 4726 if (data_length > DMP_RGN23_SIZE) {
d11e31dd
JS
4727 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4728 kfree(mp);
ff78d8f9
JS
4729 rc = -EIO;
4730 goto out_free_mboxq;
d11e31dd 4731 }
dea3101e 4732
da0436e9
JS
4733 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4734 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4735 kfree(mp);
ff78d8f9
JS
4736 rc = 0;
4737
4738out_free_mboxq:
4739 mempool_free(mboxq, phba->mbox_mem_pool);
4740 return rc;
da0436e9 4741}
e59058c4
JS
4742
4743/**
da0436e9
JS
4744 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4745 * @phba: pointer to lpfc hba data structure.
4746 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4747 * @vpd: pointer to the memory to hold resulting port vpd data.
4748 * @vpd_size: On input, the number of bytes allocated to @vpd.
4749 * On output, the number of data bytes in @vpd.
e59058c4 4750 *
da0436e9
JS
4751 * This routine executes a READ_REV SLI4 mailbox command. In
4752 * addition, this routine gets the port vpd data.
4753 *
4754 * Return codes
af901ca1 4755 * 0 - successful
d439d286 4756 * -ENOMEM - could not allocate memory.
e59058c4 4757 **/
da0436e9
JS
4758static int
4759lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4760 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 4761{
da0436e9
JS
4762 int rc = 0;
4763 uint32_t dma_size;
4764 struct lpfc_dmabuf *dmabuf;
4765 struct lpfc_mqe *mqe;
dea3101e 4766
da0436e9
JS
4767 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4768 if (!dmabuf)
4769 return -ENOMEM;
4770
4771 /*
4772 * Get a DMA buffer for the vpd data resulting from the READ_REV
4773 * mailbox command.
a257bf90 4774 */
da0436e9
JS
4775 dma_size = *vpd_size;
4776 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4777 dma_size,
4778 &dmabuf->phys,
4779 GFP_KERNEL);
4780 if (!dmabuf->virt) {
4781 kfree(dmabuf);
4782 return -ENOMEM;
a257bf90 4783 }
da0436e9 4784 memset(dmabuf->virt, 0, dma_size);
a257bf90 4785
da0436e9
JS
4786 /*
4787 * The SLI4 implementation of READ_REV conflicts at word1,
4788 * bits 31:16 and SLI4 adds vpd functionality not present
4789 * in SLI3. This code corrects the conflicts.
1dcb58e5 4790 */
da0436e9
JS
4791 lpfc_read_rev(phba, mboxq);
4792 mqe = &mboxq->u.mqe;
4793 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4794 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4795 mqe->un.read_rev.word1 &= 0x0000FFFF;
4796 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4797 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4798
4799 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4800 if (rc) {
4801 dma_free_coherent(&phba->pcidev->dev, dma_size,
4802 dmabuf->virt, dmabuf->phys);
def9c7a9 4803 kfree(dmabuf);
da0436e9
JS
4804 return -EIO;
4805 }
1dcb58e5 4806
da0436e9
JS
4807 /*
4808 * The available vpd length cannot be bigger than the
4809 * DMA buffer passed to the port. Catch the less than
4810 * case and update the caller's size.
4811 */
4812 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4813 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 4814
d7c47992
JS
4815 memcpy(vpd, dmabuf->virt, *vpd_size);
4816
da0436e9
JS
4817 dma_free_coherent(&phba->pcidev->dev, dma_size,
4818 dmabuf->virt, dmabuf->phys);
4819 kfree(dmabuf);
4820 return 0;
dea3101e 4821}
4822
cd1c8301
JS
4823/**
4824 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4825 * @phba: pointer to lpfc hba data structure.
4826 *
4827 * This routine retrieves the SLI4 device physical port name that this
4828 * PCI function is attached to.
4829 *
4830 * Return codes
4907cb7b 4831 * 0 - successful
cd1c8301
JS
4832 * otherwise - failed to retrieve physical port name
4833 **/
4834static int
4835lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4836{
4837 LPFC_MBOXQ_t *mboxq;
cd1c8301
JS
4838 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4839 struct lpfc_controller_attribute *cntl_attr;
4840 struct lpfc_mbx_get_port_name *get_port_name;
4841 void *virtaddr = NULL;
4842 uint32_t alloclen, reqlen;
4843 uint32_t shdr_status, shdr_add_status;
4844 union lpfc_sli4_cfg_shdr *shdr;
4845 char cport_name = 0;
4846 int rc;
4847
4848 /* We assume nothing at this point */
4849 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4850 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4851
4852 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4853 if (!mboxq)
4854 return -ENOMEM;
cd1c8301 4855 /* obtain link type and link number via READ_CONFIG */
ff78d8f9
JS
4856 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4857 lpfc_sli4_read_config(phba);
4858 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4859 goto retrieve_ppname;
cd1c8301
JS
4860
4861 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4862 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4863 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4864 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4865 LPFC_SLI4_MBX_NEMBED);
4866 if (alloclen < reqlen) {
4867 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4868 "3084 Allocated DMA memory size (%d) is "
4869 "less than the requested DMA memory size "
4870 "(%d)\n", alloclen, reqlen);
4871 rc = -ENOMEM;
4872 goto out_free_mboxq;
4873 }
4874 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4875 virtaddr = mboxq->sge_array->addr[0];
4876 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4877 shdr = &mbx_cntl_attr->cfg_shdr;
4878 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4879 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4880 if (shdr_status || shdr_add_status || rc) {
4881 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4882 "3085 Mailbox x%x (x%x/x%x) failed, "
4883 "rc:x%x, status:x%x, add_status:x%x\n",
4884 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4885 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4886 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4887 rc, shdr_status, shdr_add_status);
4888 rc = -ENXIO;
4889 goto out_free_mboxq;
4890 }
4891 cntl_attr = &mbx_cntl_attr->cntl_attr;
4892 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4893 phba->sli4_hba.lnk_info.lnk_tp =
4894 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4895 phba->sli4_hba.lnk_info.lnk_no =
4896 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4897 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4898 "3086 lnk_type:%d, lnk_numb:%d\n",
4899 phba->sli4_hba.lnk_info.lnk_tp,
4900 phba->sli4_hba.lnk_info.lnk_no);
4901
4902retrieve_ppname:
4903 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4904 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4905 sizeof(struct lpfc_mbx_get_port_name) -
4906 sizeof(struct lpfc_sli4_cfg_mhdr),
4907 LPFC_SLI4_MBX_EMBED);
4908 get_port_name = &mboxq->u.mqe.un.get_port_name;
4909 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4910 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4911 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4912 phba->sli4_hba.lnk_info.lnk_tp);
4913 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4914 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4915 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4916 if (shdr_status || shdr_add_status || rc) {
4917 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4918 "3087 Mailbox x%x (x%x/x%x) failed: "
4919 "rc:x%x, status:x%x, add_status:x%x\n",
4920 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4921 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4922 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4923 rc, shdr_status, shdr_add_status);
4924 rc = -ENXIO;
4925 goto out_free_mboxq;
4926 }
4927 switch (phba->sli4_hba.lnk_info.lnk_no) {
4928 case LPFC_LINK_NUMBER_0:
4929 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4930 &get_port_name->u.response);
4931 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4932 break;
4933 case LPFC_LINK_NUMBER_1:
4934 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4935 &get_port_name->u.response);
4936 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4937 break;
4938 case LPFC_LINK_NUMBER_2:
4939 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4940 &get_port_name->u.response);
4941 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4942 break;
4943 case LPFC_LINK_NUMBER_3:
4944 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4945 &get_port_name->u.response);
4946 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4947 break;
4948 default:
4949 break;
4950 }
4951
4952 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4953 phba->Port[0] = cport_name;
4954 phba->Port[1] = '\0';
4955 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4956 "3091 SLI get port name: %s\n", phba->Port);
4957 }
4958
4959out_free_mboxq:
4960 if (rc != MBX_TIMEOUT) {
4961 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4962 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4963 else
4964 mempool_free(mboxq, phba->mbox_mem_pool);
4965 }
4966 return rc;
4967}
4968
e59058c4 4969/**
da0436e9
JS
4970 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4971 * @phba: pointer to lpfc hba data structure.
e59058c4 4972 *
da0436e9
JS
4973 * This routine is called to explicitly arm the SLI4 device's completion and
4974 * event queues.
4975 **/
4976static void
4977lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4978{
962bc51b 4979 int fcp_eqidx;
da0436e9
JS
4980
4981 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4982 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
0558056c 4983 fcp_eqidx = 0;
2e90f4b5 4984 if (phba->sli4_hba.fcp_cq) {
67d12733 4985 do {
2e90f4b5
JS
4986 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4987 LPFC_QUEUE_REARM);
67d12733 4988 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
2e90f4b5 4989 }
67d12733
JS
4990 if (phba->sli4_hba.hba_eq) {
4991 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
2e90f4b5 4992 fcp_eqidx++)
67d12733 4993 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
2e90f4b5
JS
4994 LPFC_QUEUE_REARM);
4995 }
da0436e9
JS
4996}
4997
6d368e53
JS
4998/**
4999 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5000 * @phba: Pointer to HBA context object.
5001 * @type: The resource extent type.
b76f2dc9
JS
5002 * @extnt_count: buffer to hold port available extent count.
5003 * @extnt_size: buffer to hold element count per extent.
6d368e53 5004 *
b76f2dc9
JS
5005 * This function calls the port and retrieves the number of available
5006 * extents and their size for a particular extent type.
5007 *
5008 * Returns: 0 if successful. Nonzero otherwise.
6d368e53 5009 **/
b76f2dc9 5010int
6d368e53
JS
5011lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5012 uint16_t *extnt_count, uint16_t *extnt_size)
5013{
5014 int rc = 0;
5015 uint32_t length;
5016 uint32_t mbox_tmo;
5017 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5018 LPFC_MBOXQ_t *mbox;
5019
5020 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5021 if (!mbox)
5022 return -ENOMEM;
5023
5024 /* Find out how many extents are available for this resource type */
5025 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5026 sizeof(struct lpfc_sli4_cfg_mhdr));
5027 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5028 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5029 length, LPFC_SLI4_MBX_EMBED);
5030
5031 /* Send an extents count of 0 - the GET doesn't use it. */
5032 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5033 LPFC_SLI4_MBX_EMBED);
5034 if (unlikely(rc)) {
5035 rc = -EIO;
5036 goto err_exit;
5037 }
5038
5039 if (!phba->sli4_hba.intr_enable)
5040 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5041 else {
a183a15f 5042 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5043 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5044 }
5045 if (unlikely(rc)) {
5046 rc = -EIO;
5047 goto err_exit;
5048 }
5049
5050 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5051 if (bf_get(lpfc_mbox_hdr_status,
5052 &rsrc_info->header.cfg_shdr.response)) {
5053 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5054 "2930 Failed to get resource extents "
5055 "Status 0x%x Add'l Status 0x%x\n",
5056 bf_get(lpfc_mbox_hdr_status,
5057 &rsrc_info->header.cfg_shdr.response),
5058 bf_get(lpfc_mbox_hdr_add_status,
5059 &rsrc_info->header.cfg_shdr.response));
5060 rc = -EIO;
5061 goto err_exit;
5062 }
5063
5064 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5065 &rsrc_info->u.rsp);
5066 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5067 &rsrc_info->u.rsp);
8a9d2e80
JS
5068
5069 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5070 "3162 Retrieved extents type-%d from port: count:%d, "
5071 "size:%d\n", type, *extnt_count, *extnt_size);
5072
5073err_exit:
6d368e53
JS
5074 mempool_free(mbox, phba->mbox_mem_pool);
5075 return rc;
5076}
5077
5078/**
5079 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5080 * @phba: Pointer to HBA context object.
5081 * @type: The extent type to check.
5082 *
5083 * This function reads the current available extents from the port and checks
5084 * if the extent count or extent size has changed since the last access.
5085 * Callers use this routine after a port reset to understand if there is an
5086 * extent reprovisioning requirement.
5087 *
5088 * Returns:
5089 * -Error: error indicates problem.
5090 * 1: Extent count or size has changed.
5091 * 0: No changes.
5092 **/
5093static int
5094lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5095{
5096 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5097 uint16_t size_diff, rsrc_ext_size;
5098 int rc = 0;
5099 struct lpfc_rsrc_blks *rsrc_entry;
5100 struct list_head *rsrc_blk_list = NULL;
5101
5102 size_diff = 0;
5103 curr_ext_cnt = 0;
5104 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5105 &rsrc_ext_cnt,
5106 &rsrc_ext_size);
5107 if (unlikely(rc))
5108 return -EIO;
5109
5110 switch (type) {
5111 case LPFC_RSC_TYPE_FCOE_RPI:
5112 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5113 break;
5114 case LPFC_RSC_TYPE_FCOE_VPI:
5115 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5116 break;
5117 case LPFC_RSC_TYPE_FCOE_XRI:
5118 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5119 break;
5120 case LPFC_RSC_TYPE_FCOE_VFI:
5121 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5122 break;
5123 default:
5124 break;
5125 }
5126
5127 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5128 curr_ext_cnt++;
5129 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5130 size_diff++;
5131 }
5132
5133 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5134 rc = 1;
5135
5136 return rc;
5137}
5138
5139/**
5140 * lpfc_sli4_cfg_post_extnts - Post a resource extents allocation request
5141 * @phba: Pointer to HBA context object.
5142 * @extnt_cnt: number of available extents.
5143 * @type: the extent type (rpi, xri, vfi, vpi).
5144 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5145 * @mbox: pointer to the caller's allocated mailbox structure.
5146 *
5147 * This function executes the extents allocation request. It also
5148 * sizes the mailbox memory needed to allocate or retrieve the
5149 * allocated extents. It is the caller's responsibility to evaluate
5150 * the response.
5151 *
5152 * Returns:
5153 * -Error: Error value describes the condition found.
5154 * 0: if successful
5155 **/
5156static int
8a9d2e80 5157lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6d368e53
JS
5158 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5159{
5160 int rc = 0;
5161 uint32_t req_len;
5162 uint32_t emb_len;
5163 uint32_t alloc_len, mbox_tmo;
5164
5165 /* Calculate the total requested length of the dma memory */
8a9d2e80 5166 req_len = extnt_cnt * sizeof(uint16_t);
6d368e53
JS
5167
5168 /*
5169 * Calculate the size of an embedded mailbox. The uint32_t
5170 * accounts for extents-specific word.
5171 */
5172 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5173 sizeof(uint32_t);
5174
5175 /*
5176 * Presume the allocation and response will fit into an embedded
5177 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5178 */
5179 *emb = LPFC_SLI4_MBX_EMBED;
5180 if (req_len > emb_len) {
8a9d2e80 5181 req_len = extnt_cnt * sizeof(uint16_t) +
6d368e53
JS
5182 sizeof(union lpfc_sli4_cfg_shdr) +
5183 sizeof(uint32_t);
5184 *emb = LPFC_SLI4_MBX_NEMBED;
5185 }
5186
5187 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5188 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5189 req_len, *emb);
5190 if (alloc_len < req_len) {
5191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
b76f2dc9 5192 "2982 Allocated DMA memory size (x%x) is "
6d368e53
JS
5193 "less than the requested DMA memory "
5194 "size (x%x)\n", alloc_len, req_len);
5195 return -ENOMEM;
5196 }
8a9d2e80 5197 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6d368e53
JS
5198 if (unlikely(rc))
5199 return -EIO;
5200
5201 if (!phba->sli4_hba.intr_enable)
5202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5203 else {
a183a15f 5204 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5205 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5206 }
5207
5208 if (unlikely(rc))
5209 rc = -EIO;
5210 return rc;
5211}
5212
5213/**
5214 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5215 * @phba: Pointer to HBA context object.
5216 * @type: The resource extent type to allocate.
5217 *
5218 * This function allocates all resource extents of the specified type
5219 * and provisions the driver's id array and bitmask for them.
5220 **/
5221static int
5222lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5223{
5224 bool emb = false;
5225 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5226 uint16_t rsrc_id, rsrc_start, j, k;
5227 uint16_t *ids;
5228 int i, rc;
5229 unsigned long longs;
5230 unsigned long *bmask;
5231 struct lpfc_rsrc_blks *rsrc_blks;
5232 LPFC_MBOXQ_t *mbox;
5233 uint32_t length;
5234 struct lpfc_id_range *id_array = NULL;
5235 void *virtaddr = NULL;
5236 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5237 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5238 struct list_head *ext_blk_list;
5239
5240 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5241 &rsrc_cnt,
5242 &rsrc_size);
5243 if (unlikely(rc))
5244 return -EIO;
5245
5246 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5247 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5248 "3009 No available Resource Extents "
5249 "for resource type 0x%x: Count: 0x%x, "
5250 "Size 0x%x\n", type, rsrc_cnt,
5251 rsrc_size);
5252 return -ENOMEM;
5253 }
5254
8a9d2e80
JS
5255 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5256 "2903 Post resource extents type-0x%x: "
5257 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6d368e53
JS
5258
5259 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5260 if (!mbox)
5261 return -ENOMEM;
5262
8a9d2e80 5263 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6d368e53
JS
5264 if (unlikely(rc)) {
5265 rc = -EIO;
5266 goto err_exit;
5267 }
5268
5269 /*
5270 * Figure out where the response is located. Then get local pointers
5271 * to the response data. The port does not guarantee a response to
5272 * every requested extent count, so update the local variable with
5273 * the allocated count from the port.
5274 */
5275 if (emb == LPFC_SLI4_MBX_EMBED) {
5276 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5277 id_array = &rsrc_ext->u.rsp.id[0];
5278 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5279 } else {
5280 virtaddr = mbox->sge_array->addr[0];
5281 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5282 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5283 id_array = &n_rsrc->id;
5284 }
5285
5286 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5287 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5288
5289 /*
5290 * Based on the resource size and count, correct the base and max
5291 * resource values.
5292 */
5293 length = sizeof(struct lpfc_rsrc_blks);
5294 switch (type) {
5295 case LPFC_RSC_TYPE_FCOE_RPI:
5296 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5297 sizeof(unsigned long),
5298 GFP_KERNEL);
5299 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5300 rc = -ENOMEM;
5301 goto err_exit;
5302 }
5303 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5304 sizeof(uint16_t),
5305 GFP_KERNEL);
5306 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5307 kfree(phba->sli4_hba.rpi_bmask);
5308 rc = -ENOMEM;
5309 goto err_exit;
5310 }
5311
5312 /*
5313 * The next_rpi was initialized with the maximum available
5314 * count but the port may allocate a smaller number. Catch
5315 * that case and update the next_rpi.
5316 */
5317 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5318
5319 /* Initialize local ptrs for common extent processing later. */
5320 bmask = phba->sli4_hba.rpi_bmask;
5321 ids = phba->sli4_hba.rpi_ids;
5322 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5323 break;
5324 case LPFC_RSC_TYPE_FCOE_VPI:
5325 phba->vpi_bmask = kzalloc(longs *
5326 sizeof(unsigned long),
5327 GFP_KERNEL);
5328 if (unlikely(!phba->vpi_bmask)) {
5329 rc = -ENOMEM;
5330 goto err_exit;
5331 }
5332 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5333 sizeof(uint16_t),
5334 GFP_KERNEL);
5335 if (unlikely(!phba->vpi_ids)) {
5336 kfree(phba->vpi_bmask);
5337 rc = -ENOMEM;
5338 goto err_exit;
5339 }
5340
5341 /* Initialize local ptrs for common extent processing later. */
5342 bmask = phba->vpi_bmask;
5343 ids = phba->vpi_ids;
5344 ext_blk_list = &phba->lpfc_vpi_blk_list;
5345 break;
5346 case LPFC_RSC_TYPE_FCOE_XRI:
5347 phba->sli4_hba.xri_bmask = kzalloc(longs *
5348 sizeof(unsigned long),
5349 GFP_KERNEL);
5350 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5351 rc = -ENOMEM;
5352 goto err_exit;
5353 }
8a9d2e80 5354 phba->sli4_hba.max_cfg_param.xri_used = 0;
6d368e53
JS
5355 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5356 sizeof(uint16_t),
5357 GFP_KERNEL);
5358 if (unlikely(!phba->sli4_hba.xri_ids)) {
5359 kfree(phba->sli4_hba.xri_bmask);
5360 rc = -ENOMEM;
5361 goto err_exit;
5362 }
5363
5364 /* Initialize local ptrs for common extent processing later. */
5365 bmask = phba->sli4_hba.xri_bmask;
5366 ids = phba->sli4_hba.xri_ids;
5367 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5368 break;
5369 case LPFC_RSC_TYPE_FCOE_VFI:
5370 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5371 sizeof(unsigned long),
5372 GFP_KERNEL);
5373 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5374 rc = -ENOMEM;
5375 goto err_exit;
5376 }
5377 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5378 sizeof(uint16_t),
5379 GFP_KERNEL);
5380 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5381 kfree(phba->sli4_hba.vfi_bmask);
5382 rc = -ENOMEM;
5383 goto err_exit;
5384 }
5385
5386 /* Initialize local ptrs for common extent processing later. */
5387 bmask = phba->sli4_hba.vfi_bmask;
5388 ids = phba->sli4_hba.vfi_ids;
5389 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5390 break;
5391 default:
5392 /* Unsupported Opcode. Fail call. */
5393 id_array = NULL;
5394 bmask = NULL;
5395 ids = NULL;
5396 ext_blk_list = NULL;
5397 goto err_exit;
5398 }
5399
5400 /*
5401 * Complete initializing the extent configuration with the
5402 * allocated ids assigned to this function. The bitmask serves
5403 * as an index into the array and manages the available ids. The
5404 * array just stores the ids communicated to the port via the wqes.
5405 */
5406 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5407 if ((i % 2) == 0)
5408 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5409 &id_array[k]);
5410 else
5411 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5412 &id_array[k]);
5413
5414 rsrc_blks = kzalloc(length, GFP_KERNEL);
5415 if (unlikely(!rsrc_blks)) {
5416 rc = -ENOMEM;
5417 kfree(bmask);
5418 kfree(ids);
5419 goto err_exit;
5420 }
5421 rsrc_blks->rsrc_start = rsrc_id;
5422 rsrc_blks->rsrc_size = rsrc_size;
5423 list_add_tail(&rsrc_blks->list, ext_blk_list);
5424 rsrc_start = rsrc_id;
5425 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5426 phba->sli4_hba.scsi_xri_start = rsrc_start +
5427 lpfc_sli4_get_els_iocb_cnt(phba);
5428
5429 while (rsrc_id < (rsrc_start + rsrc_size)) {
5430 ids[j] = rsrc_id;
5431 rsrc_id++;
5432 j++;
5433 }
5434 /* Entire word processed. Get next word.*/
5435 if ((i % 2) == 1)
5436 k++;
5437 }
5438 err_exit:
5439 lpfc_sli4_mbox_cmd_free(phba, mbox);
5440 return rc;
5441}
5442
5443/**
5444 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5445 * @phba: Pointer to HBA context object.
5446 * @type: the extent's type.
5447 *
5448 * This function deallocates all extents of a particular resource type.
5449 * SLI4 does not allow for deallocating a particular extent range. It
5450 * is the caller's responsibility to release all kernel memory resources.
5451 **/
5452static int
5453lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5454{
5455 int rc;
5456 uint32_t length, mbox_tmo = 0;
5457 LPFC_MBOXQ_t *mbox;
5458 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5459 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5460
5461 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5462 if (!mbox)
5463 return -ENOMEM;
5464
5465 /*
5466 * This function sends an embedded mailbox because it only sends
5467 * the resource type. All extents of this type are released by the
5468 * port.
5469 */
5470 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5471 sizeof(struct lpfc_sli4_cfg_mhdr));
5472 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5473 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5474 length, LPFC_SLI4_MBX_EMBED);
5475
5476 /* Send an extents count of 0 - the dealloc doesn't use it. */
5477 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5478 LPFC_SLI4_MBX_EMBED);
5479 if (unlikely(rc)) {
5480 rc = -EIO;
5481 goto out_free_mbox;
5482 }
5483 if (!phba->sli4_hba.intr_enable)
5484 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5485 else {
a183a15f 5486 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
5487 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5488 }
5489 if (unlikely(rc)) {
5490 rc = -EIO;
5491 goto out_free_mbox;
5492 }
5493
5494 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5495 if (bf_get(lpfc_mbox_hdr_status,
5496 &dealloc_rsrc->header.cfg_shdr.response)) {
5497 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5498 "2919 Failed to release resource extents "
5499 "for type %d - Status 0x%x Add'l Status 0x%x. "
5500 "Resource memory not released.\n",
5501 type,
5502 bf_get(lpfc_mbox_hdr_status,
5503 &dealloc_rsrc->header.cfg_shdr.response),
5504 bf_get(lpfc_mbox_hdr_add_status,
5505 &dealloc_rsrc->header.cfg_shdr.response));
5506 rc = -EIO;
5507 goto out_free_mbox;
5508 }
5509
5510 /* Release kernel memory resources for the specific type. */
5511 switch (type) {
5512 case LPFC_RSC_TYPE_FCOE_VPI:
5513 kfree(phba->vpi_bmask);
5514 kfree(phba->vpi_ids);
5515 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5516 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5517 &phba->lpfc_vpi_blk_list, list) {
5518 list_del_init(&rsrc_blk->list);
5519 kfree(rsrc_blk);
5520 }
16a3a208 5521 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6d368e53
JS
5522 break;
5523 case LPFC_RSC_TYPE_FCOE_XRI:
5524 kfree(phba->sli4_hba.xri_bmask);
5525 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
5526 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5527 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5528 list_del_init(&rsrc_blk->list);
5529 kfree(rsrc_blk);
5530 }
5531 break;
5532 case LPFC_RSC_TYPE_FCOE_VFI:
5533 kfree(phba->sli4_hba.vfi_bmask);
5534 kfree(phba->sli4_hba.vfi_ids);
5535 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5536 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5537 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5538 list_del_init(&rsrc_blk->list);
5539 kfree(rsrc_blk);
5540 }
5541 break;
5542 case LPFC_RSC_TYPE_FCOE_RPI:
5543 /* RPI bitmask and physical id array are cleaned up earlier. */
5544 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5545 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5546 list_del_init(&rsrc_blk->list);
5547 kfree(rsrc_blk);
5548 }
5549 break;
5550 default:
5551 break;
5552 }
5553
5554 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5555
5556 out_free_mbox:
5557 mempool_free(mbox, phba->mbox_mem_pool);
5558 return rc;
5559}
5560
5561/**
5562 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5563 * @phba: Pointer to HBA context object.
5564 *
5565 * This function allocates all SLI4 resource identifiers.
5566 **/
5567int
5568lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5569{
5570 int i, rc, error = 0;
5571 uint16_t count, base;
5572 unsigned long longs;
5573
ff78d8f9
JS
5574 if (!phba->sli4_hba.rpi_hdrs_in_use)
5575 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6d368e53
JS
5576 if (phba->sli4_hba.extents_in_use) {
5577 /*
5578 * The port supports resource extents. The XRI, VPI, VFI, RPI
5579 * resource extent count must be read and allocated before
5580 * provisioning the resource id arrays.
5581 */
5582 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5583 LPFC_IDX_RSRC_RDY) {
5584 /*
5585 * Extent-based resources are set - the driver could
5586 * be in a port reset. Figure out if any corrective
5587 * actions need to be taken.
5588 */
5589 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5590 LPFC_RSC_TYPE_FCOE_VFI);
5591 if (rc != 0)
5592 error++;
5593 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5594 LPFC_RSC_TYPE_FCOE_VPI);
5595 if (rc != 0)
5596 error++;
5597 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5598 LPFC_RSC_TYPE_FCOE_XRI);
5599 if (rc != 0)
5600 error++;
5601 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5602 LPFC_RSC_TYPE_FCOE_RPI);
5603 if (rc != 0)
5604 error++;
5605
5606 /*
5607 * It's possible that the number of resources
5608 * provided to this port instance changed between
5609 * resets. Detect this condition and reallocate
5610 * resources. Otherwise, there is no action.
5611 */
5612 if (error) {
5613 lpfc_printf_log(phba, KERN_INFO,
5614 LOG_MBOX | LOG_INIT,
5615 "2931 Detected extent resource "
5616 "change. Reallocating all "
5617 "extents.\n");
5618 rc = lpfc_sli4_dealloc_extent(phba,
5619 LPFC_RSC_TYPE_FCOE_VFI);
5620 rc = lpfc_sli4_dealloc_extent(phba,
5621 LPFC_RSC_TYPE_FCOE_VPI);
5622 rc = lpfc_sli4_dealloc_extent(phba,
5623 LPFC_RSC_TYPE_FCOE_XRI);
5624 rc = lpfc_sli4_dealloc_extent(phba,
5625 LPFC_RSC_TYPE_FCOE_RPI);
5626 } else
5627 return 0;
5628 }
5629
5630 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5631 if (unlikely(rc))
5632 goto err_exit;
5633
5634 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5635 if (unlikely(rc))
5636 goto err_exit;
5637
5638 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5639 if (unlikely(rc))
5640 goto err_exit;
5641
5642 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5643 if (unlikely(rc))
5644 goto err_exit;
5645 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5646 LPFC_IDX_RSRC_RDY);
5647 return rc;
5648 } else {
5649 /*
5650 * The port does not support resource extents. The XRI, VPI,
5651 * VFI, RPI resource ids were determined from READ_CONFIG.
5652 * Just allocate the bitmasks and provision the resource id
5653 * arrays. If a port reset is active, the resources don't
5654 * need any action - just exit.
5655 */
5656 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
ff78d8f9
JS
5657 LPFC_IDX_RSRC_RDY) {
5658 lpfc_sli4_dealloc_resource_identifiers(phba);
5659 lpfc_sli4_remove_rpis(phba);
5660 }
6d368e53
JS
5661 /* RPIs. */
5662 count = phba->sli4_hba.max_cfg_param.max_rpi;
0a630c27
JS
5663 if (count <= 0) {
5664 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5665 "3279 Invalid provisioning of "
5666 "rpi:%d\n", count);
5667 rc = -EINVAL;
5668 goto err_exit;
5669 }
6d368e53
JS
5670 base = phba->sli4_hba.max_cfg_param.rpi_base;
5671 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5672 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5673 sizeof(unsigned long),
5674 GFP_KERNEL);
5675 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5676 rc = -ENOMEM;
5677 goto err_exit;
5678 }
5679 phba->sli4_hba.rpi_ids = kzalloc(count *
5680 sizeof(uint16_t),
5681 GFP_KERNEL);
5682 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5683 rc = -ENOMEM;
5684 goto free_rpi_bmask;
5685 }
5686
5687 for (i = 0; i < count; i++)
5688 phba->sli4_hba.rpi_ids[i] = base + i;
5689
5690 /* VPIs. */
5691 count = phba->sli4_hba.max_cfg_param.max_vpi;
0a630c27
JS
5692 if (count <= 0) {
5693 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5694 "3280 Invalid provisioning of "
5695 "vpi:%d\n", count);
5696 rc = -EINVAL;
5697 goto free_rpi_ids;
5698 }
6d368e53
JS
5699 base = phba->sli4_hba.max_cfg_param.vpi_base;
5700 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5701 phba->vpi_bmask = kzalloc(longs *
5702 sizeof(unsigned long),
5703 GFP_KERNEL);
5704 if (unlikely(!phba->vpi_bmask)) {
5705 rc = -ENOMEM;
5706 goto free_rpi_ids;
5707 }
5708 phba->vpi_ids = kzalloc(count *
5709 sizeof(uint16_t),
5710 GFP_KERNEL);
5711 if (unlikely(!phba->vpi_ids)) {
5712 rc = -ENOMEM;
5713 goto free_vpi_bmask;
5714 }
5715
5716 for (i = 0; i < count; i++)
5717 phba->vpi_ids[i] = base + i;
5718
5719 /* XRIs. */
5720 count = phba->sli4_hba.max_cfg_param.max_xri;
0a630c27
JS
5721 if (count <= 0) {
5722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5723 "3281 Invalid provisioning of "
5724 "xri:%d\n", count);
5725 rc = -EINVAL;
5726 goto free_vpi_ids;
5727 }
6d368e53
JS
5728 base = phba->sli4_hba.max_cfg_param.xri_base;
5729 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5730 phba->sli4_hba.xri_bmask = kzalloc(longs *
5731 sizeof(unsigned long),
5732 GFP_KERNEL);
5733 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5734 rc = -ENOMEM;
5735 goto free_vpi_ids;
5736 }
41899be7 5737 phba->sli4_hba.max_cfg_param.xri_used = 0;
6d368e53
JS
5738 phba->sli4_hba.xri_ids = kzalloc(count *
5739 sizeof(uint16_t),
5740 GFP_KERNEL);
5741 if (unlikely(!phba->sli4_hba.xri_ids)) {
5742 rc = -ENOMEM;
5743 goto free_xri_bmask;
5744 }
5745
5746 for (i = 0; i < count; i++)
5747 phba->sli4_hba.xri_ids[i] = base + i;
5748
5749 /* VFIs. */
5750 count = phba->sli4_hba.max_cfg_param.max_vfi;
0a630c27
JS
5751 if (count <= 0) {
5752 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5753 "3282 Invalid provisioning of "
5754 "vfi:%d\n", count);
5755 rc = -EINVAL;
5756 goto free_xri_ids;
5757 }
6d368e53
JS
5758 base = phba->sli4_hba.max_cfg_param.vfi_base;
5759 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5760 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5761 sizeof(unsigned long),
5762 GFP_KERNEL);
5763 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5764 rc = -ENOMEM;
5765 goto free_xri_ids;
5766 }
5767 phba->sli4_hba.vfi_ids = kzalloc(count *
5768 sizeof(uint16_t),
5769 GFP_KERNEL);
5770 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5771 rc = -ENOMEM;
5772 goto free_vfi_bmask;
5773 }
5774
5775 for (i = 0; i < count; i++)
5776 phba->sli4_hba.vfi_ids[i] = base + i;
5777
5778 /*
5779 * Mark all resources ready. An HBA reset doesn't need
5780 * to reset the initialization.
5781 */
5782 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5783 LPFC_IDX_RSRC_RDY);
5784 return 0;
5785 }
5786
5787 free_vfi_bmask:
5788 kfree(phba->sli4_hba.vfi_bmask);
5789 free_xri_ids:
5790 kfree(phba->sli4_hba.xri_ids);
5791 free_xri_bmask:
5792 kfree(phba->sli4_hba.xri_bmask);
5793 free_vpi_ids:
5794 kfree(phba->vpi_ids);
5795 free_vpi_bmask:
5796 kfree(phba->vpi_bmask);
5797 free_rpi_ids:
5798 kfree(phba->sli4_hba.rpi_ids);
5799 free_rpi_bmask:
5800 kfree(phba->sli4_hba.rpi_bmask);
5801 err_exit:
5802 return rc;
5803}
5804
5805/**
5806 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5807 * @phba: Pointer to HBA context object.
5808 *
5809 * This function releases all SLI4 resource identifiers (rpi, vpi,
5810 * xri, vfi) and frees the memory that tracked them.
5811 **/
5812int
5813lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5814{
5815 if (phba->sli4_hba.extents_in_use) {
5816 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5817 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5818 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5819 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5820 } else {
5821 kfree(phba->vpi_bmask);
16a3a208 5822 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6d368e53
JS
5823 kfree(phba->vpi_ids);
5824 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5825 kfree(phba->sli4_hba.xri_bmask);
5826 kfree(phba->sli4_hba.xri_ids);
6d368e53
JS
5827 kfree(phba->sli4_hba.vfi_bmask);
5828 kfree(phba->sli4_hba.vfi_ids);
5829 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5830 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5831 }
5832
5833 return 0;
5834}
5835
b76f2dc9
JS
5836/**
5837 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5838 * @phba: Pointer to HBA context object.
5839 * @type: The resource extent type.
5840 * @extnt_cnt: buffer to hold port extent count response
5841 * @extnt_size: buffer to hold port extent size response.
5842 *
5843 * This function calls the port to read the host allocated extents
5844 * for a particular type.
5845 **/
5846int
5847lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5848 uint16_t *extnt_cnt, uint16_t *extnt_size)
5849{
5850 bool emb;
5851 int rc = 0;
5852 uint16_t curr_blks = 0;
5853 uint32_t req_len, emb_len;
5854 uint32_t alloc_len, mbox_tmo;
5855 struct list_head *blk_list_head;
5856 struct lpfc_rsrc_blks *rsrc_blk;
5857 LPFC_MBOXQ_t *mbox;
5858 void *virtaddr = NULL;
5859 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5860 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5861 union lpfc_sli4_cfg_shdr *shdr;
5862
5863 switch (type) {
5864 case LPFC_RSC_TYPE_FCOE_VPI:
5865 blk_list_head = &phba->lpfc_vpi_blk_list;
5866 break;
5867 case LPFC_RSC_TYPE_FCOE_XRI:
5868 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5869 break;
5870 case LPFC_RSC_TYPE_FCOE_VFI:
5871 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5872 break;
5873 case LPFC_RSC_TYPE_FCOE_RPI:
5874 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5875 break;
5876 default:
5877 return -EIO;
5878 }
5879
5880 /* Count the number of extents currently allocated for this type. */
5881 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5882 if (curr_blks == 0) {
5883 /*
5884 * The GET_ALLOCATED mailbox does not return the size,
5885 * just the count. The size is the size stored in the
5886 * current allocated block; all sizes for an extent
5887 * type are the same, so set the return value now.
5889 */
5890 *extnt_size = rsrc_blk->rsrc_size;
5891 }
5892 curr_blks++;
5893 }
5894
5895 /* Calculate the total requested length of the dma memory. */
5896 req_len = curr_blks * sizeof(uint16_t);
5897
5898 /*
5899 * Calculate the size of an embedded mailbox. The uint32_t
5900 * accounts for extents-specific word.
5901 */
5902 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5903 sizeof(uint32_t);
5904
5905 /*
5906 * Presume the allocation and response will fit into an embedded
5907 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5908 */
5909 emb = LPFC_SLI4_MBX_EMBED;
5911 if (req_len > emb_len) {
5912 req_len = curr_blks * sizeof(uint16_t) +
5913 sizeof(union lpfc_sli4_cfg_shdr) +
5914 sizeof(uint32_t);
5915 emb = LPFC_SLI4_MBX_NEMBED;
5916 }
5917
5918 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5919 if (!mbox)
5920 return -ENOMEM;
5921 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5922
5923 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5924 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5925 req_len, emb);
5926 if (alloc_len < req_len) {
5927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5928 "2983 Allocated DMA memory size (x%x) is "
5929 "less than the requested DMA memory "
5930 "size (x%x)\n", alloc_len, req_len);
5931 rc = -ENOMEM;
5932 goto err_exit;
5933 }
5934 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5935 if (unlikely(rc)) {
5936 rc = -EIO;
5937 goto err_exit;
5938 }
5939
5940 if (!phba->sli4_hba.intr_enable)
5941 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5942 else {
a183a15f 5943 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
b76f2dc9
JS
5944 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5945 }
5946
5947 if (unlikely(rc)) {
5948 rc = -EIO;
5949 goto err_exit;
5950 }
5951
5952 /*
5953 * Figure out where the response is located. Then get local pointers
5954 * to the response data. The port does not guarantee a response to
5955 * every requested extent count, so update the local variable with
5956 * the allocated count from the port.
5957 */
5958 if (emb == LPFC_SLI4_MBX_EMBED) {
5959 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5960 shdr = &rsrc_ext->header.cfg_shdr;
5961 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5962 } else {
5963 virtaddr = mbox->sge_array->addr[0];
5964 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5965 shdr = &n_rsrc->cfg_shdr;
5966 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5967 }
5968
5969 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5970 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5971 "2984 Failed to read allocated resources "
5972 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5973 type,
5974 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5975 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5976 rc = -EIO;
5977 goto err_exit;
5978 }
5979 err_exit:
5980 lpfc_sli4_mbox_cmd_free(phba, mbox);
5981 return rc;
5982}
5983
8a9d2e80
JS
5984/**
5985 * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
5986 * @phba: pointer to lpfc hba data structure.
5987 *
5988 * This routine walks the list of els buffers that have been allocated and
5989 * reposts them to the port by using SGL block post. This is needed after a
5990 * pci_function_reset/warm_start or start. It attempts to construct blocks
5991 * of els buffer sgls which contain contiguous xris and uses the non-embedded
5992 * SGL block post mailbox commands to post them to the port. For any single
5993 * els buffer sgl with a non-contiguous xri, it uses the embedded SGL post
5994 * mailbox command for posting.
5995 *
5996 * Returns: 0 = success, non-zero failure.
5997 **/
5998static int
5999lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6000{
6001 struct lpfc_sglq *sglq_entry = NULL;
6002 struct lpfc_sglq *sglq_entry_next = NULL;
6003 struct lpfc_sglq *sglq_entry_first = NULL;
6004 int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
6005 int last_xritag = NO_XRI;
6006 LIST_HEAD(prep_sgl_list);
6007 LIST_HEAD(blck_sgl_list);
6008 LIST_HEAD(allc_sgl_list);
6009 LIST_HEAD(post_sgl_list);
6010 LIST_HEAD(free_sgl_list);
6011
38c20673 6012 spin_lock_irq(&phba->hbalock);
8a9d2e80 6013 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
38c20673 6014 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
6015
6016 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6017 &allc_sgl_list, list) {
6018 list_del_init(&sglq_entry->list);
6019 block_cnt++;
6020 if ((last_xritag != NO_XRI) &&
6021 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6022 /* a hole in xri block, form a sgl posting block */
6023 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6024 post_cnt = block_cnt - 1;
6025 /* prepare list for next posting block */
6026 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6027 block_cnt = 1;
6028 } else {
6029 /* prepare list for next posting block */
6030 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6031 /* enough sgls for non-embed sgl mbox command */
6032 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6033 list_splice_init(&prep_sgl_list,
6034 &blck_sgl_list);
6035 post_cnt = block_cnt;
6036 block_cnt = 0;
6037 }
6038 }
6039 num_posted++;
6040
6041 /* keep track of last sgl's xritag */
6042 last_xritag = sglq_entry->sli4_xritag;
6043
6044 /* end of repost sgl list condition for els buffers */
6045 if (num_posted == phba->sli4_hba.els_xri_cnt) {
6046 if (post_cnt == 0) {
6047 list_splice_init(&prep_sgl_list,
6048 &blck_sgl_list);
6049 post_cnt = block_cnt;
6050 } else if (block_cnt == 1) {
6051 status = lpfc_sli4_post_sgl(phba,
6052 sglq_entry->phys, 0,
6053 sglq_entry->sli4_xritag);
6054 if (!status) {
6055 /* successful, put sgl to posted list */
6056 list_add_tail(&sglq_entry->list,
6057 &post_sgl_list);
6058 } else {
6059 /* Failure, put sgl to free list */
6060 lpfc_printf_log(phba, KERN_WARNING,
6061 LOG_SLI,
6062 "3159 Failed to post els "
6063 "sgl, xritag:x%x\n",
6064 sglq_entry->sli4_xritag);
6065 list_add_tail(&sglq_entry->list,
6066 &free_sgl_list);
6067 spin_lock_irq(&phba->hbalock);
6068 phba->sli4_hba.els_xri_cnt--;
6069 spin_unlock_irq(&phba->hbalock);
6070 }
6071 }
6072 }
6073
6074 /* continue until a non-embedded page's worth of sgls */
6075 if (post_cnt == 0)
6076 continue;
6077
6078 /* post the els buffer list sgls as a block */
6079 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6080 post_cnt);
6081
6082 if (!status) {
6083 /* success, put sgl list to posted sgl list */
6084 list_splice_init(&blck_sgl_list, &post_sgl_list);
6085 } else {
6086 /* Failure, put sgl list to free sgl list */
6087 sglq_entry_first = list_first_entry(&blck_sgl_list,
6088 struct lpfc_sglq,
6089 list);
6090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6091 "3160 Failed to post els sgl-list, "
6092 "xritag:x%x-x%x\n",
6093 sglq_entry_first->sli4_xritag,
6094 (sglq_entry_first->sli4_xritag +
6095 post_cnt - 1));
6096 list_splice_init(&blck_sgl_list, &free_sgl_list);
6097 spin_lock_irq(&phba->hbalock);
6098 phba->sli4_hba.els_xri_cnt -= post_cnt;
6099 spin_unlock_irq(&phba->hbalock);
6100 }
6101
6102 /* don't reset xritag due to hole in xri block */
6103 if (block_cnt == 0)
6104 last_xritag = NO_XRI;
6105
6106 /* reset els sgl post count for next round of posting */
6107 post_cnt = 0;
6108 }
6109
6110 /* free the els sgls that failed to post */
6111 lpfc_free_sgl_list(phba, &free_sgl_list);
6112
6113 /* push els sgls posted to the available list */
6114 if (!list_empty(&post_sgl_list)) {
38c20673 6115 spin_lock_irq(&phba->hbalock);
8a9d2e80
JS
6116 list_splice_init(&post_sgl_list,
6117 &phba->sli4_hba.lpfc_sgl_list);
38c20673 6118 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
6119 } else {
6120 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6121 "3161 Failure to post els sgl to port.\n");
6122 return -EIO;
6123 }
6124 return 0;
6125}
6126
da0436e9
JS
6127/**
6128 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6129 * @phba: Pointer to HBA context object.
6130 *
6131 * This function is the main SLI4 device initialization PCI function. This
6132 * function is called by the HBA initialization code, HBA reset code and
6133 * HBA error attention handler code. Caller is not required to hold any
6134 * locks.
6135 **/
6136int
6137lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6138{
6139 int rc;
6140 LPFC_MBOXQ_t *mboxq;
6141 struct lpfc_mqe *mqe;
6142 uint8_t *vpd;
6143 uint32_t vpd_size;
6144 uint32_t ftr_rsp = 0;
6145 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6146 struct lpfc_vport *vport = phba->pport;
6147 struct lpfc_dmabuf *mp;
6148
6149 /* Perform a PCI function reset to start from clean */
6150 rc = lpfc_pci_function_reset(phba);
6151 if (unlikely(rc))
6152 return -ENODEV;
6153
6154 /* Check the HBA Host Status Register for readiness */
6155 rc = lpfc_sli4_post_status_check(phba);
6156 if (unlikely(rc))
6157 return -ENODEV;
6158 else {
6159 spin_lock_irq(&phba->hbalock);
6160 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6161 spin_unlock_irq(&phba->hbalock);
6162 }
6163
6164 /*
6165 * Allocate a single mailbox container for initializing the
6166 * port.
6167 */
6168 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6169 if (!mboxq)
6170 return -ENOMEM;
6171
da0436e9 6172 /* Issue READ_REV to collect vpd and FW information. */
49198b37 6173 vpd_size = SLI4_PAGE_SIZE;
da0436e9
JS
6174 vpd = kzalloc(vpd_size, GFP_KERNEL);
6175 if (!vpd) {
6176 rc = -ENOMEM;
6177 goto out_free_mbox;
6178 }
6179
6180 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
76a95d75
JS
6181 if (unlikely(rc)) {
6182 kfree(vpd);
6183 goto out_free_mbox;
6184 }
da0436e9 6185 mqe = &mboxq->u.mqe;
f1126688
JS
6186 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6187 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
76a95d75
JS
6188 phba->hba_flag |= HBA_FCOE_MODE;
6189 else
6190 phba->hba_flag &= ~HBA_FCOE_MODE;
45ed1190
JS
6191
6192 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6193 LPFC_DCBX_CEE_MODE)
6194 phba->hba_flag |= HBA_FIP_SUPPORT;
6195 else
6196 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6197
4f2e66c6
JS
6198 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6199
c31098ce 6200 if (phba->sli_rev != LPFC_SLI_REV4) {
da0436e9
JS
6201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6202 "0376 READ_REV Error. SLI Level %d "
6203 "FCoE enabled %d\n",
76a95d75 6204 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 6205 rc = -EIO;
76a95d75
JS
6206 kfree(vpd);
6207 goto out_free_mbox;
da0436e9 6208 }
cd1c8301 6209
ff78d8f9
JS
6210 /*
6211 * Continue initialization with default values even if the driver fails
6212 * to read the FCoE param config regions; only read the parameters if
6213 * the board is FCoE
6214 */
6215 if (phba->hba_flag & HBA_FCOE_MODE &&
6216 lpfc_sli4_read_fcoe_params(phba))
6217 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6218 "2570 Failed to read FCoE parameters\n");
6219
cd1c8301
JS
6220 /*
6221 * Retrieve the sli4 device physical port name; failure to do so
6222 * is considered non-fatal.
6223 */
6224 rc = lpfc_sli4_retrieve_pport_name(phba);
6225 if (!rc)
6226 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6227 "3080 Successful retrieving SLI4 device "
6228 "physical port name: %s.\n", phba->Port);
6229
da0436e9
JS
6230 /*
6231 * Evaluate the read rev and vpd data. Populate the driver
6232 * state with the results. If this routine fails, the failure
6233 * is not fatal as the driver will use generic values.
6234 */
6235 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6236 if (unlikely(!rc)) {
6237 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6238 "0377 Error %d parsing vpd. "
6239 "Using defaults.\n", rc);
6240 rc = 0;
6241 }
76a95d75 6242 kfree(vpd);
da0436e9 6243
f1126688
JS
6244 /* Save information as VPD data */
6245 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6246 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6247 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6248 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6249 &mqe->un.read_rev);
6250 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6251 &mqe->un.read_rev);
6252 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6253 &mqe->un.read_rev);
6254 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6255 &mqe->un.read_rev);
6256 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6257 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6258 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6259 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6260 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6261 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6262 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6263 "(%d):0380 READ_REV Status x%x "
6264 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6265 mboxq->vport ? mboxq->vport->vpi : 0,
6266 bf_get(lpfc_mqe_status, mqe),
6267 phba->vpd.rev.opFwName,
6268 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6269 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9
JS
6270
6271 /*
6272 * Discover the port's supported feature set and match it against the
6273 * host's requests.
6274 */
6275 lpfc_request_features(phba, mboxq);
6276 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6277 if (unlikely(rc)) {
6278 rc = -EIO;
76a95d75 6279 goto out_free_mbox;
da0436e9
JS
6280 }
6281
6282 /*
6283 * The port must support FCP initiator mode as this is the
6284 * only mode running in the host.
6285 */
6286 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6287 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6288 "0378 No support for fcpi mode.\n");
6289 ftr_rsp++;
6290 }
fedd3b7b
JS
6291 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6292 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6293 else
6294 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
da0436e9
JS
6295 /*
6296 * If the port cannot support the host's requested features
6297 * then turn off the global config parameters to disable the
6298 * feature in the driver. This is not a fatal error.
6299 */
bf08611b
JS
6300 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6301 if (phba->cfg_enable_bg) {
6302 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6303 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6304 else
6305 ftr_rsp++;
6306 }
da0436e9
JS
6307
6308 if (phba->max_vpi && phba->cfg_enable_npiv &&
6309 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6310 ftr_rsp++;
6311
6312 if (ftr_rsp) {
6313 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6314 "0379 Feature Mismatch Data: x%08x %08x "
6315 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6316 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6317 phba->cfg_enable_npiv, phba->max_vpi);
6318 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6319 phba->cfg_enable_bg = 0;
6320 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6321 phba->cfg_enable_npiv = 0;
6322 }
6323
6324 /* These SLI3 features are assumed in SLI4 */
6325 spin_lock_irq(&phba->hbalock);
6326 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6327 spin_unlock_irq(&phba->hbalock);
6328
6d368e53
JS
6329 /*
6330 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6331 * calls depend on these resources to complete port setup.
6332 */
6333 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6334 if (rc) {
6335 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6336 "2920 Failed to alloc Resource IDs "
6337 "rc = x%x\n", rc);
6338 goto out_free_mbox;
6339 }
6340
da0436e9 6341 /* Read the port's service parameters. */
9f1177a3
JS
6342 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6343 if (rc) {
6344 phba->link_state = LPFC_HBA_ERROR;
6345 rc = -ENOMEM;
76a95d75 6346 goto out_free_mbox;
9f1177a3
JS
6347 }
6348
da0436e9
JS
6349 mboxq->vport = vport;
6350 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6351 mp = (struct lpfc_dmabuf *) mboxq->context1;
6352 if (rc == MBX_SUCCESS) {
6353 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6354 rc = 0;
6355 }
6356
6357 /*
6358 * This memory was allocated by the lpfc_read_sparam routine. Release
6359 * it to the mbuf pool.
6360 */
6361 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6362 kfree(mp);
6363 mboxq->context1 = NULL;
6364 if (unlikely(rc)) {
6365 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6366 "0382 READ_SPARAM command failed "
6367 "status %d, mbxStatus x%x\n",
6368 rc, bf_get(lpfc_mqe_status, mqe));
6369 phba->link_state = LPFC_HBA_ERROR;
6370 rc = -EIO;
76a95d75 6371 goto out_free_mbox;
da0436e9
JS
6372 }
6373
0558056c 6374 lpfc_update_vport_wwn(vport);
da0436e9
JS
6375
6376 /* Update the fc_host data structures with new wwn. */
6377 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6378 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6379
8a9d2e80
JS
6380 /* update host els and scsi xri-sgl sizes and mappings */
6381 rc = lpfc_sli4_xri_sgl_update(phba);
6382 if (unlikely(rc)) {
6383 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6384 "1400 Failed to update xri-sgl size and "
6385 "mapping: %d\n", rc);
6386 goto out_free_mbox;
da0436e9
JS
6387 }
6388
8a9d2e80
JS
6389 /* register the els sgl pool to the port */
6390 rc = lpfc_sli4_repost_els_sgl_list(phba);
6391 if (unlikely(rc)) {
6392 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6393 "0582 Error %d during els sgl post "
6394 "operation\n", rc);
6395 rc = -ENODEV;
6396 goto out_free_mbox;
6397 }
6398
6399 /* register the allocated scsi sgl pool to the port */
da0436e9
JS
6400 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6401 if (unlikely(rc)) {
6d368e53 6402 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6a9c52cf
JS
6403 "0383 Error %d during scsi sgl post "
6404 "operation\n", rc);
da0436e9
JS
6405 /* Some Scsi buffers were moved to the abort scsi list */
6406 /* A pci function reset will repost them */
6407 rc = -ENODEV;
76a95d75 6408 goto out_free_mbox;
da0436e9
JS
6409 }
6410
6411 /* Post the rpi header region to the device. */
6412 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6413 if (unlikely(rc)) {
6414 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6415 "0393 Error %d during rpi post operation\n",
6416 rc);
6417 rc = -ENODEV;
76a95d75 6418 goto out_free_mbox;
da0436e9 6419 }
97f2ecf1 6420 lpfc_sli4_node_prep(phba);
da0436e9 6421
5350d872
JS
6422 /* Create all the SLI4 queues */
6423 rc = lpfc_sli4_queue_create(phba);
6424 if (rc) {
6425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6426 "3089 Failed to allocate queues\n");
6427 rc = -ENODEV;
6428 goto out_stop_timers;
6429 }
da0436e9
JS
6430 /* Set up all the queues to the device */
6431 rc = lpfc_sli4_queue_setup(phba);
6432 if (unlikely(rc)) {
6433 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6434 "0381 Error %d during queue setup.\n ", rc);
5350d872 6435 goto out_destroy_queue;
da0436e9
JS
6436 }
6437
6438 /* Arm the CQs and then EQs on device */
6439 lpfc_sli4_arm_cqeq_intr(phba);
6440
6441 /* Indicate device interrupt mode */
6442 phba->sli4_hba.intr_enable = 1;
6443
6444 /* Allow asynchronous mailbox command to go through */
6445 spin_lock_irq(&phba->hbalock);
6446 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6447 spin_unlock_irq(&phba->hbalock);
6448
6449 /* Post receive buffers to the device */
6450 lpfc_sli4_rb_setup(phba);
6451
fc2b989b
JS
6452 /* Reset HBA FCF states after HBA reset */
6453 phba->fcf.fcf_flag = 0;
6454 phba->fcf.current_rec.flag = 0;
6455
da0436e9 6456 /* Start the ELS watchdog timer */
8fa38513 6457 mod_timer(&vport->els_tmofunc,
256ec0d0 6458 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6459
6460 /* Start heart beat timer */
6461 mod_timer(&phba->hb_tmofunc,
256ec0d0 6462 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6463 phba->hb_outstanding = 0;
6464 phba->last_completion_time = jiffies;
6465
6466 /* Start error attention (ERATT) polling timer */
6467 mod_timer(&phba->eratt_poll,
6468 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
da0436e9 6469
6470 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6471 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6472 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6473 if (!rc) {
6474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6475 "2829 This device supports "
6476 "Advanced Error Reporting (AER)\n");
6477 spin_lock_irq(&phba->hbalock);
6478 phba->hba_flag |= HBA_AER_ENABLED;
6479 spin_unlock_irq(&phba->hbalock);
6480 } else {
6481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6482 "2830 This device does not support "
6483 "Advanced Error Reporting (AER)\n");
6484 phba->cfg_aer_support = 0;
6485 }
0a96e975 6486 rc = 0;
6487 }
6488
6489 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6490 /*
6491 * The FC Port needs to register FCFI (index 0)
6492 */
6493 lpfc_reg_fcfi(phba, mboxq);
6494 mboxq->vport = phba->pport;
6495 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9589b062 6496 if (rc != MBX_SUCCESS)
76a95d75 6497 goto out_unset_queue;
6498 rc = 0;
6499 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6500 &mboxq->u.mqe.un.reg_fcfi);
6501
6502 /* Check if the port is configured to be disabled */
6503 lpfc_sli_read_link_ste(phba);
76a95d75 6504 }
026abb87 6505
6506 /*
6507 * The port is ready, set the host's link state to LINK_DOWN
6508 * in preparation for link interrupts.
6509 */
6510 spin_lock_irq(&phba->hbalock);
6511 phba->link_state = LPFC_LINK_DOWN;
6512 spin_unlock_irq(&phba->hbalock);
6513 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6514 (phba->hba_flag & LINK_DISABLED)) {
6515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6516 "3103 Adapter Link is disabled.\n");
6517 lpfc_down_link(phba, mboxq);
6518 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6519 if (rc != MBX_SUCCESS) {
6520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6521 "3104 Adapter failed to issue "
6522 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6523 goto out_unset_queue;
6524 }
6525 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6526 /* don't perform init_link on SLI4 FC port loopback test */
6527 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6528 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6529 if (rc)
6530 goto out_unset_queue;
6531 }
6532 }
6533 mempool_free(mboxq, phba->mbox_mem_pool);
6534 return rc;
76a95d75 6535out_unset_queue:
da0436e9 6536 /* Unset all the queues set up in this routine when error out */
6537 lpfc_sli4_queue_unset(phba);
6538out_destroy_queue:
6539 lpfc_sli4_queue_destroy(phba);
da0436e9 6540out_stop_timers:
5350d872 6541 lpfc_stop_hba_timers(phba);
6542out_free_mbox:
6543 mempool_free(mboxq, phba->mbox_mem_pool);
6544 return rc;
6545}
6546
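/*
 * Editorial note (not upstream code): the setup function above uses the
 * kernel's standard "goto unwind" error ladder -- each failing step jumps
 * to the label that tears down only what was already built (queue unset ->
 * queue destroy -> stop timers -> free mailbox).  A minimal stand-alone
 * sketch of the idiom, with hypothetical setup_a/setup_b/undo_a helpers:
 */
#if 0
static int example_setup(struct lpfc_hba *phba)
{
	int rc;

	rc = setup_a(phba);		/* e.g. lpfc_sli4_queue_create() */
	if (rc)
		goto out;		/* nothing to undo yet */
	rc = setup_b(phba);		/* e.g. lpfc_sli4_queue_setup() */
	if (rc)
		goto out_undo_a;	/* undo step A only */
	return 0;

out_undo_a:
	undo_a(phba);			/* e.g. lpfc_sli4_queue_destroy() */
out:
	return rc;
}
#endif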
6547/**
6548 * lpfc_mbox_timeout - Timeout call back function for mbox timer
6549 * @ptr: context object - pointer to hba structure.
6550 *
6551 * This is the callback function for mailbox timer. The mailbox
6552 * timer is armed when a new mailbox command is issued and the timer
6553 * is deleted when the mailbox complete. The function is called by
6554 * the kernel timer code when a mailbox does not complete within
6555 * expected time. This function wakes up the worker thread to
6556 * process the mailbox timeout and returns. All the processing is
6557 * done by the worker thread function lpfc_mbox_timeout_handler.
6558 **/
6559void
6560lpfc_mbox_timeout(unsigned long ptr)
6561{
6562 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6563 unsigned long iflag;
6564 uint32_t tmo_posted;
6565
6566 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6567 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6568 if (!tmo_posted)
6569 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6570 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6571
6572 if (!tmo_posted)
6573 lpfc_worker_wake_up(phba);
6574 return;
6575}
6576
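/*
 * Editorial note: a sketch of how the callback above is wired up.  The
 * timer is (re)armed with mod_timer() each time a mailbox command is
 * issued (see lpfc_sli_issue_mbox_s3() below); the one-time initialization
 * is done elsewhere at driver setup and, assuming the init_timer()-era
 * kernel timer API this file's (unsigned long) callback implies, looks
 * roughly like this:
 */
#if 0
	init_timer(&phba->sli.mbox_tmo);
	phba->sli.mbox_tmo.function = lpfc_mbox_timeout;
	phba->sli.mbox_tmo.data = (unsigned long)phba;
#endif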
6577
6578/**
6579 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6580 * @phba: Pointer to HBA context object.
6581 *
6582 * This function is called from worker thread when a mailbox command times out.
6583 * The caller is not required to hold any locks. This function will reset the
6584 * HBA and recover all the pending commands.
6585 **/
6586void
6587lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6588{
6589 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
04c68496 6590 MAILBOX_t *mb = &pmbox->u.mb;
6591 struct lpfc_sli *psli = &phba->sli;
6592 struct lpfc_sli_ring *pring;
6593
6594 /* Check the pmbox pointer first. There is a race condition
6595 * between the mbox timeout handler getting executed in the
6596 * worklist and the mailbox actually completing. When this
6597 * race condition occurs, the mbox_active will be NULL.
6598 */
6599 spin_lock_irq(&phba->hbalock);
6600 if (pmbox == NULL) {
6601 lpfc_printf_log(phba, KERN_WARNING,
6602 LOG_MBOX | LOG_SLI,
6603 "0353 Active Mailbox cleared - mailbox timeout "
6604 "exiting\n");
6605 spin_unlock_irq(&phba->hbalock);
6606 return;
6607 }
6608
6609 /* Mbox cmd <mbxCommand> timeout */
6610 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6611 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6612 mb->mbxCommand,
6613 phba->pport->port_state,
6614 phba->sli.sli_flag,
6615 phba->sli.mbox_active);
6616 spin_unlock_irq(&phba->hbalock);
6617
6618 /* Setting state unknown so lpfc_sli_abort_iocb_ring
6619 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 6620 * it to fail all outstanding SCSI IO.
6621 */
6622 spin_lock_irq(&phba->pport->work_port_lock);
6623 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6624 spin_unlock_irq(&phba->pport->work_port_lock);
6625 spin_lock_irq(&phba->hbalock);
6626 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 6627 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6628 spin_unlock_irq(&phba->hbalock);
6629
6630 pring = &psli->ring[psli->fcp_ring];
6631 lpfc_sli_abort_iocb_ring(phba, pring);
6632
6633 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6634 "0345 Resetting board due to mailbox timeout\n");
6635
6636 /* Reset the HBA device */
6637 lpfc_reset_hba(phba);
6638}
6639
6640/**
6641 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6642 * @phba: Pointer to HBA context object.
6643 * @pmbox: Pointer to mailbox object.
 6644 * @flag: Flag indicating how the mailbox needs to be processed.
6645 *
6646 * This function is called by discovery code and HBA management code
6647 * to submit a mailbox command to firmware with SLI-3 interface spec. This
6648 * function gets the hbalock to protect the data structures.
6649 * The mailbox command can be submitted in polling mode, in which case
6650 * this function will wait in a polling loop for the completion of the
6651 * mailbox.
6652 * If the mailbox is submitted in no_wait mode (not polling) the
 6653 * function will submit the command and return immediately without waiting
 6654 * for the mailbox completion. The no_wait mode is supported only when the
 6655 * HBA is in SLI2/SLI3 mode with interrupts enabled.
6656 * The SLI interface allows only one mailbox pending at a time. If the
6657 * mailbox is issued in polling mode and there is already a mailbox
6658 * pending, then the function will return an error. If the mailbox is issued
6659 * in NO_WAIT mode and there is a mailbox pending already, the function
6660 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 6661 * The sli layer owns the mailbox object until the completion of the mailbox
 6662 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6663 * return codes the caller owns the mailbox command after the return of
6664 * the function.
e59058c4 6665 **/
6666static int
6667lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6668 uint32_t flag)
dea3101e 6669{
bf07bdea 6670 MAILBOX_t *mbx;
2e0fef85 6671 struct lpfc_sli *psli = &phba->sli;
dea3101e 6672 uint32_t status, evtctr;
9940b97b 6673 uint32_t ha_copy, hc_copy;
dea3101e 6674 int i;
09372820 6675 unsigned long timeout;
dea3101e 6676 unsigned long drvr_flag = 0;
34b02dcd 6677 uint32_t word0, ldata;
dea3101e 6678 void __iomem *to_slim;
6679 int processing_queue = 0;
6680
6681 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6682 if (!pmbox) {
8568a4d2 6683 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 6684 /* processing mbox queue from intr_handler */
6685 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6686 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6687 return MBX_SUCCESS;
6688 }
58da1ffb 6689 processing_queue = 1;
6690 pmbox = lpfc_mbox_get(phba);
6691 if (!pmbox) {
6692 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6693 return MBX_SUCCESS;
6694 }
6695 }
dea3101e 6696
ed957684 6697 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 6698 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 6699 if(!pmbox->vport) {
58da1ffb 6700 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 6701 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 6702 LOG_MBOX | LOG_VPORT,
e8b62011 6703 "1806 Mbox x%x failed. No vport\n",
3772a991 6704 pmbox->u.mb.mbxCommand);
ed957684 6705 dump_stack();
58da1ffb 6706 goto out_not_finished;
6707 }
6708 }
6709
8d63f375 6710 /* If the PCI channel is in offline state, do not post mbox. */
6711 if (unlikely(pci_channel_offline(phba->pcidev))) {
6712 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6713 goto out_not_finished;
6714 }
8d63f375 6715
 6716 /* If HBA has a deferred error attention, fail the mailbox command. */
6717 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6718 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6719 goto out_not_finished;
6720 }
6721
dea3101e 6722 psli = &phba->sli;
92d7f7b0 6723
bf07bdea 6724 mbx = &pmbox->u.mb;
dea3101e 6725 status = MBX_SUCCESS;
6726
6727 if (phba->link_state == LPFC_HBA_ERROR) {
6728 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6729
6730 /* Mbox command <mbxCommand> cannot issue */
6731 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6732 "(%d):0311 Mailbox command x%x cannot "
6733 "issue Data: x%x x%x\n",
6734 pmbox->vport ? pmbox->vport->vpi : 0,
6735 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 6736 goto out_not_finished;
6737 }
6738
bf07bdea 6739 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6740 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6741 !(hc_copy & HC_MBINT_ENA)) {
6742 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6743 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6744 "(%d):2528 Mailbox command x%x cannot "
6745 "issue Data: x%x x%x\n",
6746 pmbox->vport ? pmbox->vport->vpi : 0,
6747 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6748 goto out_not_finished;
6749 }
6750 }
6751
dea3101e 6752 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
 6753 /* Polling for a mailbox command when another one is already active
6754 * is not allowed in SLI. Also, the driver must have established
6755 * SLI2 mode to queue and process multiple mbox commands.
6756 */
6757
6758 if (flag & MBX_POLL) {
2e0fef85 6759 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6760
6761 /* Mbox command <mbxCommand> cannot issue */
6762 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6763 "(%d):2529 Mailbox command x%x "
6764 "cannot issue Data: x%x x%x\n",
6765 pmbox->vport ? pmbox->vport->vpi : 0,
6766 pmbox->u.mb.mbxCommand,
6767 psli->sli_flag, flag);
58da1ffb 6768 goto out_not_finished;
dea3101e 6769 }
6770
3772a991 6771 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 6772 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6773 /* Mbox command <mbxCommand> cannot issue */
6774 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6775 "(%d):2530 Mailbox command x%x "
6776 "cannot issue Data: x%x x%x\n",
6777 pmbox->vport ? pmbox->vport->vpi : 0,
6778 pmbox->u.mb.mbxCommand,
6779 psli->sli_flag, flag);
58da1ffb 6780 goto out_not_finished;
dea3101e 6781 }
6782
dea3101e 6783 /* Another mailbox command is still being processed, queue this
6784 * command to be processed later.
6785 */
6786 lpfc_mbox_put(phba, pmbox);
6787
6788 /* Mbox cmd issue - BUSY */
ed957684 6789 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 6790 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 6791 "x%x x%x x%x x%x\n",
92d7f7b0 6792 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
bf07bdea 6793 mbx->mbxCommand, phba->pport->port_state,
92d7f7b0 6794 psli->sli_flag, flag);
dea3101e 6795
6796 psli->slistat.mbox_busy++;
2e0fef85 6797 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6798
6799 if (pmbox->vport) {
6800 lpfc_debugfs_disc_trc(pmbox->vport,
6801 LPFC_DISC_TRC_MBOX_VPORT,
6802 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
6803 (uint32_t)mbx->mbxCommand,
6804 mbx->un.varWords[0], mbx->un.varWords[1]);
6805 }
6806 else {
6807 lpfc_debugfs_disc_trc(phba->pport,
6808 LPFC_DISC_TRC_MBOX,
6809 "MBOX Bsy: cmd:x%x mb:x%x x%x",
6810 (uint32_t)mbx->mbxCommand,
6811 mbx->un.varWords[0], mbx->un.varWords[1]);
6812 }
6813
2e0fef85 6814 return MBX_BUSY;
dea3101e 6815 }
6816
dea3101e 6817 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6818
6819 /* If we are not polling, we MUST be in SLI2 mode */
6820 if (flag != MBX_POLL) {
3772a991 6821 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 6822 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 6823 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 6824 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 6825 /* Mbox command <mbxCommand> cannot issue */
6826 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6827 "(%d):2531 Mailbox command x%x "
6828 "cannot issue Data: x%x x%x\n",
6829 pmbox->vport ? pmbox->vport->vpi : 0,
6830 pmbox->u.mb.mbxCommand,
6831 psli->sli_flag, flag);
58da1ffb 6832 goto out_not_finished;
dea3101e 6833 }
6834 /* timeout active mbox command */
6835 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6836 1000);
6837 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e 6838 }
6839
6840 /* Mailbox cmd <cmd> issue */
ed957684 6841 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 6842 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 6843 "x%x\n",
e8b62011 6844 pmbox->vport ? pmbox->vport->vpi : 0,
bf07bdea 6845 mbx->mbxCommand, phba->pport->port_state,
92d7f7b0 6846 psli->sli_flag, flag);
dea3101e 6847
bf07bdea 6848 if (mbx->mbxCommand != MBX_HEARTBEAT) {
6849 if (pmbox->vport) {
6850 lpfc_debugfs_disc_trc(pmbox->vport,
6851 LPFC_DISC_TRC_MBOX_VPORT,
6852 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6853 (uint32_t)mbx->mbxCommand,
6854 mbx->un.varWords[0], mbx->un.varWords[1]);
6855 }
6856 else {
6857 lpfc_debugfs_disc_trc(phba->pport,
6858 LPFC_DISC_TRC_MBOX,
6859 "MBOX Send: cmd:x%x mb:x%x x%x",
6860 (uint32_t)mbx->mbxCommand,
6861 mbx->un.varWords[0], mbx->un.varWords[1]);
6862 }
6863 }
6864
dea3101e 6865 psli->slistat.mbox_cmd++;
6866 evtctr = psli->slistat.mbox_event;
6867
6868 /* next set own bit for the adapter and copy over command word */
bf07bdea 6869 mbx->mbxOwner = OWN_CHIP;
dea3101e 6870
3772a991 6871 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6872 /* Populate mbox extension offset word. */
6873 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 6874 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
6875 = (uint8_t *)phba->mbox_ext
6876 - (uint8_t *)phba->mbox;
6877 }
6878
6879 /* Copy the mailbox extension data */
6880 if (pmbox->in_ext_byte_len && pmbox->context2) {
6881 lpfc_sli_pcimem_bcopy(pmbox->context2,
6882 (uint8_t *)phba->mbox_ext,
6883 pmbox->in_ext_byte_len);
6884 }
6885 /* Copy command data to host SLIM area */
bf07bdea 6886 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 6887 } else {
6888 /* Populate mbox extension offset word. */
6889 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 6890 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
6891 = MAILBOX_HBA_EXT_OFFSET;
6892
6893 /* Copy the mailbox extension data */
6894 if (pmbox->in_ext_byte_len && pmbox->context2) {
6895 lpfc_memcpy_to_slim(phba->MBslimaddr +
6896 MAILBOX_HBA_EXT_OFFSET,
6897 pmbox->context2, pmbox->in_ext_byte_len);
6898
6899 }
bf07bdea 6900 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 6901 /* copy command data into host mbox for cmpl */
bf07bdea 6902 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 6903 }
6904
6905 /* First copy mbox command data to HBA SLIM, skip past first
6906 word */
6907 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 6908 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e 6909 MAILBOX_CMD_SIZE - sizeof (uint32_t));
6910
6911 /* Next copy over first word, with mbxOwner set */
bf07bdea 6912 ldata = *((uint32_t *)mbx);
dea3101e 6913 to_slim = phba->MBslimaddr;
6914 writel(ldata, to_slim);
6915 readl(to_slim); /* flush */
6916
bf07bdea 6917 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 6918 /* switch over to host mailbox */
3772a991 6919 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 6920 }
6921 }
6922
6923 wmb();
dea3101e 6924
6925 switch (flag) {
6926 case MBX_NOWAIT:
09372820 6927 /* Set up reference to mailbox command */
dea3101e 6928 psli->mbox_active = pmbox;
6929 /* Interrupt board to do it */
6930 writel(CA_MBATT, phba->CAregaddr);
6931 readl(phba->CAregaddr); /* flush */
6932 /* Don't wait for it to finish, just return */
dea3101e 6933 break;
6934
6935 case MBX_POLL:
09372820 6936 /* Set up null reference to mailbox command */
dea3101e 6937 psli->mbox_active = NULL;
6938 /* Interrupt board to do it */
6939 writel(CA_MBATT, phba->CAregaddr);
6940 readl(phba->CAregaddr); /* flush */
6941
3772a991 6942 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6943 /* First read mbox status word */
34b02dcd 6944 word0 = *((uint32_t *)phba->mbox);
dea3101e 6945 word0 = le32_to_cpu(word0);
6946 } else {
6947 /* First read mbox status word */
6948 if (lpfc_readl(phba->MBslimaddr, &word0)) {
6949 spin_unlock_irqrestore(&phba->hbalock,
6950 drvr_flag);
6951 goto out_not_finished;
6952 }
dea3101e 6953 }
6954
6955 /* Read the HBA Host Attention Register */
6956 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6957 spin_unlock_irqrestore(&phba->hbalock,
6958 drvr_flag);
6959 goto out_not_finished;
6960 }
6961 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6962 1000) + jiffies;
09372820 6963 i = 0;
dea3101e 6964 /* Wait for command to complete */
6965 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6966 (!(ha_copy & HA_MBATT) &&
2e0fef85 6967 (phba->link_state > LPFC_WARM_START))) {
09372820 6968 if (time_after(jiffies, timeout)) {
dea3101e 6969 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 6970 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 6971 drvr_flag);
58da1ffb 6972 goto out_not_finished;
dea3101e 6973 }
6974
 6975 /* Check if we took a mailbox interrupt while we were
6976 polling */
6977 if (((word0 & OWN_CHIP) != OWN_CHIP)
6978 && (evtctr != psli->slistat.mbox_event))
6979 break;
6980
6981 if (i++ > 10) {
6982 spin_unlock_irqrestore(&phba->hbalock,
6983 drvr_flag);
6984 msleep(1);
6985 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6986 }
dea3101e 6987
3772a991 6988 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 6989 /* First copy command data */
34b02dcd 6990 word0 = *((uint32_t *)phba->mbox);
dea3101e 6991 word0 = le32_to_cpu(word0);
bf07bdea 6992 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 6993 MAILBOX_t *slimmb;
34b02dcd 6994 uint32_t slimword0;
dea3101e 6995 /* Check real SLIM for any errors */
6996 slimword0 = readl(phba->MBslimaddr);
6997 slimmb = (MAILBOX_t *) & slimword0;
6998 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6999 && slimmb->mbxStatus) {
7000 psli->sli_flag &=
3772a991 7001 ~LPFC_SLI_ACTIVE;
dea3101e 7002 word0 = slimword0;
7003 }
7004 }
7005 } else {
7006 /* First copy command data */
7007 word0 = readl(phba->MBslimaddr);
7008 }
7009 /* Read the HBA Host Attention Register */
7010 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7011 spin_unlock_irqrestore(&phba->hbalock,
7012 drvr_flag);
7013 goto out_not_finished;
7014 }
dea3101e 7015 }
7016
3772a991 7017 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 7018 /* copy results back to user */
bf07bdea 7019 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
7020 /* Copy the mailbox extension data */
7021 if (pmbox->out_ext_byte_len && pmbox->context2) {
7022 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7023 pmbox->context2,
7024 pmbox->out_ext_byte_len);
7025 }
dea3101e 7026 } else {
7027 /* First copy command data */
bf07bdea 7028 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
dea3101e 7029 MAILBOX_CMD_SIZE);
7030 /* Copy the mailbox extension data */
7031 if (pmbox->out_ext_byte_len && pmbox->context2) {
7032 lpfc_memcpy_from_slim(pmbox->context2,
7033 phba->MBslimaddr +
7034 MAILBOX_HBA_EXT_OFFSET,
7035 pmbox->out_ext_byte_len);
dea3101e 7036 }
7037 }
7038
7039 writel(HA_MBATT, phba->HAregaddr);
7040 readl(phba->HAregaddr); /* flush */
7041
7042 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 7043 status = mbx->mbxStatus;
dea3101e 7044 }
7045
7046 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7047 return status;
7048
7049out_not_finished:
7050 if (processing_queue) {
da0436e9 7051 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7052 lpfc_mbox_cmpl_put(phba, pmbox);
7053 }
7054 return MBX_NOT_FINISHED;
dea3101e 7055}
7056
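/*
 * Editorial sketch: the ownership contract documented above, seen from a
 * caller.  On MBX_BUSY or MBX_SUCCESS the SLI layer still owns the mailbox
 * (queued or in flight, freed from the completion path); for any other
 * return code the caller must reclaim it.  Hypothetical caller, for
 * illustration only:
 */
#if 0
static void example_nowait_caller(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	int rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
		mempool_free(pmb, phba->mbox_mem_pool);
	/* else: the mbox_cmpl completion handler cleans up */
}
#endif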
7057/**
7058 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7059 * @phba: Pointer to HBA context object.
7060 *
7061 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7062 * the driver internal pending mailbox queue. It will then try to wait out the
 7063 * possible outstanding mailbox command before returning.
7064 *
7065 * Returns:
 7066 * 0 - the outstanding mailbox command completed; 1 - the wait for
 7067 * the outstanding mailbox command timed out.
7068 **/
7069static int
7070lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7071{
7072 struct lpfc_sli *psli = &phba->sli;
f1126688 7073 int rc = 0;
a183a15f 7074 unsigned long timeout = 0;
7075
7076 /* Mark the asynchronous mailbox command posting as blocked */
7077 spin_lock_irq(&phba->hbalock);
7078 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7079 /* Determine how long we might wait for the active mailbox
7080 * command to be gracefully completed by firmware.
7081 */
7082 if (phba->sli.mbox_active)
7083 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7084 phba->sli.mbox_active) *
7085 1000) + jiffies;
7086 spin_unlock_irq(&phba->hbalock);
7087
 7088 /* Wait for the outstanding mailbox command to complete */
7089 while (phba->sli.mbox_active) {
7090 /* Check active mailbox complete status every 2ms */
7091 msleep(2);
7092 if (time_after(jiffies, timeout)) {
 7093 /* Timeout, mark the outstanding cmd as not complete */
7094 rc = 1;
7095 break;
7096 }
7097 }
7098
 7099 /* Cannot cleanly block async mailbox command posting, fail it */
7100 if (rc) {
7101 spin_lock_irq(&phba->hbalock);
7102 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7103 spin_unlock_irq(&phba->hbalock);
7104 }
7105 return rc;
7106}
7107
7108/**
 7109 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7110 * @phba: Pointer to HBA context object.
7111 *
 7112 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7113 * commands from the driver internal pending mailbox queue. It makes sure
7114 * that there is no outstanding mailbox command before resuming posting
 7115 * asynchronous mailbox commands. If, for any reason, there is an outstanding
7116 * mailbox command, it will try to wait it out before resuming asynchronous
7117 * mailbox command posting.
7118 **/
7119static void
7120lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7121{
7122 struct lpfc_sli *psli = &phba->sli;
7123
7124 spin_lock_irq(&phba->hbalock);
7125 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7126 /* Asynchronous mailbox posting is not blocked, do nothing */
7127 spin_unlock_irq(&phba->hbalock);
7128 return;
7129 }
7130
 7131 /* The outstanding synchronous mailbox command is guaranteed to be
 7132 * done, whether it completed successfully or timed out; after a
 7133 * timeout the outstanding command is always removed. So just unblock
 7134 * posting of async mailbox commands and resume.
7135 */
7136 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7137 spin_unlock_irq(&phba->hbalock);
7138
 7139 /* wake up worker thread to post asynchronous mailbox command */
7140 lpfc_worker_wake_up(phba);
7141}
7142
7143/**
7144 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7145 * @phba: Pointer to HBA context object.
7146 * @mboxq: Pointer to mailbox object.
7147 *
7148 * The function waits for the bootstrap mailbox register ready bit from
 7149 * the port for twice the regular mailbox command timeout value.
 7150 * Returns:
 7151 * 0 - no timeout waiting for the bootstrap mailbox register ready bit.
 7152 * MBXERR_ERROR - the wait for the bootstrap mailbox register timed out.
7153 **/
7154static int
7155lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7156{
7157 uint32_t db_ready;
7158 unsigned long timeout;
7159 struct lpfc_register bmbx_reg;
7160
7161 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7162 * 1000) + jiffies;
7163
7164 do {
7165 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7166 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7167 if (!db_ready)
7168 msleep(2);
7169
7170 if (time_after(jiffies, timeout))
7171 return MBXERR_ERROR;
7172 } while (!db_ready);
7173
7174 return 0;
7175}
7176
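/*
 * Editorial note: the routine above is an instance of the standard
 * jiffies-deadline polling idiom.  Reduced to its skeleton (illustration
 * only, with a hypothetical ready() predicate):
 */
#if 0
static int example_poll(unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!ready()) {
		msleep(2);			/* yield between polls */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* gave up */
	}
	return 0;				/* ready bit observed */
}
#endif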
7177/**
7178 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7179 * @phba: Pointer to HBA context object.
7180 * @mboxq: Pointer to mailbox object.
7181 *
7182 * The function posts a mailbox to the port. The mailbox is expected
 7183 * to be completely filled in and ready for the port to operate on it.
7184 * This routine executes a synchronous completion operation on the
7185 * mailbox by polling for its completion.
7186 *
7187 * The caller must not be holding any locks when calling this routine.
7188 *
7189 * Returns:
7190 * MBX_SUCCESS - mailbox posted successfully
7191 * Any of the MBX error values.
7192 **/
7193static int
7194lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7195{
7196 int rc = MBX_SUCCESS;
7197 unsigned long iflag;
7198 uint32_t mcqe_status;
7199 uint32_t mbx_cmnd;
7200 struct lpfc_sli *psli = &phba->sli;
7201 struct lpfc_mqe *mb = &mboxq->u.mqe;
7202 struct lpfc_bmbx_create *mbox_rgn;
7203 struct dma_address *dma_address;
7204
7205 /*
7206 * Only one mailbox can be active to the bootstrap mailbox region
7207 * at a time and there is no queueing provided.
7208 */
7209 spin_lock_irqsave(&phba->hbalock, iflag);
7210 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7211 spin_unlock_irqrestore(&phba->hbalock, iflag);
7212 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7213 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7214 "cannot issue Data: x%x x%x\n",
7215 mboxq->vport ? mboxq->vport->vpi : 0,
7216 mboxq->u.mb.mbxCommand,
7217 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7218 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7219 psli->sli_flag, MBX_POLL);
7220 return MBXERR_ERROR;
7221 }
 7222 /* The driver grabs the token and owns it until release */
7223 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7224 phba->sli.mbox_active = mboxq;
7225 spin_unlock_irqrestore(&phba->hbalock, iflag);
7226
 7227 /* wait for the bootstrap mbox register to become ready */
7228 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7229 if (rc)
7230 goto exit;
7231
7232 /*
7233 * Initialize the bootstrap memory region to avoid stale data areas
7234 * in the mailbox post. Then copy the caller's mailbox contents to
7235 * the bmbx mailbox region.
7236 */
7237 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7238 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7239 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7240 sizeof(struct lpfc_mqe));
7241
7242 /* Post the high mailbox dma address to the port and wait for ready. */
7243 dma_address = &phba->sli4_hba.bmbx.dma_address;
7244 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7245
 7246 /* wait for the bootstrap mbox hi-address write to complete */
7247 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7248 if (rc)
7249 goto exit;
7250
7251 /* Post the low mailbox dma address to the port. */
7252 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 7253
 7254 /* wait for the bootstrap mbox low-address write to complete */
7255 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7256 if (rc)
7257 goto exit;
7258
7259 /*
7260 * Read the CQ to ensure the mailbox has completed.
7261 * If so, update the mailbox status so that the upper layers
7262 * can complete the request normally.
7263 */
7264 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7265 sizeof(struct lpfc_mqe));
7266 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7267 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7268 sizeof(struct lpfc_mcqe));
7269 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7270 /*
7271 * When the CQE status indicates a failure and the mailbox status
7272 * indicates success then copy the CQE status into the mailbox status
7273 * (and prefix it with x4000).
7274 */
da0436e9 7275 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7276 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7277 bf_set(lpfc_mqe_status, mb,
7278 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 7279 rc = MBXERR_ERROR;
7280 } else
7281 lpfc_sli4_swap_str(phba, mboxq);
7282
7283 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7284 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7285 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7286 " x%x x%x CQ: x%x x%x x%x x%x\n",
7287 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7288 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7289 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7290 bf_get(lpfc_mqe_status, mb),
7291 mb->un.mb_words[0], mb->un.mb_words[1],
7292 mb->un.mb_words[2], mb->un.mb_words[3],
7293 mb->un.mb_words[4], mb->un.mb_words[5],
7294 mb->un.mb_words[6], mb->un.mb_words[7],
7295 mb->un.mb_words[8], mb->un.mb_words[9],
7296 mb->un.mb_words[10], mb->un.mb_words[11],
7297 mb->un.mb_words[12], mboxq->mcqe.word0,
7298 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7299 mboxq->mcqe.trailer);
7300exit:
 7301 /* We are holding the token, no lock needed for the release */
7302 spin_lock_irqsave(&phba->hbalock, iflag);
7303 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7304 phba->sli.mbox_active = NULL;
7305 spin_unlock_irqrestore(&phba->hbalock, iflag);
7306 return rc;
7307}
7308
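/*
 * Editorial summary of the bootstrap (BMBX) handshake implemented above:
 *   1. copy the caller's MQE into the zeroed bootstrap DMA region;
 *   2. write the high half of the region's DMA address to BMBXregaddr and
 *      poll the lpfc_bmbx_rdy bit;
 *   3. write the low half and poll lpfc_bmbx_rdy again;
 *   4. copy the MQE and MCQE back out, folding a failed MCQE status into
 *      the mailbox status (prefixed with LPFC_MBX_ERROR_RANGE).
 */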
7309/**
7310 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7311 * @phba: Pointer to HBA context object.
 7312 * @mboxq: Pointer to mailbox object.
 7313 * @flag: Flag indicating how the mailbox needs to be processed.
7314 *
7315 * This function is called by discovery code and HBA management code to submit
7316 * a mailbox command to firmware with SLI-4 interface spec.
7317 *
 7318 * Return codes: the caller owns the mailbox command after the return of
 7319 * the function.
7320 **/
7321static int
7322lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7323 uint32_t flag)
7324{
7325 struct lpfc_sli *psli = &phba->sli;
7326 unsigned long iflags;
7327 int rc;
7328
7329 /* dump from issue mailbox command if setup */
7330 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7331
7332 rc = lpfc_mbox_dev_check(phba);
7333 if (unlikely(rc)) {
7334 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7335 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7336 "cannot issue Data: x%x x%x\n",
7337 mboxq->vport ? mboxq->vport->vpi : 0,
7338 mboxq->u.mb.mbxCommand,
7339 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7340 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7341 psli->sli_flag, flag);
7342 goto out_not_finished;
7343 }
7344
7345 /* Detect polling mode and jump to a handler */
7346 if (!phba->sli4_hba.intr_enable) {
7347 if (flag == MBX_POLL)
7348 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7349 else
7350 rc = -EIO;
7351 if (rc != MBX_SUCCESS)
0558056c 7352 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 7353 "(%d):2541 Mailbox command x%x "
7354 "(x%x/x%x) failure: "
7355 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7356 "Data: x%x x%x\n,",
7357 mboxq->vport ? mboxq->vport->vpi : 0,
7358 mboxq->u.mb.mbxCommand,
7359 lpfc_sli_config_mbox_subsys_get(phba,
7360 mboxq),
7361 lpfc_sli_config_mbox_opcode_get(phba,
7362 mboxq),
7363 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7364 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7365 bf_get(lpfc_mcqe_ext_status,
7366 &mboxq->mcqe),
7367 psli->sli_flag, flag);
7368 return rc;
7369 } else if (flag == MBX_POLL) {
7370 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7371 "(%d):2542 Try to issue mailbox command "
a183a15f 7372 "x%x (x%x/x%x) synchronously ahead of async "
f1126688 7373 "mailbox command queue: x%x x%x\n",
7374 mboxq->vport ? mboxq->vport->vpi : 0,
7375 mboxq->u.mb.mbxCommand,
7376 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7377 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 7378 psli->sli_flag, flag);
7379 /* Try to block the asynchronous mailbox posting */
7380 rc = lpfc_sli4_async_mbox_block(phba);
7381 if (!rc) {
7382 /* Successfully blocked, now issue sync mbox cmd */
7383 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7384 if (rc != MBX_SUCCESS)
cc459f19 7385 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 7386 LOG_MBOX | LOG_SLI,
7387 "(%d):2597 Sync Mailbox command "
7388 "x%x (x%x/x%x) failure: "
7389 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7390 "Data: x%x x%x\n,",
7391 mboxq->vport ? mboxq->vport->vpi : 0,
7392 mboxq->u.mb.mbxCommand,
7393 lpfc_sli_config_mbox_subsys_get(phba,
7394 mboxq),
7395 lpfc_sli_config_mbox_opcode_get(phba,
7396 mboxq),
7397 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7398 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7399 bf_get(lpfc_mcqe_ext_status,
7400 &mboxq->mcqe),
a183a15f 7401 psli->sli_flag, flag);
7402 /* Unblock the async mailbox posting afterward */
7403 lpfc_sli4_async_mbox_unblock(phba);
7404 }
7405 return rc;
7406 }
7407
 7408 /* Now, interrupt mode asynchronous mailbox command */
7409 rc = lpfc_mbox_cmd_check(phba, mboxq);
7410 if (rc) {
7411 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7412 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7413 "cannot issue Data: x%x x%x\n",
7414 mboxq->vport ? mboxq->vport->vpi : 0,
7415 mboxq->u.mb.mbxCommand,
7416 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7417 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7418 psli->sli_flag, flag);
7419 goto out_not_finished;
7420 }
7421
7422 /* Put the mailbox command to the driver internal FIFO */
7423 psli->slistat.mbox_busy++;
7424 spin_lock_irqsave(&phba->hbalock, iflags);
7425 lpfc_mbox_put(phba, mboxq);
7426 spin_unlock_irqrestore(&phba->hbalock, iflags);
7427 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7428 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 7429 "x%x (x%x/x%x) x%x x%x x%x\n",
7430 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7431 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7432 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7433 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7434 phba->pport->port_state,
7435 psli->sli_flag, MBX_NOWAIT);
7436 /* Wake up worker thread to transport mailbox command from head */
7437 lpfc_worker_wake_up(phba);
7438
7439 return MBX_BUSY;
7440
7441out_not_finished:
7442 return MBX_NOT_FINISHED;
7443}
7444
7445/**
7446 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7447 * @phba: Pointer to HBA context object.
7448 *
7449 * This function is called by worker thread to send a mailbox command to
7450 * SLI4 HBA firmware.
7451 *
7452 **/
7453int
7454lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7455{
7456 struct lpfc_sli *psli = &phba->sli;
7457 LPFC_MBOXQ_t *mboxq;
7458 int rc = MBX_SUCCESS;
7459 unsigned long iflags;
7460 struct lpfc_mqe *mqe;
7461 uint32_t mbx_cmnd;
7462
 7463 /* Check interrupt mode before posting async mailbox command */
7464 if (unlikely(!phba->sli4_hba.intr_enable))
7465 return MBX_NOT_FINISHED;
7466
7467 /* Check for mailbox command service token */
7468 spin_lock_irqsave(&phba->hbalock, iflags);
7469 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7470 spin_unlock_irqrestore(&phba->hbalock, iflags);
7471 return MBX_NOT_FINISHED;
7472 }
7473 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7474 spin_unlock_irqrestore(&phba->hbalock, iflags);
7475 return MBX_NOT_FINISHED;
7476 }
7477 if (unlikely(phba->sli.mbox_active)) {
7478 spin_unlock_irqrestore(&phba->hbalock, iflags);
7479 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7480 "0384 There is pending active mailbox cmd\n");
7481 return MBX_NOT_FINISHED;
7482 }
7483 /* Take the mailbox command service token */
7484 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7485
7486 /* Get the next mailbox command from head of queue */
7487 mboxq = lpfc_mbox_get(phba);
7488
 7489 /* If no more mailbox commands are waiting for post, we're done */
7490 if (!mboxq) {
7491 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7492 spin_unlock_irqrestore(&phba->hbalock, iflags);
7493 return MBX_SUCCESS;
7494 }
7495 phba->sli.mbox_active = mboxq;
7496 spin_unlock_irqrestore(&phba->hbalock, iflags);
7497
7498 /* Check device readiness for posting mailbox command */
7499 rc = lpfc_mbox_dev_check(phba);
7500 if (unlikely(rc))
7501 /* Driver clean routine will clean up pending mailbox */
7502 goto out_not_finished;
7503
7504 /* Prepare the mbox command to be posted */
7505 mqe = &mboxq->u.mqe;
7506 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7507
7508 /* Start timer for the mbox_tmo and log some mailbox post messages */
7509 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 7510 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7511
7512 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 7513 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7514 "x%x x%x\n",
7515 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7516 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7517 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7518 phba->pport->port_state, psli->sli_flag);
7519
7520 if (mbx_cmnd != MBX_HEARTBEAT) {
7521 if (mboxq->vport) {
7522 lpfc_debugfs_disc_trc(mboxq->vport,
7523 LPFC_DISC_TRC_MBOX_VPORT,
7524 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7525 mbx_cmnd, mqe->un.mb_words[0],
7526 mqe->un.mb_words[1]);
7527 } else {
7528 lpfc_debugfs_disc_trc(phba->pport,
7529 LPFC_DISC_TRC_MBOX,
7530 "MBOX Send: cmd:x%x mb:x%x x%x",
7531 mbx_cmnd, mqe->un.mb_words[0],
7532 mqe->un.mb_words[1]);
7533 }
7534 }
7535 psli->slistat.mbox_cmd++;
7536
7537 /* Post the mailbox command to the port */
7538 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7539 if (rc != MBX_SUCCESS) {
7540 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 7541 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7542 "cannot issue Data: x%x x%x\n",
7543 mboxq->vport ? mboxq->vport->vpi : 0,
7544 mboxq->u.mb.mbxCommand,
7545 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7546 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7547 psli->sli_flag, MBX_NOWAIT);
7548 goto out_not_finished;
7549 }
7550
7551 return rc;
7552
7553out_not_finished:
7554 spin_lock_irqsave(&phba->hbalock, iflags);
7555 if (phba->sli.mbox_active) {
7556 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7557 __lpfc_mbox_cmpl_put(phba, mboxq);
7558 /* Release the token */
7559 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7560 phba->sli.mbox_active = NULL;
7561 }
7562 spin_unlock_irqrestore(&phba->hbalock, iflags);
7563
7564 return MBX_NOT_FINISHED;
7565}
7566
7567/**
7568 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7569 * @phba: Pointer to HBA context object.
7570 * @pmbox: Pointer to mailbox object.
 7571 * @flag: Flag indicating how the mailbox needs to be processed.
7572 *
 7573 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
 7574 * the API jump table function pointer in the lpfc_hba struct.
7575 *
 7576 * Return codes: the caller owns the mailbox command after the return of
 7577 * the function.
7578 **/
7579int
7580lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7581{
7582 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7583}
7584
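/*
 * Editorial sketch: a typical synchronous (MBX_POLL) user of the wrapper
 * above.  lpfc_read_rev() is one of the mailbox builders from lpfc_mbox.c;
 * the error and ownership handling here are simplified for illustration.
 */
#if 0
static int example_poll_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	lpfc_read_rev(phba, pmb);	/* build the READ_REV command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS)
		rc = -EIO;		/* u.mb.mbxStatus has the detail */
	else
		rc = 0;
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
#endif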
7585/**
25985edc 7586 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7587 * @phba: The hba struct for which this call is being executed.
7588 * @dev_grp: The HBA PCI-Device group number.
7589 *
7590 * This routine sets up the mbox interface API function jump table in @phba
7591 * struct.
7592 * Returns: 0 - success, -ENODEV - failure.
7593 **/
7594int
7595lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7596{
7597
7598 switch (dev_grp) {
7599 case LPFC_PCI_DEV_LP:
7600 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7601 phba->lpfc_sli_handle_slow_ring_event =
7602 lpfc_sli_handle_slow_ring_event_s3;
7603 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7604 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7605 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7606 break;
7607 case LPFC_PCI_DEV_OC:
7608 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7609 phba->lpfc_sli_handle_slow_ring_event =
7610 lpfc_sli_handle_slow_ring_event_s4;
7611 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7612 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7613 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7614 break;
7615 default:
7616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7617 "1420 Invalid HBA PCI-device group: 0x%x\n",
7618 dev_grp);
7619 return -ENODEV;
7620 break;
7621 }
7622 return 0;
7623}
7624
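/*
 * Editorial note: the jump table above is filled in once, early in probe,
 * after the PCI device group is known -- roughly (illustration only):
 */
#if 0
	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
		return -ENODEV;		/* unsupported device group */
#endif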
e59058c4 7625/**
3621a710 7626 * __lpfc_sli_ringtx_put - Add an iocb to the txq
7627 * @phba: Pointer to HBA context object.
7628 * @pring: Pointer to driver SLI ring object.
7629 * @piocb: Pointer to address of newly added command iocb.
7630 *
7631 * This function is called with hbalock held to add a command
7632 * iocb to the txq when SLI layer cannot submit the command iocb
7633 * to the ring.
7634 **/
2a9bf3d0 7635void
92d7f7b0 7636__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 7637 struct lpfc_iocbq *piocb)
dea3101e 7638{
7639 /* Insert the caller's iocb in the txq tail for later processing. */
7640 list_add_tail(&piocb->list, &pring->txq);
dea3101e 7641}
7642
e59058c4 7643/**
3621a710 7644 * lpfc_sli_next_iocb - Get the next iocb in the txq
7645 * @phba: Pointer to HBA context object.
7646 * @pring: Pointer to driver SLI ring object.
7647 * @piocb: Pointer to address of newly added command iocb.
7648 *
7649 * This function is called with hbalock held before a new
7650 * iocb is submitted to the firmware. This function checks
 7651 * the txq so that iocbs already queued there are flushed to the
 7652 * firmware before new iocbs are submitted.
7653 * If there are iocbs in the txq which need to be submitted
7654 * to firmware, lpfc_sli_next_iocb returns the first element
7655 * of the txq after dequeuing it from txq.
7656 * If there is no iocb in the txq then the function will return
7657 * *piocb and *piocb is set to NULL. Caller needs to check
7658 * *piocb to find if there are more commands in the txq.
7659 **/
dea3101e 7660static struct lpfc_iocbq *
7661lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 7662 struct lpfc_iocbq **piocb)
dea3101e 7663{
7664 struct lpfc_iocbq * nextiocb;
7665
7666 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7667 if (!nextiocb) {
7668 nextiocb = *piocb;
7669 *piocb = NULL;
7670 }
7671
7672 return nextiocb;
7673}
7674
e59058c4 7675/**
3772a991 7676 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 7677 * @phba: Pointer to HBA context object.
3772a991 7678 * @ring_number: SLI ring number to issue iocb on.
7679 * @piocb: Pointer to command iocb.
7680 * @flag: Flag indicating if this command can be put into txq.
7681 *
7682 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7683 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7684 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7685 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7686 * this function allows only iocbs for posting buffers. This function finds
7687 * next available slot in the command ring and posts the command to the
7688 * available slot and writes the port attention register to request HBA start
7689 * processing new iocb. If there is no slot available in the ring and
7690 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7691 * the function returns IOCB_BUSY.
e59058c4 7692 *
7693 * This function is called with hbalock held. The function will return success
 7694 * after it successfully submits the iocb to firmware or after adding it
 7695 * to the txq.
e59058c4 7696 **/
98c9ea5c 7697static int
3772a991 7698__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 7699 struct lpfc_iocbq *piocb, uint32_t flag)
7700{
7701 struct lpfc_iocbq *nextiocb;
7702 IOCB_t *iocb;
3772a991 7703 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
dea3101e 7704
7705 if (piocb->iocb_cmpl && (!piocb->vport) &&
7706 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7707 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7708 lpfc_printf_log(phba, KERN_ERR,
7709 LOG_SLI | LOG_VPORT,
e8b62011 7710 "1807 IOCB x%x failed. No vport\n",
7711 piocb->iocb.ulpCommand);
7712 dump_stack();
7713 return IOCB_ERROR;
7714 }
7715
7716
7717 /* If the PCI channel is in offline state, do not post iocbs. */
7718 if (unlikely(pci_channel_offline(phba->pcidev)))
7719 return IOCB_ERROR;
7720
7721 /* If HBA has a deferred error attention, fail the iocb. */
7722 if (unlikely(phba->hba_flag & DEFER_ERATT))
7723 return IOCB_ERROR;
7724
dea3101e 7725 /*
7726 * We should never get an IOCB if we are in a < LINK_DOWN state
7727 */
2e0fef85 7728 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 7729 return IOCB_ERROR;
7730
7731 /*
 7732 * Check to see if we are blocking IOCB processing because of an
0b727fea 7733 * outstanding event.
dea3101e 7734 */
0b727fea 7735 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 7736 goto iocb_busy;
7737
2e0fef85 7738 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 7739 /*
2680eeaa 7740 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 7741 * can be issued if the link is not up.
7742 */
7743 switch (piocb->iocb.ulpCommand) {
7744 case CMD_GEN_REQUEST64_CR:
7745 case CMD_GEN_REQUEST64_CX:
7746 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7747 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 7748 FC_RCTL_DD_UNSOL_CMD) ||
7749 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7750 MENLO_TRANSPORT_TYPE))
7751
7752 goto iocb_busy;
7753 break;
dea3101e 7754 case CMD_QUE_RING_BUF_CN:
7755 case CMD_QUE_RING_BUF64_CN:
dea3101e 7756 /*
7757 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7758 * completion, iocb_cmpl MUST be 0.
7759 */
7760 if (piocb->iocb_cmpl)
7761 piocb->iocb_cmpl = NULL;
7762 /*FALLTHROUGH*/
7763 case CMD_CREATE_XRI_CR:
7764 case CMD_CLOSE_XRI_CN:
7765 case CMD_CLOSE_XRI_CX:
dea3101e 7766 break;
7767 default:
7768 goto iocb_busy;
7769 }
7770
7771 /*
7772 * For FCP commands, we must be in a state where we can process link
7773 * attention events.
7774 */
7775 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
92d7f7b0 7776 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 7777 goto iocb_busy;
92d7f7b0 7778 }
dea3101e 7779
dea3101e 7780 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7781 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7782 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7783
7784 if (iocb)
7785 lpfc_sli_update_ring(phba, pring);
7786 else
7787 lpfc_sli_update_full_ring(phba, pring);
7788
7789 if (!piocb)
7790 return IOCB_SUCCESS;
7791
7792 goto out_busy;
7793
7794 iocb_busy:
7795 pring->stats.iocb_cmd_delay++;
7796
7797 out_busy:
7798
7799 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 7800 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 7801 return IOCB_SUCCESS;
7802 }
7803
7804 return IOCB_BUSY;
7805}
7806
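/*
 * Editorial note on the routine above: the SLI_IOCB_RET_IOCB flag selects
 * the full-ring policy.  Without it, an iocb that cannot be placed on the
 * ring is parked on the txq and IOCB_SUCCESS is returned; with it, the
 * iocb is handed back to the caller with IOCB_BUSY.
 */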
3772a991 7807/**
7808 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7809 * @phba: Pointer to HBA context object.
 7810 * @piocbq: Pointer to command iocb.
7811 * @sglq: Pointer to the scatter gather queue object.
7812 *
7813 * This routine converts the bpl or bde that is in the IOCB
7814 * to a sgl list for the sli4 hardware. The physical address
7815 * of the bpl/bde is converted back to a virtual address.
7816 * If the IOCB contains a BPL then the list of BDE's is
7817 * converted to sli4_sge's. If the IOCB contains a single
 7818 * BDE then it is converted to a single sli4_sge.
 7819 * The IOCB is still in CPU endianness so the contents of
7820 * the bpl can be used without byte swapping.
7821 *
7822 * Returns valid XRI = Success, NO_XRI = Failure.
7823**/
7824static uint16_t
7825lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7826 struct lpfc_sglq *sglq)
3772a991 7827{
7828 uint16_t xritag = NO_XRI;
7829 struct ulp_bde64 *bpl = NULL;
7830 struct ulp_bde64 bde;
7831 struct sli4_sge *sgl = NULL;
1b51197d 7832 struct lpfc_dmabuf *dmabuf;
7833 IOCB_t *icmd;
7834 int numBdes = 0;
7835 int i = 0;
7836 uint32_t offset = 0; /* accumulated offset in the sg request list */
7837 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 7838
7839 if (!piocbq || !sglq)
7840 return xritag;
7841
7842 sgl = (struct sli4_sge *)sglq->sgl;
7843 icmd = &piocbq->iocb;
7844 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7845 return sglq->sli4_xritag;
7846 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7847 numBdes = icmd->un.genreq64.bdl.bdeSize /
7848 sizeof(struct ulp_bde64);
7849 /* The addrHigh and addrLow fields within the IOCB
7850 * have not been byteswapped yet so there is no
7851 * need to swap them back.
7852 */
7853 if (piocbq->context3)
7854 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7855 else
7856 return xritag;
4f774513 7857
1b51197d 7858 bpl = (struct ulp_bde64 *)dmabuf->virt;
7859 if (!bpl)
7860 return xritag;
7861
7862 for (i = 0; i < numBdes; i++) {
7863 /* Should already be byte swapped. */
7864 sgl->addr_hi = bpl->addrHigh;
7865 sgl->addr_lo = bpl->addrLow;
7866
0558056c 7867 sgl->word2 = le32_to_cpu(sgl->word2);
7868 if ((i+1) == numBdes)
7869 bf_set(lpfc_sli4_sge_last, sgl, 1);
7870 else
7871 bf_set(lpfc_sli4_sge_last, sgl, 0);
7872 /* swap the size field back to the cpu so we
7873 * can assign it to the sgl.
7874 */
7875 bde.tus.w = le32_to_cpu(bpl->tus.w);
7876 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
7877 /* The offsets in the sgl need to be accumulated
7878 * separately for the request and reply lists.
7879 * The request is always first, the reply follows.
7880 */
7881 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7882 /* add up the reply sg entries */
7883 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7884 inbound++;
7885 /* first inbound? reset the offset */
7886 if (inbound == 1)
7887 offset = 0;
7888 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7889 bf_set(lpfc_sli4_sge_type, sgl,
7890 LPFC_SGE_TYPE_DATA);
7891 offset += bde.tus.f.bdeSize;
7892 }
546fc854 7893 sgl->word2 = cpu_to_le32(sgl->word2);
7894 bpl++;
7895 sgl++;
7896 }
7897 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7898 /* The addrHigh and addrLow fields of the BDE have not
7899 * been byteswapped yet so they need to be swapped
7900 * before putting them in the sgl.
7901 */
7902 sgl->addr_hi =
7903 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7904 sgl->addr_lo =
7905 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 7906 sgl->word2 = le32_to_cpu(sgl->word2);
7907 bf_set(lpfc_sli4_sge_last, sgl, 1);
7908 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
7909 sgl->sge_len =
7910 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
7911 }
7912 return sglq->sli4_xritag;
3772a991 7913}
92d7f7b0 7914
e59058c4 7915/**
4f774513 7916 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
e59058c4 7917 * @phba: Pointer to HBA context object.
e59058c4 7918 *
a93ff37a 7919 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
8fa38513
JS
7920 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
7921 * held.
4f774513
JS
7922 *
7923 * Return: index into SLI4 fast-path FCP queue index.
e59058c4 7924 **/
2a76a283 7925static inline uint32_t
8fa38513 7926lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
92d7f7b0 7927{
2a76a283 7928 int i;
92d7f7b0 7929
7930 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7931 i = smp_processor_id();
7932 else
7933 i = atomic_add_return(1, &phba->fcp_qidx);
92d7f7b0 7934
67d12733 7935 i = (i % phba->cfg_fcp_io_channel);
2a76a283 7936 return i;
7937}
7938
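/*
 * Editorial worked example: with cfg_fcp_io_channel == 4 and the
 * round-robin policy above, successive atomic_add_return() values
 * 1, 2, 3, 4, 5 map (mod 4) to WQ indices 1, 2, 3, 0, 1 -- an even
 * spread across the FCP io channels.
 */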
e59058c4 7939/**
4f774513 7940 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 7941 * @phba: Pointer to HBA context object.
 7942 * @iocbq: Pointer to command iocb.
7943 * @wqe: Pointer to the work queue entry.
e59058c4 7944 *
7945 * This routine converts the iocb command to its Work Queue Entry
7946 * equivalent. The wqe pointer should not have any fields set when
7947 * this routine is called because it will memcpy over them.
7948 * This routine does not set the CQ_ID or the WQEC bits in the
7949 * wqe.
e59058c4 7950 *
4f774513 7951 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 7952 **/
cf5bf97e 7953static int
7954lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7955 union lpfc_wqe *wqe)
cf5bf97e 7956{
5ffc266e 7957 uint32_t xmit_len = 0, total_len = 0;
7958 uint8_t ct = 0;
7959 uint32_t fip;
7960 uint32_t abort_tag;
7961 uint8_t command_type = ELS_COMMAND_NON_FIP;
7962 uint8_t cmnd;
7963 uint16_t xritag;
7964 uint16_t abrt_iotag;
7965 struct lpfc_iocbq *abrtiocbq;
4f774513 7966 struct ulp_bde64 *bpl = NULL;
f0d9bccc 7967 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
7968 int numBdes, i;
7969 struct ulp_bde64 bde;
c31098ce 7970 struct lpfc_nodelist *ndlp;
ff78d8f9 7971 uint32_t *pcmd;
1b51197d 7972 uint32_t if_type;
4f774513 7973
45ed1190 7974 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 7975 /* The fcp commands will set command type */
0c287589 7976 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 7977 command_type = FCP_COMMAND;
c868595d 7978 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
7979 command_type = ELS_COMMAND_FIP;
7980 else
7981 command_type = ELS_COMMAND_NON_FIP;
7982
7983 /* Some of the fields are in the right position already */
7984 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7985 abort_tag = (uint32_t) iocbq->iotag;
7986 xritag = iocbq->sli4_xritag;
f0d9bccc 7987 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
4f774513
JS
7988 /* words0-2 bpl convert bde */
7989 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
7990 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7991 sizeof(struct ulp_bde64);
4f774513
JS
7992 bpl = (struct ulp_bde64 *)
7993 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7994 if (!bpl)
7995 return IOCB_ERROR;
cf5bf97e 7996
4f774513
JS
7997 /* Should already be byte swapped. */
7998 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
7999 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8000 /* swap the size field back to the cpu so we
8001 * can assign it to the sgl.
8002 */
8003 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
8004 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8005 total_len = 0;
8006 for (i = 0; i < numBdes; i++) {
8007 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8008 total_len += bde.tus.f.bdeSize;
8009 }
4f774513 8010 } else
5ffc266e 8011 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 8012
4f774513
JS
8013 iocbq->iocb.ulpIoTag = iocbq->iotag;
8014 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 8015
4f774513
JS
8016 switch (iocbq->iocb.ulpCommand) {
8017 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
8018 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8019 ndlp = iocbq->context_un.ndlp;
8020 else
8021 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
8022 if (!iocbq->iocb.ulpLe) {
8023 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8024 "2007 Only Limited Edition cmd Format"
8025 " supported 0x%x\n",
8026 iocbq->iocb.ulpCommand);
8027 return IOCB_ERROR;
8028 }
ff78d8f9 8029
5ffc266e 8030 wqe->els_req.payload_len = xmit_len;
4f774513
JS
8031 /* Els_request64 has a TMO */
8032 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8033 iocbq->iocb.ulpTimeout);
8034 /* Need a VF for word 4 set the vf bit*/
8035 bf_set(els_req64_vf, &wqe->els_req, 0);
8036 /* And a VFID for word 12 */
8037 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 8038 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
8039 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8040 iocbq->iocb.ulpContext);
8041 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8042 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 8043 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 8044 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
8045 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8046 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
8047 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8048 iocbq->context2)->virt);
1b51197d
JS
8049 if_type = bf_get(lpfc_sli_intf_if_type,
8050 &phba->sli4_hba.sli_intf);
8051 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 8052 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 8053 *pcmd == ELS_CMD_SCR ||
6b5151fd 8054 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 8055 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
8056 *pcmd == ELS_CMD_PLOGI)) {
8057 bf_set(els_req64_sp, &wqe->els_req, 1);
8058 bf_set(els_req64_sid, &wqe->els_req,
8059 iocbq->vport->fc_myDID);
939723a4
JS
8060 if ((*pcmd == ELS_CMD_FLOGI) &&
8061 !(phba->fc_topology ==
8062 LPFC_TOPOLOGY_LOOP))
8063 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
8064 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8065 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 8066 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 8067 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
8068 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8069 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8070 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8071 }
c868595d 8072 }
6d368e53
JS
8073 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8074 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
8075 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8076 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8077 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8078 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8079 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8080 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7851fe2c 8081 break;
5ffc266e 8082 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
8083 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8084 iocbq->iocb.un.ulpWord[3]);
8085 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 8086 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
8087 /* The entire sequence is transmitted for this IOCB */
8088 xmit_len = total_len;
8089 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
8090 if (phba->link_flag & LS_LOOPBACK_MODE)
8091 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
4f774513 8092 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
8093 /* word3 iocb=io_tag32 wqe=reserved */
8094 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
8095 /* word4 relative_offset memcpy */
8096 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
8097 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8098 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8099 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8100 LPFC_WQE_IOD_WRITE);
8101 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8102 LPFC_WQE_LENLOC_WORD12);
8103 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
8104 wqe->xmit_sequence.xmit_len = xmit_len;
8105 command_type = OTHER_COMMAND;
7851fe2c 8106 break;
4f774513 8107 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
8108 /* word3 iocb=iotag32 wqe=seq_payload_len */
8109 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
8110 /* word4 iocb=rsvd wqe=rsvd */
8111 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8112 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 8113 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 8114 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
8115 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8116 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8117 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8118 LPFC_WQE_LENLOC_WORD3);
8119 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 8120 break;
4f774513
JS
8121 case CMD_FCP_IWRITE64_CR:
8122 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
8123 /* word3 iocb=iotag wqe=payload_offset_len */
8124 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8125 wqe->fcp_iwrite.payload_offset_len =
8126 xmit_len + sizeof(struct fcp_rsp);
8127 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8128 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8129 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8130 iocbq->iocb.ulpFCP2Rcvy);
8131 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8132 /* Always open the exchange */
8133 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
f0d9bccc
JS
8134 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8135 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8136 LPFC_WQE_LENLOC_WORD4);
8137 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8138 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 8139 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
7851fe2c 8140 break;
4f774513 8141 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
8142 /* word3 iocb=iotag wqe=payload_offset_len */
8143 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8144 wqe->fcp_iread.payload_offset_len =
5ffc266e 8145 xmit_len + sizeof(struct fcp_rsp);
f0d9bccc
JS
8146 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8147 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8148 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8149 iocbq->iocb.ulpFCP2Rcvy);
8150 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688
JS
8151 /* Always open the exchange */
8152 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
f0d9bccc
JS
8153 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8154 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8155 LPFC_WQE_LENLOC_WORD4);
8156 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8157 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 8158 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
7851fe2c 8159 break;
4f774513 8160 case CMD_FCP_ICMND64_CR:
f0d9bccc
JS
8161 /* word3 iocb=IO_TAG wqe=reserved */
8162 wqe->fcp_icmd.rsrvd3 = 0;
8163 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 8164 /* Always open the exchange */
f0d9bccc
JS
8165 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8166 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8167 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8168 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8169 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8170 LPFC_WQE_LENLOC_NONE);
8171 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
2a94aea4
JS
8172 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8173 iocbq->iocb.ulpFCP2Rcvy);
7851fe2c 8174 break;
4f774513 8175 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
8176 /* For this command calculate the xmit length of the
8177 * request bde.
8178 */
8179 xmit_len = 0;
8180 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8181 sizeof(struct ulp_bde64);
8182 for (i = 0; i < numBdes; i++) {
63e801ce 8183 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
8184 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8185 break;
63e801ce
JS
8186 xmit_len += bde.tus.f.bdeSize;
8187 }
f0d9bccc
JS
8188 /* word3 iocb=IO_TAG wqe=request_payload_len */
8189 wqe->gen_req.request_payload_len = xmit_len;
8190 /* word4 iocb=parameter wqe=relative_offset memcpy */
8191 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
8192 /* word6 context tag copied in memcpy */
8193 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8194 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8195 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8196 "2015 Invalid CT %x command 0x%x\n",
8197 ct, iocbq->iocb.ulpCommand);
8198 return IOCB_ERROR;
8199 }
f0d9bccc
JS
8200 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8201 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8202 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8203 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8204 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8205 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8206 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8207 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
4f774513 8208 command_type = OTHER_COMMAND;
7851fe2c 8209 break;
4f774513 8210 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 8211 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 8212 /* words0-2 BDE memcpy */
f0d9bccc
JS
8213 /* word3 iocb=iotag32 wqe=response_payload_len */
8214 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
8215 /* word4 */
8216 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
8217 /* word5 iocb=rsvd wqe=did */
8218 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
8219 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8220
8221 if_type = bf_get(lpfc_sli_intf_if_type,
8222 &phba->sli4_hba.sli_intf);
8223 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8224 if (iocbq->vport->fc_flag & FC_PT2PT) {
8225 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8226 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8227 iocbq->vport->fc_myDID);
8228 if (iocbq->vport->fc_myDID == Fabric_DID) {
8229 bf_set(wqe_els_did,
8230 &wqe->xmit_els_rsp.wqe_dest, 0);
8231 }
8232 }
8233 }
f0d9bccc
JS
8234 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8235 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8236 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8237 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 8238 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 8239 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 8240 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 8241 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
8242 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8243 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8244 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8245 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8246 LPFC_WQE_LENLOC_WORD3);
8247 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
8248 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8249 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
8250 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8251 iocbq->context2)->virt);
8252 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
8253 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8254 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 8255 iocbq->vport->fc_myDID);
939723a4
JS
8256 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8257 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
8258 phba->vpi_ids[phba->pport->vpi]);
8259 }
4f774513 8260 command_type = OTHER_COMMAND;
7851fe2c 8261 break;
4f774513
JS
8262 case CMD_CLOSE_XRI_CN:
8263 case CMD_ABORT_XRI_CN:
8264 case CMD_ABORT_XRI_CX:
8265 /* words 0-2 memcpy should be 0 reserved */
8266 /* port will send abts */
dcf2a4e0
JS
8267 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8268 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8269 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8270 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8271 } else
8272 fip = 0;
8273
8274 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 8275 /*
dcf2a4e0
JS
8276 * The link is down, or the command was ELS_FIP
8277 * so the fw does not need to send abts
4f774513
JS
8278 * on the wire.
8279 */
8280 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8281 else
8282 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8283 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
8284 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8285 wqe->abort_cmd.rsrvd5 = 0;
8286 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
8287 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8288 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
8289 /*
8290 * The abort handler will send us CMD_ABORT_XRI_CN or
8291 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8292 */
f0d9bccc
JS
8293 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8294 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8295 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8296 LPFC_WQE_LENLOC_NONE);
4f774513
JS
8297 cmnd = CMD_ABORT_XRI_CX;
8298 command_type = OTHER_COMMAND;
8299 xritag = 0;
7851fe2c 8300 break;
6669f9bb 8301 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 8302 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 8303 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
8304 * we re-construct this WQE here based on information in
8305 * iocbq from scratch.
8306 */
8307 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 8308 /* OX_ID is the same regardless of who sent the ABTS on the CT exchange */
6669f9bb 8309 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
8310 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8311 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
8312 LPFC_ABTS_UNSOL_INT) {
8313 /* ABTS sent by initiator to CT exchange, the
8314 * RX_ID field will be filled with the newly
8315 * allocated responder XRI.
8316 */
8317 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8318 iocbq->sli4_xritag);
8319 } else {
8320 /* ABTS sent by responder to CT exchange, the
8321 * RX_ID field will be filled with the responder
8322 * RX_ID from ABTS.
8323 */
8324 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 8325 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 8326 }
6669f9bb
JS
8327 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8328 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
8329
8330 /* Use CT=VPI */
8331 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8332 ndlp->nlp_DID);
8333 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8334 iocbq->iocb.ulpContext);
8335 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 8336 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 8337 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
8338 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8339 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8340 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
8341 /* Overwrite the pre-set command type with OTHER_COMMAND */
8342 command_type = OTHER_COMMAND;
546fc854
JS
8343 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8344 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8345 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8346 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8347 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8348 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8349 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8350 }
8351
7851fe2c 8352 break;
4f774513
JS
8353 case CMD_XRI_ABORTED_CX:
8354 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
8355 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8356 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8357 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8358 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8359 default:
8360 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8361 "2014 Invalid command 0x%x\n",
8362 iocbq->iocb.ulpCommand);
8363 return IOCB_ERROR;
7851fe2c 8364 break;
4f774513 8365 }
6d368e53 8366
8012cc38
JS
8367 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8368 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8369 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8370 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8371 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8372 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8373 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8374 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
8375 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8376 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8377 wqe->generic.wqe_com.abort_tag = abort_tag;
8378 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8379 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8380 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8381 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
8382 return 0;
8383}
8384
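/*
 * Usage sketch (illustrative): __lpfc_sli_issue_iocb_s4() below is the
 * caller of this conversion. The WQE is built on the stack and only
 * then copied into the work queue, roughly:
 *
 *	union lpfc_wqe wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
 *		return IOCB_ERROR;
 */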
8385/**
8386 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8387 * @phba: Pointer to HBA context object.
8388 * @ring_number: SLI ring number to issue iocb on.
8389 * @piocb: Pointer to command iocb.
8390 * @flag: Flag indicating if this command can be put into txq.
8391 *
8392 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8393 * an iocb command to an HBA with SLI-4 interface spec.
8394 *
8395 * This function is called with hbalock held. The function will return success
8396 * after it successfully submits the iocb to firmware or after adding it to the
8397 * txq.
8398 **/
8399static int
8400__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8401 struct lpfc_iocbq *piocb, uint32_t flag)
8402{
8403 struct lpfc_sglq *sglq;
4f774513
JS
8404 union lpfc_wqe wqe;
8405 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
4f774513
JS
8406
8407 if (piocb->sli4_xritag == NO_XRI) {
8408 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 8409 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
8410 sglq = NULL;
8411 else {
0e9bb8d7 8412 if (!list_empty(&pring->txq)) {
2a9bf3d0
JS
8413 if (!(flag & SLI_IOCB_RET_IOCB)) {
8414 __lpfc_sli_ringtx_put(phba,
8415 pring, piocb);
8416 return IOCB_SUCCESS;
8417 } else {
8418 return IOCB_BUSY;
8419 }
8420 } else {
6d368e53 8421 sglq = __lpfc_sli_get_sglq(phba, piocb);
2a9bf3d0
JS
8422 if (!sglq) {
8423 if (!(flag & SLI_IOCB_RET_IOCB)) {
8424 __lpfc_sli_ringtx_put(phba,
8425 pring,
8426 piocb);
8427 return IOCB_SUCCESS;
8428 } else
8429 return IOCB_BUSY;
8430 }
8431 }
4f774513
JS
8432 }
8433 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6d368e53
JS
8434 /* These IOs already have an XRI and a mapped sgl. */
8435 sglq = NULL;
4f774513 8436 } else {
6d368e53
JS
8437 /*
8438 * This is a continuation of a command (CX), so this
4f774513
JS
8439 * sglq is on the active list
8440 */
edccdc17 8441 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
8442 if (!sglq)
8443 return IOCB_ERROR;
8444 }
8445
8446 if (sglq) {
6d368e53 8447 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 8448 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 8449 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
8450 return IOCB_ERROR;
8451 }
8452
8453 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8454 return IOCB_ERROR;
8455
341af102
JS
8456 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8457 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
5ffc266e
JS
8458 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8459 &wqe))
4f774513
JS
8460 return IOCB_ERROR;
8461 } else {
8462 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8463 return IOCB_ERROR;
8464 }
8465 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8466
8467 return 0;
8468}
8469
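/*
 * Flow sketch (illustrative): the lockless SLI4 submit path above is
 * essentially:
 *
 *	sglq selection        - NO_XRI: take a free sglq or park the
 *	                        iocb on the txq (or return IOCB_BUSY)
 *	lpfc_sli4_bpl2sgl()   - convert the BPL to an SGL for the XRI
 *	lpfc_sli4_iocb2wqe()  - convert the IOCB to a WQE
 *	lpfc_sli4_wq_put()    - post to an FCP WQ or the ELS WQ
 *	lpfc_sli_ringtxcmpl_put() - track the iocb on the txcmplq
 */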
8470/**
8471 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8472 *
8473 * This routine invokes the lockless issue-iocb routine through the
8474 * function pointer stored in the lpfc_hba struct.
8475 *
8476 * Return codes:
8477 * IOCB_ERROR - Error
8478 * IOCB_SUCCESS - Success
8479 * IOCB_BUSY - Busy
8480 **/
2a9bf3d0 8481int
4f774513
JS
8482__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8483 struct lpfc_iocbq *piocb, uint32_t flag)
8484{
8485 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8486}
8487
8488/**
25985edc 8489 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
8490 * @phba: The hba struct for which this call is being executed.
8491 * @dev_grp: The HBA PCI-Device group number.
8492 *
8493 * This routine sets up the SLI interface API function jump table in @phba
8494 * struct.
8495 * Returns: 0 - success, -ENODEV - failure.
8496 **/
8497int
8498lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8499{
8500
8501 switch (dev_grp) {
8502 case LPFC_PCI_DEV_LP:
8503 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8504 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8505 break;
8506 case LPFC_PCI_DEV_OC:
8507 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8508 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8509 break;
8510 default:
8511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8512 "1419 Invalid HBA PCI-device group: 0x%x\n",
8513 dev_grp);
8514 return -ENODEV;
8515 break;
8516 }
8517 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8518 return 0;
8519}
8520
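/*
 * Dispatch sketch (illustrative): once the jump table is set up at
 * attach time, issuing an iocb becomes an indirect call selected by
 * the PCI device group, e.g.:
 *
 *	lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	...
 *	phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *		resolves to __lpfc_sli_issue_iocb_s4()
 */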
8521/**
8522 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8523 * @phba: Pointer to HBA context object.
8524 * @ring_number: SLI ring number to issue iocb on.
8525 * @piocb: Pointer to command iocb.
8526 * @flag: Flag indicating if this command can be put into txq.
8527 *
8528 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8529 * function. This function gets the hbalock and calls
8530 * __lpfc_sli_issue_iocb function and will return the error returned
8531 * by __lpfc_sli_issue_iocb function. This wrapper is used by
8532 * functions which do not hold hbalock.
8533 **/
8534int
8535lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8536 struct lpfc_iocbq *piocb, uint32_t flag)
8537{
ba20c853 8538 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
2a76a283 8539 struct lpfc_sli_ring *pring;
ba20c853
JS
8540 struct lpfc_queue *fpeq;
8541 struct lpfc_eqe *eqe;
4f774513 8542 unsigned long iflags;
2a76a283 8543 int rc, idx;
4f774513 8544
7e56aa25 8545 if (phba->sli_rev == LPFC_SLI_REV4) {
2a76a283
JS
8546 if (piocb->iocb_flag & LPFC_IO_FCP) {
8547 if (unlikely(!phba->sli4_hba.fcp_wq))
8548 return IOCB_ERROR;
8549 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8550 piocb->fcp_wqidx = idx;
8551 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
ba20c853
JS
8552
8553 pring = &phba->sli.ring[ring_number];
8554 spin_lock_irqsave(&pring->ring_lock, iflags);
8555 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8556 flag);
8557 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8558
8559 if (lpfc_fcp_look_ahead) {
8560 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8561
8562 if (atomic_dec_and_test(&fcp_eq_hdl->
8563 fcp_eq_in_use)) {
4f774513 8564
ba20c853
JS
8565 /* Get associated EQ with this index */
8566 fpeq = phba->sli4_hba.hba_eq[idx];
8567
8568 /* Turn off interrupts from this EQ */
8569 lpfc_sli4_eq_clr_intr(fpeq);
8570
8571 /*
8572 * Process all the events on FCP EQ
8573 */
8574 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8575 lpfc_sli4_hba_handle_eqe(phba,
8576 eqe, idx);
8577 fpeq->EQ_processed++;
8578 }
8579
8580 /* Always clear and re-arm the EQ */
8581 lpfc_sli4_eq_release(fpeq,
8582 LPFC_QUEUE_REARM);
8583 }
8584 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8585 }
8586 } else {
8587 pring = &phba->sli.ring[ring_number];
8588 spin_lock_irqsave(&pring->ring_lock, iflags);
8589 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8590 flag);
8591 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8592
2a76a283 8593 }
7e56aa25
JS
8594 } else {
8595 /* For now, SLI2/3 will still use hbalock */
8596 spin_lock_irqsave(&phba->hbalock, iflags);
8597 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8598 spin_unlock_irqrestore(&phba->hbalock, iflags);
8599 }
4f774513
JS
8600 return rc;
8601}
8602
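/*
 * Caller sketch (illustrative): a typical submission from code that
 * does not hold hbalock. With SLI_IOCB_RET_IOCB set, a full queue is
 * reported as IOCB_BUSY instead of parking the iocb on the txq:
 *
 *	int rc;
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				 SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		(defer and retry later)
 *	else if (rc == IOCB_ERROR)
 *		(release the iocb resources)
 */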
8603/**
8604 * lpfc_extra_ring_setup - Extra ring setup function
8605 * @phba: Pointer to HBA context object.
8606 *
8607 * This function is called while driver attaches with the
8608 * HBA to setup the extra ring. The extra ring is used
8609 * only when the driver needs to support target mode or
8610 * IP over FC functionality.
8611 *
8612 * This function is called with no lock held.
8613 **/
8614static int
8615lpfc_extra_ring_setup( struct lpfc_hba *phba)
8616{
8617 struct lpfc_sli *psli;
8618 struct lpfc_sli_ring *pring;
8619
8620 psli = &phba->sli;
8621
8622 /* Adjust cmd/rsp ring iocb entries more evenly */
8623
8624 /* Take some away from the FCP ring */
8625 pring = &psli->ring[psli->fcp_ring];
7e56aa25
JS
8626 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8627 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8628 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8629 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 8630
a4bc3379
JS
8631 /* and give them to the extra ring */
8632 pring = &psli->ring[psli->extra_ring];
8633
7e56aa25
JS
8634 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8635 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8636 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8637 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
8638
8639 /* Setup default profile for this ring */
8640 pring->iotag_max = 4096;
8641 pring->num_mask = 1;
8642 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
8643 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8644 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
8645 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8646 return 0;
8647}
8648
cb69f7de
JS
8649/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8650 * @phba: Pointer to HBA context object.
8651 * @iocbq: Pointer to iocb object.
8652 *
8653 * The async_event handler calls this routine when it receives
8654 * an ASYNC_STATUS_CN event from the port. The port generates
8655 * this event when an Abort Sequence request to an rport fails
8657 * twice in succession. The abort could have originated from the
8658 * driver or from the port. The ABTS could have been for an ELS
8658 * or FCP IO. The port only generates this event when an ABTS
8659 * fails to complete after one retry.
8660 */
8661static void
8662lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8663 struct lpfc_iocbq *iocbq)
8664{
8665 struct lpfc_nodelist *ndlp = NULL;
8666 uint16_t rpi = 0, vpi = 0;
8667 struct lpfc_vport *vport = NULL;
8668
8669 /* The rpi in the ulpContext is vport-sensitive. */
8670 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8671 rpi = iocbq->iocb.ulpContext;
8672
8673 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8674 "3092 Port generated ABTS async event "
8675 "on vpi %d rpi %d status 0x%x\n",
8676 vpi, rpi, iocbq->iocb.ulpStatus);
8677
8678 vport = lpfc_find_vport_by_vpid(phba, vpi);
8679 if (!vport)
8680 goto err_exit;
8681 ndlp = lpfc_findnode_rpi(vport, rpi);
8682 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8683 goto err_exit;
8684
8685 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8686 lpfc_sli_abts_recover_port(vport, ndlp);
8687 return;
8688
8689 err_exit:
8690 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8691 "3095 Event Context not found, no "
8692 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
8693 vpi, rpi, iocbq->iocb.ulpStatus,
8694 iocbq->iocb.ulpContext);
8695}
8696
8697/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8698 * @phba: pointer to HBA context object.
8699 * @ndlp: nodelist pointer for the impacted rport.
8700 * @axri: pointer to the wcqe containing the failed exchange.
8701 *
8702 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8703 * port. The port generates this event when an abort exchange request to an
8704 * rport fails twice in succession with no reply. The abort could be originated
8705 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8706 */
8707void
8708lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8709 struct lpfc_nodelist *ndlp,
8710 struct sli4_wcqe_xri_aborted *axri)
8711{
8712 struct lpfc_vport *vport;
5c1db2ac 8713 uint32_t ext_status = 0;
cb69f7de 8714
6b5151fd 8715 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
8716 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8717 "3115 Node Context not found, driver "
8718 "ignoring abts err event\n");
6b5151fd
JS
8719 return;
8720 }
8721
cb69f7de
JS
8722 vport = ndlp->vport;
8723 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8724 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 8725 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
cb69f7de
JS
8726 ndlp->vport->vpi, ndlp->nlp_rpi,
8727 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
8728 bf_get(lpfc_wcqe_xa_status, axri),
8729 axri->parameter);
cb69f7de 8730
5c1db2ac
JS
8731 /*
8732 * Catch the ABTS protocol failure case. Older OCe FW releases returned
8733 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8734 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8735 */
e3d2b802 8736 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
8737 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8738 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
8739 lpfc_sli_abts_recover_port(vport, ndlp);
8740}
8741
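/*
 * Decision sketch (illustrative): recovery is attempted only for the
 * ABTS protocol failure signature described above, i.e.
 *
 *	status == IOSTAT_LOCAL_REJECT &&
 *	(ext_status == IOERR_SEQUENCE_TIMEOUT || ext_status == 0)
 *
 * where ext_status = axri->parameter & IOERR_PARAM_MASK; any other
 * status/parameter combination is only logged.
 */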
e59058c4 8742/**
3621a710 8743 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
8744 * @phba: Pointer to HBA context object.
8745 * @pring: Pointer to driver SLI ring object.
8746 * @iocbq: Pointer to iocb object.
8747 *
8748 * This function is called by the slow ring event handler
8749 * function when there is an ASYNC event iocb in the ring.
8750 * This function is called with no lock held.
8751 * Currently this function handles only temperature related
8752 * ASYNC events. The function decodes the temperature sensor
8753 * event message and posts events for the management applications.
8754 **/
98c9ea5c 8755static void
57127f15
JS
8756lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8757 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8758{
8759 IOCB_t *icmd;
8760 uint16_t evt_code;
57127f15
JS
8761 struct temp_event temp_event_data;
8762 struct Scsi_Host *shost;
a257bf90 8763 uint32_t *iocb_w;
57127f15
JS
8764
8765 icmd = &iocbq->iocb;
8766 evt_code = icmd->un.asyncstat.evt_code;
57127f15 8767
cb69f7de
JS
8768 switch (evt_code) {
8769 case ASYNC_TEMP_WARN:
8770 case ASYNC_TEMP_SAFE:
8771 temp_event_data.data = (uint32_t) icmd->ulpContext;
8772 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8773 if (evt_code == ASYNC_TEMP_WARN) {
8774 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8775 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8776 "0347 Adapter is very hot, please take "
8777 "corrective action. temperature : %d Celsius\n",
8778 (uint32_t) icmd->ulpContext);
8779 } else {
8780 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8781 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8782 "0340 Adapter temperature is OK now. "
8783 "temperature : %d Celsius\n",
8784 (uint32_t) icmd->ulpContext);
8785 }
8786
8787 /* Send temperature change event to applications */
8788 shost = lpfc_shost_from_vport(phba->pport);
8789 fc_host_post_vendor_event(shost, fc_get_event_number(),
8790 sizeof(temp_event_data), (char *) &temp_event_data,
8791 LPFC_NL_VENDOR_ID);
8792 break;
8793 case ASYNC_STATUS_CN:
8794 lpfc_sli_abts_err_handler(phba, iocbq);
8795 break;
8796 default:
a257bf90 8797 iocb_w = (uint32_t *) icmd;
cb69f7de 8798 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 8799 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 8800 " evt_code 0x%x\n"
a257bf90
JS
8801 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8802 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8803 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8804 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 8805 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
8806 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8807 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8808 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8809 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8810
cb69f7de 8811 break;
57127f15 8812 }
57127f15
JS
8813}
8814
8815
e59058c4 8816/**
3621a710 8817 * lpfc_sli_setup - SLI ring setup function
e59058c4
JS
8818 * @phba: Pointer to HBA context object.
8819 *
8820 * lpfc_sli_setup sets up rings of the SLI interface with
8821 * number of iocbs per ring and iotags. This function is
8822 * called during driver attach to the HBA and before the
8823 * interrupts are enabled. So there is no need for locking.
8824 *
8825 * This function always returns 0.
8826 **/
dea3101e 8827int
8828lpfc_sli_setup(struct lpfc_hba *phba)
8829{
ed957684 8830 int i, totiocbsize = 0;
dea3101e 8831 struct lpfc_sli *psli = &phba->sli;
8832 struct lpfc_sli_ring *pring;
8833
2a76a283
JS
8834 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8835 if (phba->sli_rev == LPFC_SLI_REV4)
67d12733 8836 psli->num_rings += phba->cfg_fcp_io_channel;
dea3101e 8837 psli->sli_flag = 0;
8838 psli->fcp_ring = LPFC_FCP_RING;
8839 psli->next_ring = LPFC_FCP_NEXT_RING;
a4bc3379 8840 psli->extra_ring = LPFC_EXTRA_RING;
dea3101e 8841
604a3e30
JB
8842 psli->iocbq_lookup = NULL;
8843 psli->iocbq_lookup_len = 0;
8844 psli->last_iotag = 0;
8845
dea3101e 8846 for (i = 0; i < psli->num_rings; i++) {
8847 pring = &psli->ring[i];
8848 switch (i) {
8849 case LPFC_FCP_RING: /* ring 0 - FCP */
8850 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8851 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8852 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8853 pring->sli.sli3.numCiocb +=
8854 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8855 pring->sli.sli3.numRiocb +=
8856 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8857 pring->sli.sli3.numCiocb +=
8858 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8859 pring->sli.sli3.numRiocb +=
8860 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8861 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8862 SLI3_IOCB_CMD_SIZE :
8863 SLI2_IOCB_CMD_SIZE;
7e56aa25 8864 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8865 SLI3_IOCB_RSP_SIZE :
8866 SLI2_IOCB_RSP_SIZE;
dea3101e 8867 pring->iotag_ctr = 0;
8868 pring->iotag_max =
92d7f7b0 8869 (phba->cfg_hba_queue_depth * 2);
dea3101e 8870 pring->fast_iotag = pring->iotag_max;
8871 pring->num_mask = 0;
8872 break;
a4bc3379 8873 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 8874 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8875 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8876 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8877 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8878 SLI3_IOCB_CMD_SIZE :
8879 SLI2_IOCB_CMD_SIZE;
7e56aa25 8880 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8881 SLI3_IOCB_RSP_SIZE :
8882 SLI2_IOCB_RSP_SIZE;
2e0fef85 8883 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 8884 pring->num_mask = 0;
8885 break;
8886 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8887 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
8888 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8889 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8890 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8891 SLI3_IOCB_CMD_SIZE :
8892 SLI2_IOCB_CMD_SIZE;
7e56aa25 8893 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
8894 SLI3_IOCB_RSP_SIZE :
8895 SLI2_IOCB_RSP_SIZE;
dea3101e 8896 pring->fast_iotag = 0;
8897 pring->iotag_ctr = 0;
8898 pring->iotag_max = 4096;
57127f15
JS
8899 pring->lpfc_sli_rcv_async_status =
8900 lpfc_sli_async_event_handler;
6669f9bb 8901 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 8902 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
8903 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8904 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 8905 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 8906 lpfc_els_unsol_event;
dea3101e 8907 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
8908 pring->prt[1].rctl = FC_RCTL_ELS_REP;
8909 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 8910 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 8911 lpfc_els_unsol_event;
dea3101e 8912 pring->prt[2].profile = 0; /* Mask 2 */
8913 /* NameServer Inquiry */
6a9c52cf 8914 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 8915 /* NameServer */
6a9c52cf 8916 pring->prt[2].type = FC_TYPE_CT;
dea3101e 8917 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 8918 lpfc_ct_unsol_event;
dea3101e 8919 pring->prt[3].profile = 0; /* Mask 3 */
8920 /* NameServer response */
6a9c52cf 8921 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 8922 /* NameServer */
6a9c52cf 8923 pring->prt[3].type = FC_TYPE_CT;
dea3101e 8924 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 8925 lpfc_ct_unsol_event;
dea3101e 8926 break;
8927 }
7e56aa25
JS
8928 totiocbsize += (pring->sli.sli3.numCiocb *
8929 pring->sli.sli3.sizeCiocb) +
8930 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 8931 }
ed957684 8932 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 8933 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
8934 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8935 "SLI2 SLIM Data: x%x x%lx\n",
8936 phba->brd_no, totiocbsize,
8937 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 8938 }
cf5bf97e
JW
8939 if (phba->cfg_multi_ring_support == 2)
8940 lpfc_extra_ring_setup(phba);
dea3101e 8941
8942 return 0;
8943}
8944
e59058c4 8945/**
3621a710 8946 * lpfc_sli_queue_setup - Queue initialization function
e59058c4
JS
8947 * @phba: Pointer to HBA context object.
8948 *
8949 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8950 * ring. This function also initializes ring indices of each ring.
8951 * This function is called during the initialization of the SLI
8952 * interface of an HBA.
8953 * This function is called with no lock held and always returns
8954 * 1.
8955 **/
dea3101e 8956int
2e0fef85 8957lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea3101e 8958{
8959 struct lpfc_sli *psli;
8960 struct lpfc_sli_ring *pring;
604a3e30 8961 int i;
dea3101e 8962
8963 psli = &phba->sli;
2e0fef85 8964 spin_lock_irq(&phba->hbalock);
dea3101e 8965 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 8966 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 8967 /* Initialize list headers for txq and txcmplq as double linked lists */
8968 for (i = 0; i < psli->num_rings; i++) {
8969 pring = &psli->ring[i];
8970 pring->ringno = i;
7e56aa25
JS
8971 pring->sli.sli3.next_cmdidx = 0;
8972 pring->sli.sli3.local_getidx = 0;
8973 pring->sli.sli3.cmdidx = 0;
dea3101e 8974 INIT_LIST_HEAD(&pring->txq);
8975 INIT_LIST_HEAD(&pring->txcmplq);
8976 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 8977 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 8978 INIT_LIST_HEAD(&pring->postbufq);
7e56aa25 8979 spin_lock_init(&pring->ring_lock);
dea3101e 8980 }
2e0fef85
JS
8981 spin_unlock_irq(&phba->hbalock);
8982 return 1;
dea3101e 8983}
8984
04c68496
JS
8985/**
8986 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8987 * @phba: Pointer to HBA context object.
8988 *
8989 * This routine flushes the mailbox command subsystem. It will unconditionally
8990 * flush all the mailbox commands in the three possible stages in the mailbox
8991 * command sub-system: pending mailbox command queue; the outstanding mailbox
8992 * command; and completed mailbox command queue. It is the caller's responsibility
8993 * to make sure that the driver is in the proper state to flush the mailbox
8994 * command sub-system. Namely, the posting of mailbox commands into the
8995 * pending mailbox command queue from the various clients must be stopped;
8997 * either the HBA is in a state where it will never work on the outstanding
8997 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8998 * mailbox command has been completed.
8999 **/
9000static void
9001lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9002{
9003 LIST_HEAD(completions);
9004 struct lpfc_sli *psli = &phba->sli;
9005 LPFC_MBOXQ_t *pmb;
9006 unsigned long iflag;
9007
9008 /* Flush all the mailbox commands in the mbox system */
9009 spin_lock_irqsave(&phba->hbalock, iflag);
9010 /* The pending mailbox command queue */
9011 list_splice_init(&phba->sli.mboxq, &completions);
9012 /* The outstanding active mailbox command */
9013 if (psli->mbox_active) {
9014 list_add_tail(&psli->mbox_active->list, &completions);
9015 psli->mbox_active = NULL;
9016 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9017 }
9018 /* The completed mailbox command queue */
9019 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9020 spin_unlock_irqrestore(&phba->hbalock, iflag);
9021
9022 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9023 while (!list_empty(&completions)) {
9024 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9025 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9026 if (pmb->mbox_cmpl)
9027 pmb->mbox_cmpl(phba, pmb);
9028 }
9029}
9030
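/*
 * Completion sketch (illustrative): every mailbox command drained from
 * the three stages above is handed back to its owner with
 *
 *	pmb->u.mb.mbxStatus == MBX_NOT_FINISHED
 *
 * so an asynchronous issuer sees the flush in its mbox_cmpl callback
 * and can release the command's resources there.
 */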
e59058c4 9031/**
3621a710 9032 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
9033 * @vport: Pointer to virtual port object.
9034 *
9035 * lpfc_sli_host_down is called to clean up the resources
9036 * associated with a vport before destroying virtual
9037 * port data structures.
9038 * This function does the following operations:
9039 * - Free discovery resources associated with this virtual
9040 * port.
9041 * - Free iocbs associated with this virtual port in
9042 * the txq.
9043 * - Send abort for all iocb commands associated with this
9044 * vport in txcmplq.
9045 *
9046 * This function is called with no lock held and always returns 1.
9047 **/
92d7f7b0
JS
9048int
9049lpfc_sli_host_down(struct lpfc_vport *vport)
9050{
858c9f6c 9051 LIST_HEAD(completions);
92d7f7b0
JS
9052 struct lpfc_hba *phba = vport->phba;
9053 struct lpfc_sli *psli = &phba->sli;
9054 struct lpfc_sli_ring *pring;
9055 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
9056 int i;
9057 unsigned long flags = 0;
9058 uint16_t prev_pring_flag;
9059
9060 lpfc_cleanup_discovery_resources(vport);
9061
9062 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0
JS
9063 for (i = 0; i < psli->num_rings; i++) {
9064 pring = &psli->ring[i];
9065 prev_pring_flag = pring->flag;
5e9d9b82
JS
9066 /* Only slow rings */
9067 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 9068 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
9069 /* Set the lpfc data pending flag */
9070 set_bit(LPFC_DATA_READY, &phba->data_flags);
9071 }
92d7f7b0
JS
9072 /*
9073 * Error everything on the txq since these iocbs have not been
9074 * given to the FW yet.
9075 */
92d7f7b0
JS
9076 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9077 if (iocb->vport != vport)
9078 continue;
858c9f6c 9079 list_move_tail(&iocb->list, &completions);
92d7f7b0
JS
9080 }
9081
9082 /* Next issue ABTS for everything on the txcmplq */
9083 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9084 list) {
9085 if (iocb->vport != vport)
9086 continue;
9087 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9088 }
9089
9090 pring->flag = prev_pring_flag;
9091 }
9092
9093 spin_unlock_irqrestore(&phba->hbalock, flags);
9094
a257bf90
JS
9095 /* Cancel all the IOCBs from the completions list */
9096 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9097 IOERR_SLI_DOWN);
92d7f7b0
JS
9098 return 1;
9099}
9100
e59058c4 9101/**
3621a710 9102 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
9103 * @phba: Pointer to HBA context object.
9104 *
9105 * This function cleans up all iocbs, buffers, and mailbox commands
9106 * while shutting down the HBA. This function is called with no
9107 * lock held and always returns 1.
9108 * This function does the following to cleanup driver resources:
9109 * - Free discovery resources for each virtual port
9110 * - Cleanup any pending fabric iocbs
9111 * - Iterate through the iocb txq and free each entry
9112 * in the list.
9113 * - Free up any buffer posted to the HBA
9114 * - Free mailbox commands in the mailbox queue.
9115 **/
dea3101e 9116int
2e0fef85 9117lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 9118{
2534ba75 9119 LIST_HEAD(completions);
2e0fef85 9120 struct lpfc_sli *psli = &phba->sli;
dea3101e 9121 struct lpfc_sli_ring *pring;
0ff10d46 9122 struct lpfc_dmabuf *buf_ptr;
dea3101e 9123 unsigned long flags = 0;
04c68496
JS
9124 int i;
9125
9126 /* Shutdown the mailbox command sub-system */
618a5230 9127 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 9128
dea3101e 9129 lpfc_hba_down_prep(phba);
9130
92d7f7b0
JS
9131 lpfc_fabric_abort_hba(phba);
9132
2e0fef85 9133 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 9134 for (i = 0; i < psli->num_rings; i++) {
9135 pring = &psli->ring[i];
5e9d9b82
JS
9136 /* Only slow rings */
9137 if (pring->ringno == LPFC_ELS_RING) {
858c9f6c 9138 pring->flag |= LPFC_DEFERRED_RING_EVENT;
5e9d9b82
JS
9139 /* Set the lpfc data pending flag */
9140 set_bit(LPFC_DATA_READY, &phba->data_flags);
9141 }
dea3101e 9142
9143 /*
9144 * Error everything on the txq since these iocbs have not been
9145 * given to the FW yet.
9146 */
2534ba75 9147 list_splice_init(&pring->txq, &completions);
2534ba75 9148 }
2e0fef85 9149 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 9150
a257bf90
JS
9151 /* Cancel all the IOCBs from the completions list */
9152 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9153 IOERR_SLI_DOWN);
dea3101e 9154
0ff10d46
JS
9155 spin_lock_irqsave(&phba->hbalock, flags);
9156 list_splice_init(&phba->elsbuf, &completions);
9157 phba->elsbuf_cnt = 0;
9158 phba->elsbuf_prev_cnt = 0;
9159 spin_unlock_irqrestore(&phba->hbalock, flags);
9160
9161 while (!list_empty(&completions)) {
9162 list_remove_head(&completions, buf_ptr,
9163 struct lpfc_dmabuf, list);
9164 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9165 kfree(buf_ptr);
9166 }
9167
dea3101e 9168 /* Return any active mbox cmds */
9169 del_timer_sync(&psli->mbox_tmo);
2e0fef85 9170
da0436e9 9171 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 9172 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 9173 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 9174
da0436e9
JS
9175 return 1;
9176}
9177
e59058c4 9178/**
3621a710 9179 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
9180 * @srcp: Source memory pointer.
9181 * @destp: Destination memory pointer.
9182 * @cnt: Number of words required to be copied.
9183 *
9184 * This function is used for copying data between driver memory
9185 * and the SLI memory. This function also changes the endianness
9186 * of each word if native endianness is different from SLI
9187 * endianness. This function can be called with or without
9188 * lock.
9189 **/
dea3101e 9190void
9191lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9192{
9193 uint32_t *src = srcp;
9194 uint32_t *dest = destp;
9195 uint32_t ldata;
9196 int i;
9197
9198 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9199 ldata = *src;
9200 ldata = le32_to_cpu(ldata);
9201 *dest = ldata;
9202 src++;
9203 dest++;
9204 }
9205}
9206
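/*
 * Worked example (illustrative): each word is read, converted with
 * le32_to_cpu(), and stored, so the raw little-endian SLI bytes
 *
 *	0x78 0x56 0x34 0x12
 *
 * always yield *dest == 0x12345678 in host order, on either a
 * little-endian host (no-op swap) or a big-endian one (byte swap).
 * Note @cnt is a byte count, walked in sizeof(uint32_t) steps.
 */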
e59058c4 9207
a0c87cbd
JS
9208/**
9209 * lpfc_sli_bemem_bcopy - SLI memory copy function
9210 * @srcp: Source memory pointer.
9211 * @destp: Destination memory pointer.
9212 * @cnt: Number of words required to be copied.
9213 *
9214 * This function is used for copying data from a data structure
9215 * with big-endian representation to local endianness.
9216 * This function can be called with or without lock.
9217 **/
9218void
9219lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9220{
9221 uint32_t *src = srcp;
9222 uint32_t *dest = destp;
9223 uint32_t ldata;
9224 int i;
9225
9226 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9227 ldata = *src;
9228 ldata = be32_to_cpu(ldata);
9229 *dest = ldata;
9230 src++;
9231 dest++;
9232 }
9233}
9234
e59058c4 9235/**
3621a710 9236 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
9237 * @phba: Pointer to HBA context object.
9238 * @pring: Pointer to driver SLI ring object.
9239 * @mp: Pointer to driver buffer object.
9240 *
9241 * This function is called with no lock held.
9242 * It always returns zero after adding the buffer to the postbufq
9243 * buffer list.
9244 **/
dea3101e 9245int
2e0fef85
JS
9246lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9247 struct lpfc_dmabuf *mp)
dea3101e 9248{
9249 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9250 later */
2e0fef85 9251 spin_lock_irq(&phba->hbalock);
dea3101e 9252 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 9253 pring->postbufq_cnt++;
2e0fef85 9254 spin_unlock_irq(&phba->hbalock);
dea3101e 9255 return 0;
9256}
9257
e59058c4 9258/**
3621a710 9259 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
9260 * @phba: Pointer to HBA context object.
9261 *
9262 * When HBQ is enabled, buffers are searched based on tags. This function
9263 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9264 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
9265 * does not conflict with tags of buffer posted for unsolicited events.
9266 * The function returns the allocated tag. The function is called with
9267 * no locks held.
9268 **/
76bb24ef
JS
9269uint32_t
9270lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9271{
9272 spin_lock_irq(&phba->hbalock);
9273 phba->buffer_tag_count++;
9274 /*
9275 * Always set the QUE_BUFTAG_BIT to distinguish this tag
9276 * from a tag assigned by HBQ.
9277 */
9278 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9279 spin_unlock_irq(&phba->hbalock);
9280 return phba->buffer_tag_count;
9281}
9282
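/*
 * Tag layout sketch (illustrative): the returned value is the
 * incremented counter with QUE_BUFTAG_BIT forced on, so
 *
 *	tag = lpfc_sli_get_buffer_tag(phba);
 *	(tag & QUE_BUFTAG_BIT) != 0	always holds,
 *
 * and a driver-assigned tag can never collide with one assigned by
 * the HBQ.
 */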
e59058c4 9283/**
3621a710 9284 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
9285 * @phba: Pointer to HBA context object.
9286 * @pring: Pointer to driver SLI ring object.
9287 * @tag: Buffer tag.
9288 *
9289 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9290 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
9291 * iocb is posted to the response ring with the tag of the buffer.
9292 * This function searches the pring->postbufq list using the tag
9293 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
9294 * iocb. If the buffer is found, then the lpfc_dmabuf object of the
9295 * buffer is returned to the caller; otherwise NULL is returned.
9296 * This function is called with no lock held.
9297 **/
76bb24ef
JS
9298struct lpfc_dmabuf *
9299lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9300 uint32_t tag)
9301{
9302 struct lpfc_dmabuf *mp, *next_mp;
9303 struct list_head *slp = &pring->postbufq;
9304
25985edc 9305 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
9306 spin_lock_irq(&phba->hbalock);
9307 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9308 if (mp->buffer_tag == tag) {
9309 list_del_init(&mp->list);
9310 pring->postbufq_cnt--;
9311 spin_unlock_irq(&phba->hbalock);
9312 return mp;
9313 }
9314 }
9315
9316 spin_unlock_irq(&phba->hbalock);
9317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 9318 "0402 Cannot find virtual addr for buffer tag on "
76bb24ef
JS
9319 "ring %d Data x%lx x%p x%p x%x\n",
9320 pring->ringno, (unsigned long) tag,
9321 slp->next, slp->prev, pring->postbufq_cnt);
9322
9323 return NULL;
9324}
dea3101e 9325
e59058c4 9326/**
3621a710 9327 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
9328 * @phba: Pointer to HBA context object.
9329 * @pring: Pointer to driver SLI ring object.
9330 * @phys: DMA address of the buffer.
9331 *
9332 * This function searches the buffer list using the dma_address
9333 * of unsolicited event to find the driver's lpfc_dmabuf object
9334 * corresponding to the dma_address. The function returns the
9335 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
9336 * This function is called by the ct and els unsolicited event
9337 * handlers to get the buffer associated with the unsolicited
9338 * event.
9339 *
9340 * This function is called with no lock held.
9341 **/
dea3101e 9342struct lpfc_dmabuf *
9343lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9344 dma_addr_t phys)
9345{
9346 struct lpfc_dmabuf *mp, *next_mp;
9347 struct list_head *slp = &pring->postbufq;
9348
25985edc 9349 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 9350 spin_lock_irq(&phba->hbalock);
dea3101e 9351 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9352 if (mp->phys == phys) {
9353 list_del_init(&mp->list);
9354 pring->postbufq_cnt--;
2e0fef85 9355 spin_unlock_irq(&phba->hbalock);
dea3101e 9356 return mp;
9357 }
9358 }
9359
2e0fef85 9360 spin_unlock_irq(&phba->hbalock);
dea3101e 9361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 9362 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 9363 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 9364 pring->ringno, (unsigned long long)phys,
dea3101e 9365 slp->next, slp->prev, pring->postbufq_cnt);
9366 return NULL;
9367}
9368
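/*
 * Pairing sketch (illustrative): this get is the counterpart of
 * lpfc_sli_ringpostbuf_put() above. A buffer posted before the event:
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *
 * is recovered in the ct/els unsolicited handler from the DMA address
 * reported in the iocb (dma_addr below stands in for that address):
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
 *	if (!mp)
 *		(log and drop the event)
 */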
e59058c4 9369/**
3621a710 9370 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
9371 * @phba: Pointer to HBA context object.
9372 * @cmdiocb: Pointer to driver command iocb object.
9373 * @rspiocb: Pointer to driver response iocb object.
9374 *
9375 * This function is the completion handler for the abort iocbs for
9376 * ELS commands. This function is called from the ELS ring event
9377 * handler with no lock held. This function frees memory resources
9378 * associated with the abort iocb.
9379 **/
dea3101e 9380static void
2e0fef85
JS
9381lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9382 struct lpfc_iocbq *rspiocb)
dea3101e 9383{
2e0fef85 9384 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 9385 uint16_t abort_iotag, abort_context;
ff78d8f9 9386 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
9387
9388 if (irsp->ulpStatus) {
ff78d8f9
JS
9389
9390 /*
9391 * Assume that the port already completed and returned, or
9393 * will return the iocb. Just log the message.
9393 */
2680eeaa
JS
9394 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9395 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9396
2e0fef85 9397 spin_lock_irq(&phba->hbalock);
45ed1190
JS
9398 if (phba->sli_rev < LPFC_SLI_REV4) {
9399 if (abort_iotag != 0 &&
9400 abort_iotag <= phba->sli.last_iotag)
9401 abort_iocb =
9402 phba->sli.iocbq_lookup[abort_iotag];
9403 } else
9404 /* For sli4 the abort_tag is the XRI,
9405 * so the abort routine puts the iotag of the iocb
9406 * being aborted in the context field of the abort
9407 * IOCB.
9408 */
9409 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 9410
2a9bf3d0
JS
9411 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9412 "0327 Cannot abort els iocb %p "
9413 "with tag %x context %x, abort status %x, "
9414 "abort code %x\n",
9415 abort_iocb, abort_iotag, abort_context,
9416 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 9417
ff78d8f9 9418 spin_unlock_irq(&phba->hbalock);
2680eeaa 9419 }
604a3e30 9420 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 9421 return;
9422}
9423
e59058c4 9424/**
3621a710 9425 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
9426 * @phba: Pointer to HBA context object.
9427 * @cmdiocb: Pointer to driver command iocb object.
9428 * @rspiocb: Pointer to driver response iocb object.
9429 *
9430 * The function is called from SLI ring event handler with no
9431 * lock held. This function is the completion handler for ELS commands
9432 * which are aborted. The function frees memory resources used for
9433 * the aborted ELS commands.
9434 **/
92d7f7b0
JS
9435static void
9436lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9437 struct lpfc_iocbq *rspiocb)
9438{
9439 IOCB_t *irsp = &rspiocb->iocb;
9440
9441 /* ELS cmd tag <ulpIoTag> completes */
9442 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 9443 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 9444 "x%x x%x x%x\n",
e8b62011 9445 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 9446 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
9447 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9448 lpfc_ct_free_iocb(phba, cmdiocb);
9449 else
9450 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
9451 return;
9452}
9453
e59058c4 9454/**
5af5eee7 9455 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
9456 * @phba: Pointer to HBA context object.
9457 * @pring: Pointer to driver SLI ring object.
9458 * @cmdiocb: Pointer to driver command iocb object.
9459 *
5af5eee7
JS
9460 * This function issues an abort iocb for the provided command iocb down to
9461 * the port. Unless the outstanding command iocb is itself an abort
9462 * request, this function issues the abort unconditionally. This function is
9463 * called with hbalock held. The function returns 0 when it fails due to
9464 * memory allocation failure or when the command iocb is an abort request.
e59058c4 9465 **/
5af5eee7
JS
9466static int
9467lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9468 struct lpfc_iocbq *cmdiocb)
dea3101e 9469{
2e0fef85 9470 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 9471 struct lpfc_iocbq *abtsiocbp;
dea3101e 9472 IOCB_t *icmd = NULL;
9473 IOCB_t *iabt = NULL;
5af5eee7 9474 int retval;
7e56aa25 9475 unsigned long iflags;
07951076 9476
92d7f7b0
JS
9477 /*
9478 * There are certain command types we don't want to abort. And we
9479 * don't want to abort commands that are already in the process of
9480 * being aborted.
07951076
JS
9481 */
9482 icmd = &cmdiocb->iocb;
2e0fef85 9483 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
9484 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9485 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
9486 return 0;
9487
dea3101e 9488 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 9489 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 9490 if (abtsiocbp == NULL)
9491 return 0;
dea3101e 9492
07951076 9493 /* This signals the response to set the correct status
341af102 9494 * before calling the completion handler
07951076
JS
9495 */
9496 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9497
dea3101e 9498 iabt = &abtsiocbp->iocb;
07951076
JS
9499 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9500 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 9501 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 9502 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190
JS
9503 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9504 }
da0436e9
JS
9505 else
9506 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
07951076
JS
9507 iabt->ulpLe = 1;
9508 iabt->ulpClass = icmd->ulpClass;
dea3101e 9509
5ffc266e
JS
9510 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9511 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
341af102
JS
9512 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9513 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9514
2e0fef85 9515 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
9516 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9517 else
9518 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 9519
07951076 9520 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
5b8bd0c9 9521
e8b62011
JS
9522 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9523 "0339 Abort xri x%x, original iotag x%x, "
9524 "abort cmd iotag x%x\n",
2a9bf3d0 9525 iabt->un.acxri.abortIoTag,
e8b62011 9526 iabt->un.acxri.abortContextTag,
2a9bf3d0 9527 abtsiocbp->iotag);
7e56aa25
JS
9528
9529 if (phba->sli_rev == LPFC_SLI_REV4) {
9530 /* Note: both hbalock and ring_lock need to be held here */
9531 spin_lock_irqsave(&pring->ring_lock, iflags);
9532 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9533 abtsiocbp, 0);
9534 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9535 } else {
9536 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9537 abtsiocbp, 0);
9538 }
dea3101e 9539
d7c255b2
JS
9540 if (retval)
9541 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
9542
9543 /*
9544 * Caller to this routine should check for IOCB_ERROR
9545 * and handle it properly. This routine no longer removes the
9546 * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
9547 */
9548 return retval;
9549}
9550
9551/**
9552 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9553 * @phba: Pointer to HBA context object.
9554 * @pring: Pointer to driver SLI ring object.
9555 * @cmdiocb: Pointer to driver command iocb object.
9556 *
9557 * This function issues an abort iocb for the provided command iocb. In case
9558 * of unloading, the abort iocb will not be issued to commands on the ELS
9559 * ring. Instead, the callback function shall be changed to those commands
9560 * so that nothing happens when them finishes. This function is called with
9561 * hbalock held. The function returns 0 when the command iocb is an abort
9562 * request.
9563 **/
9564int
9565lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9566 struct lpfc_iocbq *cmdiocb)
9567{
9568 struct lpfc_vport *vport = cmdiocb->vport;
9569 int retval = IOCB_ERROR;
9570 IOCB_t *icmd = NULL;
9571
9572 /*
9573 * There are certain command types we don't want to abort. And we
9574 * don't want to abort commands that are already in the process of
9575 * being aborted.
9576 */
9577 icmd = &cmdiocb->iocb;
9578 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9579 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9580 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9581 return 0;
9582
9583 /*
9584 * If we're unloading, don't abort iocb on the ELS ring, but change
9585 * the callback so that nothing happens when it finishes.
9586 */
9587 if ((vport->load_flag & FC_UNLOADING) &&
9588 (pring->ringno == LPFC_ELS_RING)) {
9589 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9590 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9591 else
9592 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9593 goto abort_iotag_exit;
9594 }
9595
9596 /* Now, we try to issue the abort to the cmdiocb out */
9597 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9598
07951076 9599abort_iotag_exit:
2e0fef85
JS
9600 /*
9601 * Caller to this routine should check for IOCB_ERROR
9602 * and handle it properly. This routine no longer removes the
9603 * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
07951076 9604 */
2e0fef85 9605 return retval;
dea3101e 9606}
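
/*
 * Editor's usage sketch (illustrative only): issuing an abort for one
 * outstanding command while holding the hbalock, as the kernel-doc above
 * requires. On IOCB_ERROR the command stays on the txcmplq and completes
 * through the normal path; the log message id here is hypothetical.
 */
static void
lpfc_sli_abort_one_example(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	rc = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	spin_unlock_irq(&phba->hbalock);

	if (rc == IOCB_ERROR)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"xxxx Failed abort for iotag x%x\n",
				cmdiocb->iotag);
}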
9607
5af5eee7
JS
9608/**
9609 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9610 * @phba: Pointer to HBA context object.
9611 * @pring: Pointer to driver SLI ring object.
9612 *
9613 * This function aborts all iocbs in the given ring and frees all the iocb
9614 * objects in txq. This function issues abort iocbs unconditionally for all
9615 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9616 * to complete before the return of this function. The caller is not required
9617 * to hold any locks.
9618 **/
9619static void
9620lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9621{
9622 LIST_HEAD(completions);
9623 struct lpfc_iocbq *iocb, *next_iocb;
9624
9625 if (pring->ringno == LPFC_ELS_RING)
9626 lpfc_fabric_abort_hba(phba);
9627
9628 spin_lock_irq(&phba->hbalock);
9629
9630 /* Take off all the iocbs on txq for cancelling */
9631 list_splice_init(&pring->txq, &completions);
9632 pring->txq_cnt = 0;
9633
9634 /* Next issue ABTS for everything on the txcmplq */
9635 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9636 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9637
9638 spin_unlock_irq(&phba->hbalock);
9639
9640 /* Cancel all the IOCBs from the completions list */
9641 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9642 IOERR_SLI_ABORTED);
9643}
9644
9645/**
9646 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9647 * @phba: pointer to lpfc HBA data structure.
9648 *
9649 * This routine will abort all pending and outstanding iocbs to an HBA.
9650 **/
9651void
9652lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9653{
9654 struct lpfc_sli *psli = &phba->sli;
9655 struct lpfc_sli_ring *pring;
9656 int i;
9657
9658 for (i = 0; i < psli->num_rings; i++) {
9659 pring = &psli->ring[i];
9660 lpfc_sli_iocb_ring_abort(phba, pring);
9661 }
9662}
9663
e59058c4 9664/**
3621a710 9665 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
e59058c4
JS
9666 * @iocbq: Pointer to driver iocb object.
9667 * @vport: Pointer to driver virtual port object.
9668 * @tgt_id: SCSI ID of the target.
9669 * @lun_id: LUN ID of the scsi device.
9670 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9671 *
3621a710 9672 * This function acts as an iocb filter for functions which abort or count
e59058c4
JS
9673 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9674 * 0 if the filtering criteria are met for the given iocb and will return
9675 * 1 if the filtering criteria are not met.
9676 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9677 * given iocb is for the SCSI device specified by vport, tgt_id and
9678 * lun_id parameter.
9679 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
9680 * given iocb is for the SCSI target specified by vport and tgt_id
9681 * parameters.
9682 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9683 * given iocb is for the SCSI host associated with the given vport.
9684 * This function is called with no locks held.
9685 **/
dea3101e 9686static int
51ef4c26
JS
9687lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9688 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 9689 lpfc_ctx_cmd ctx_cmd)
dea3101e 9690{
0bd4ca25 9691 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 9692 int rc = 1;
9693
0bd4ca25
JSEC
9694 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
9695 return rc;
9696
51ef4c26
JS
9697 if (iocbq->vport != vport)
9698 return rc;
9699
0bd4ca25 9700 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
0bd4ca25 9701
495a714c 9702 if (lpfc_cmd->pCmd == NULL)
dea3101e 9703 return rc;
9704
9705 switch (ctx_cmd) {
9706 case LPFC_CTX_LUN:
495a714c
JS
9707 if ((lpfc_cmd->rdata->pnode) &&
9708 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9709 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea3101e 9710 rc = 0;
9711 break;
9712 case LPFC_CTX_TGT:
495a714c
JS
9713 if ((lpfc_cmd->rdata->pnode) &&
9714 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea3101e 9715 rc = 0;
9716 break;
dea3101e 9717 case LPFC_CTX_HOST:
9718 rc = 0;
9719 break;
9720 default:
9721 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 9722 __func__, ctx_cmd);
dea3101e 9723 break;
9724 }
9725
9726 return rc;
9727}
9728
e59058c4 9729/**
3621a710 9730 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
e59058c4
JS
9731 * @vport: Pointer to virtual port.
9732 * @tgt_id: SCSI ID of the target.
9733 * @lun_id: LUN ID of the scsi device.
9734 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9735 *
9736 * This function returns number of FCP commands pending for the vport.
9737 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
9738 * commands pending on the vport associated with SCSI device specified
9739 * by tgt_id and lun_id parameters.
9740 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
9741 * commands pending on the vport associated with SCSI target specified
9742 * by tgt_id parameter.
9743 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
9744 * commands pending on the vport.
9745 * This function returns the number of iocbs which satisfy the filter.
9746 * This function is called without any lock held.
9747 **/
dea3101e 9748int
51ef4c26
JS
9749lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9750 lpfc_ctx_cmd ctx_cmd)
dea3101e 9751{
51ef4c26 9752 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
9753 struct lpfc_iocbq *iocbq;
9754 int sum, i;
dea3101e 9755
0bd4ca25
JSEC
9756 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9757 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 9758
51ef4c26
JS
9759 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
9760 ctx_cmd) == 0)
0bd4ca25 9761 sum++;
dea3101e 9762 }
0bd4ca25 9763
dea3101e 9764 return sum;
9765}
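
/*
 * Editor's usage sketch (illustrative only): a caller deciding whether a
 * LUN has drained can poll the pending-command count; no locks are needed
 * around lpfc_sli_sum_iocb(). The _example name is hypothetical.
 */
static int
lpfc_sli_lun_drained_example(struct lpfc_vport *vport, uint16_t tgt_id,
			     uint64_t lun_id)
{
	/* Zero pending FCP iocbs on this LUN means it has drained */
	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) == 0;
}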
9766
e59058c4 9767/**
3621a710 9768 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
e59058c4
JS
9769 * @phba: Pointer to HBA context object
9770 * @cmdiocb: Pointer to command iocb object.
9771 * @rspiocb: Pointer to response iocb object.
9772 *
9773 * This function is called when an aborted FCP iocb completes. This
9774 * function is called by the ring event handler with no lock held.
9775 * This function frees the iocb.
9776 **/
5eb95af0 9777void
2e0fef85
JS
9778lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9779 struct lpfc_iocbq *rspiocb)
5eb95af0 9780{
cb69f7de
JS
9781 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9782 "3096 ABORT_XRI_CN completing on xri x%x "
9783 "original iotag x%x, abort cmd iotag x%x "
9784 "status 0x%x, reason 0x%x\n",
9785 cmdiocb->iocb.un.acxri.abortContextTag,
9786 cmdiocb->iocb.un.acxri.abortIoTag,
9787 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9788 rspiocb->iocb.un.ulpWord[4]);
604a3e30 9789 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
9790 return;
9791}
9792
e59058c4 9793/**
3621a710 9794 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
e59058c4
JS
9795 * @vport: Pointer to virtual port.
9796 * @pring: Pointer to driver SLI ring object.
9797 * @tgt_id: SCSI ID of the target.
9798 * @lun_id: LUN ID of the scsi device.
9799 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9800 *
9801 * This function sends an abort command for every SCSI command
9802 * associated with the given virtual port pending on the ring
9803 * filtered by lpfc_sli_validate_fcp_iocb function.
9804 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9805 * FCP iocbs associated with lun specified by tgt_id and lun_id
9806 * parameters
9807 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9808 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9809 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9810 * FCP iocbs associated with virtual port.
9811 * This function returns number of iocbs it failed to abort.
9812 * This function is called with no locks held.
9813 **/
dea3101e 9814int
51ef4c26
JS
9815lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9816 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 9817{
51ef4c26 9818 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
9819 struct lpfc_iocbq *iocbq;
9820 struct lpfc_iocbq *abtsiocb;
dea3101e 9821 IOCB_t *cmd = NULL;
dea3101e 9822 int errcnt = 0, ret_val = 0;
0bd4ca25 9823 int i;
dea3101e 9824
0bd4ca25
JSEC
9825 for (i = 1; i <= phba->sli.last_iotag; i++) {
9826 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 9827
51ef4c26 9828 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 9829 abort_cmd) != 0)
dea3101e 9830 continue;
9831
9832 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 9833 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 9834 if (abtsiocb == NULL) {
9835 errcnt++;
9836 continue;
9837 }
dea3101e 9838
0bd4ca25 9839 cmd = &iocbq->iocb;
dea3101e 9840 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9841 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
9842 if (phba->sli_rev == LPFC_SLI_REV4)
9843 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9844 else
9845 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 9846 abtsiocb->iocb.ulpLe = 1;
9847 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2e0fef85 9848 abtsiocb->vport = phba->pport;
dea3101e 9849
5ffc266e
JS
9850 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9851 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
341af102
JS
9852 if (iocbq->iocb_flag & LPFC_IO_FCP)
9853 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 9854
2e0fef85 9855 if (lpfc_is_link_up(phba))
dea3101e 9856 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9857 else
9858 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9859
5eb95af0
JSEC
9860 /* Setup callback routine and issue the command. */
9861 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
da0436e9
JS
9862 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9863 abtsiocb, 0);
dea3101e 9864 if (ret_val == IOCB_ERROR) {
604a3e30 9865 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 9866 errcnt++;
9867 continue;
9868 }
9869 }
9870
9871 return errcnt;
9872}
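
/*
 * Editor's usage sketch (illustrative only): aborting everything queued to
 * one SCSI target on the FCP ring and reporting how many aborts could not
 * be issued. Indexing the ring array mirrors the fast-path handler below;
 * the _example name and log message id are hypothetical.
 */
static void
lpfc_sli_abort_tgt_example(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
	int errcnt;

	errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
	if (errcnt)
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "xxxx Could not issue %d abort(s)\n",
				 errcnt);
}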
9873
e59058c4 9874/**
3621a710 9875 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
e59058c4
JS
9876 * @phba: Pointer to HBA context object.
9877 * @cmdiocbq: Pointer to command iocb.
9878 * @rspiocbq: Pointer to response iocb.
9879 *
9880 * This function is the completion handler for iocbs issued using
9881 * lpfc_sli_issue_iocb_wait function. This function is called by the
9882 * ring event handler function without any lock held. This function
9883 * can be called from both worker thread context and interrupt
9884 * context. This function can also be called from another thread which
9885 * cleans up the SLI layer objects.
9886 * This function copies the contents of the response iocb to the
9887 * response iocb memory object provided by the caller of
9888 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9889 * sleeps for the iocb completion.
9890 **/
68876920
JSEC
9891static void
9892lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9893 struct lpfc_iocbq *cmdiocbq,
9894 struct lpfc_iocbq *rspiocbq)
dea3101e 9895{
68876920
JSEC
9896 wait_queue_head_t *pdone_q;
9897 unsigned long iflags;
0f65ff68 9898 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 9899
2e0fef85 9900 spin_lock_irqsave(&phba->hbalock, iflags);
68876920
JSEC
9901 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9902 if (cmdiocbq->context2 && rspiocbq)
9903 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9904 &rspiocbq->iocb, sizeof(IOCB_t));
9905
0f65ff68
JS
9906 /* Set the exchange busy flag for task management commands */
9907 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9908 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9909 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9910 cur_iocbq);
9911 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9912 }
9913
68876920 9914 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
9915 if (pdone_q)
9916 wake_up(pdone_q);
858c9f6c 9917 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 9918 return;
9919}
9920
d11e31dd
JS
9921/**
9922 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9923 * @phba: Pointer to HBA context object..
9924 * @piocbq: Pointer to command iocb.
9925 * @flag: Flag to test.
9926 *
9927 * This routine grabs the hbalock and then tests the iocb_flag to
9928 * see if the passed in flag is set.
9929 * Returns:
9930 * 1 if flag is set.
9931 * 0 if flag is not set.
9932 **/
9933static int
9934lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9935 struct lpfc_iocbq *piocbq, uint32_t flag)
9936{
9937 unsigned long iflags;
9938 int ret;
9939
9940 spin_lock_irqsave(&phba->hbalock, iflags);
9941 ret = piocbq->iocb_flag & flag;
9942 spin_unlock_irqrestore(&phba->hbalock, iflags);
9943 return ret;
9944
9945}
9946
e59058c4 9947/**
3621a710 9948 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
e59058c4
JS
9949 * @phba: Pointer to HBA context object..
9950 * @ring_number: SLI ring number to issue the iocb on.
9951 * @piocb: Pointer to command iocb.
9952 * @prspiocbq: Pointer to response iocb.
9953 * @timeout: Timeout in number of seconds.
9954 *
9955 * This function issues the iocb to firmware and waits for the
9956 * iocb to complete. If the iocb command is not
9957 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9958 * Caller should not free the iocb resources if this function
9959 * returns IOCB_TIMEDOUT.
9960 * The function waits for the iocb completion using a
9961 * non-interruptible wait.
9962 * This function will sleep while waiting for iocb completion.
9963 * So, this function should not be called from any context which
9964 * does not allow sleeping. Due to the same reason, this function
9965 * cannot be called with interrupts disabled.
9966 * This function assumes that the iocb completions occur while
9967 * this function sleeps. So, this function cannot be called from
9968 * the thread which processes iocb completion for this ring.
9969 * This function clears the iocb_flag of the iocb object before
9970 * issuing the iocb and the iocb completion handler sets this
9971 * flag and wakes this thread when the iocb completes.
9972 * The contents of the response iocb will be copied to prspiocbq
9973 * by the completion handler when the command completes.
9974 * This function returns IOCB_SUCCESS when success.
9975 * This function is called with no lock held.
9976 **/
dea3101e 9977int
2e0fef85 9978lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 9979 uint32_t ring_number,
2e0fef85
JS
9980 struct lpfc_iocbq *piocb,
9981 struct lpfc_iocbq *prspiocbq,
68876920 9982 uint32_t timeout)
dea3101e 9983{
7259f0d0 9984 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
9985 long timeleft, timeout_req = 0;
9986 int retval = IOCB_SUCCESS;
875fbdfe 9987 uint32_t creg_val;
0e9bb8d7
JS
9988 struct lpfc_iocbq *iocb;
9989 int txq_cnt = 0;
9990 int txcmplq_cnt = 0;
2a9bf3d0 9991 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea3101e 9992 /*
68876920
JSEC
9993 * If the caller has provided a response iocbq buffer, then context2
9994 * must be NULL or it is an error.
dea3101e 9995 */
68876920
JSEC
9996 if (prspiocbq) {
9997 if (piocb->context2)
9998 return IOCB_ERROR;
9999 piocb->context2 = prspiocbq;
dea3101e 10000 }
10001
68876920
JSEC
10002 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
10003 piocb->context_un.wait_queue = &done_q;
10004 piocb->iocb_flag &= ~LPFC_IO_WAKE;
dea3101e 10005
875fbdfe 10006 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
10007 if (lpfc_readl(phba->HCregaddr, &creg_val))
10008 return IOCB_ERROR;
875fbdfe
JSEC
10009 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
10010 writel(creg_val, phba->HCregaddr);
10011 readl(phba->HCregaddr); /* flush */
10012 }
10013
2a9bf3d0
JS
10014 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10015 SLI_IOCB_RET_IOCB);
68876920 10016 if (retval == IOCB_SUCCESS) {
256ec0d0 10017 timeout_req = msecs_to_jiffies(timeout * 1000);
68876920 10018 timeleft = wait_event_timeout(done_q,
d11e31dd 10019 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 10020 timeout_req);
dea3101e 10021
7054a606
JS
10022 if (piocb->iocb_flag & LPFC_IO_WAKE) {
10023 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 10024 "0331 IOCB wake signaled\n");
7054a606 10025 } else if (timeleft == 0) {
68876920 10026 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
10027 "0338 IOCB wait timeout error - no "
10028 "wake response Data x%x\n", timeout);
68876920 10029 retval = IOCB_TIMEDOUT;
7054a606 10030 } else {
68876920 10031 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
10032 "0330 IOCB wake NOT set, "
10033 "Data x%x x%lx\n",
68876920
JSEC
10034 timeout, (timeleft / jiffies));
10035 retval = IOCB_TIMEDOUT;
dea3101e 10036 }
2a9bf3d0 10037 } else if (retval == IOCB_BUSY) {
0e9bb8d7
JS
10038 if (phba->cfg_log_verbose & LOG_SLI) {
10039 list_for_each_entry(iocb, &pring->txq, list) {
10040 txq_cnt++;
10041 }
10042 list_for_each_entry(iocb, &pring->txcmplq, list) {
10043 txcmplq_cnt++;
10044 }
10045 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10046 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10047 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
10048 }
2a9bf3d0 10049 return retval;
68876920
JSEC
10050 } else {
10051 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 10052 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 10053 retval);
68876920 10054 retval = IOCB_ERROR;
dea3101e 10055 }
10056
875fbdfe 10057 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
10058 if (lpfc_readl(phba->HCregaddr, &creg_val))
10059 return IOCB_ERROR;
875fbdfe
JSEC
10060 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
10061 writel(creg_val, phba->HCregaddr);
10062 readl(phba->HCregaddr); /* flush */
10063 }
10064
68876920
JSEC
10065 if (prspiocbq)
10066 piocb->context2 = NULL;
10067
10068 piocb->context_un.wait_queue = NULL;
10069 piocb->iocb_cmpl = NULL;
dea3101e 10070 return retval;
10071}
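
/*
 * Editor's usage sketch (illustrative only): a synchronous ELS-ring issue
 * with a 30 second timeout. Per the kernel-doc above, piocb->context2 must
 * be NULL when a response buffer is passed, and nothing may be freed on
 * IOCB_TIMEDOUT since the completion can still arrive later.
 */
static int
lpfc_issue_wait_example(struct lpfc_hba *phba, struct lpfc_iocbq *piocb,
			struct lpfc_iocbq *prsp)
{
	int rc;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp, 30);
	switch (rc) {
	case IOCB_SUCCESS:
		/* prsp->iocb now holds the copied response */
		break;
	case IOCB_TIMEDOUT:
		/* Do NOT free piocb/prsp; the completion still owns them */
		break;
	default:
		/* IOCB_ERROR or IOCB_BUSY: the caller may clean up */
		break;
	}
	return rc;
}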
68876920 10072
e59058c4 10073/**
3621a710 10074 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
e59058c4
JS
10075 * @phba: Pointer to HBA context object.
10076 * @pmboxq: Pointer to driver mailbox object.
10077 * @timeout: Timeout in number of seconds.
10078 *
10079 * This function issues the mailbox to firmware and waits for the
10080 * mailbox command to complete. If the mailbox command is not
10081 * completed within timeout seconds, it returns MBX_TIMEOUT.
10082 * The function waits for the mailbox completion using an
10083 * interruptible wait. If the thread is woken up due to a
10084 * signal, MBX_TIMEOUT error is returned to the caller. Caller
10085 * should not free the mailbox resources, if this function returns
10086 * MBX_TIMEOUT.
10087 * This function will sleep while waiting for mailbox completion.
10088 * So, this function should not be called from any context which
10089 * does not allow sleeping. Due to the same reason, this function
10090 * cannot be called with interrupt disabled.
10091 * This function assumes that the mailbox completion occurs while
10092 * this function sleep. So, this function cannot be called from
10093 * the worker thread which processes mailbox completion.
10094 * This function is called in the context of HBA management
10095 * applications.
10096 * This function returns MBX_SUCCESS when successful.
10097 * This function is called with no lock held.
10098 **/
dea3101e 10099int
2e0fef85 10100lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e 10101 uint32_t timeout)
10102{
7259f0d0 10103 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
dea3101e 10104 int retval;
858c9f6c 10105 unsigned long flag;
dea3101e 10106
10107 /* The caller must leave context1 empty. */
98c9ea5c 10108 if (pmboxq->context1)
2e0fef85 10109 return MBX_NOT_FINISHED;
dea3101e 10110
495a714c 10111 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea3101e 10112 /* setup wake call as the mailbox completion callback */
10113 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10114 /* setup context field to pass wait_queue pointer to wake function */
10115 pmboxq->context1 = &done_q;
10116
dea3101e 10117 /* now issue the command */
10118 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 10119 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
7054a606
JS
10120 wait_event_interruptible_timeout(done_q,
10121 pmboxq->mbox_flag & LPFC_MBX_WAKE,
256ec0d0 10122 msecs_to_jiffies(timeout * 1000));
7054a606 10123
858c9f6c 10124 spin_lock_irqsave(&phba->hbalock, flag);
dea3101e 10125 pmboxq->context1 = NULL;
7054a606
JS
10126 /*
10127 * if LPFC_MBX_WAKE flag is set the mailbox is completed
10128 * else do not free the resources.
10129 */
d7c47992 10130 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 10131 retval = MBX_SUCCESS;
d7c47992
JS
10132 lpfc_sli4_swap_str(phba, pmboxq);
10133 } else {
7054a606 10134 retval = MBX_TIMEOUT;
858c9f6c
JS
10135 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10136 }
10137 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 10138 }
10139
dea3101e 10140 return retval;
10141}
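
/*
 * Editor's usage sketch (illustrative only): issuing a prepared mailbox
 * command synchronously. lpfc_mbox_tmo_val() is the per-command timeout
 * helper used elsewhere in this file; mbox_mem_pool is assumed to be the
 * pool the mailbox came from. On MBX_TIMEOUT ownership stays with the
 * default completion handler, so nothing is freed here.
 */
static int
lpfc_mbox_wait_example(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	int rc;

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
				      lpfc_mbox_tmo_val(phba, pmboxq));
	if (rc == MBX_SUCCESS)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}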
10142
e59058c4 10143/**
3772a991 10144 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
e59058c4
JS
10145 * @phba: Pointer to HBA context.
10146 *
3772a991
JS
10147 * This function is called to shutdown the driver's mailbox sub-system.
10148 * It first marks the mailbox sub-system is in a block state to prevent
10149 * the asynchronous mailbox command from being issued off the pending mailbox
10150 * command queue. If the mailbox command sub-system shutdown is due to
10151 * HBA error conditions such as EEH or ERATT, this routine shall invoke
10152 * the mailbox sub-system flush routine to forcefully bring down the
10153 * mailbox sub-system. Otherwise, if it is due to normal condition (such
10154 * as with offline or HBA function reset), this routine will wait for the
10155 * outstanding mailbox command to complete before invoking the mailbox
10156 * sub-system flush routine to gracefully bring down mailbox sub-system.
e59058c4 10157 **/
3772a991 10158void
618a5230 10159lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
b4c02652 10160{
3772a991 10161 struct lpfc_sli *psli = &phba->sli;
3772a991 10162 unsigned long timeout;
b4c02652 10163
618a5230
JS
10164 if (mbx_action == LPFC_MBX_NO_WAIT) {
10165 /* delay 100ms for port state */
10166 msleep(100);
10167 lpfc_sli_mbox_sys_flush(phba);
10168 return;
10169 }
a183a15f 10170 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 10171
3772a991
JS
10172 spin_lock_irq(&phba->hbalock);
10173 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 10174
3772a991 10175 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3772a991
JS
10176 /* Determine how long we might wait for the active mailbox
10177 * command to be gracefully completed by firmware.
10178 */
a183a15f
JS
10179 if (phba->sli.mbox_active)
10180 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10181 phba->sli.mbox_active) *
10182 1000) + jiffies;
10183 spin_unlock_irq(&phba->hbalock);
10184
3772a991
JS
10185 while (phba->sli.mbox_active) {
10186 /* Check active mailbox complete status every 2ms */
10187 msleep(2);
10188 if (time_after(jiffies, timeout))
10189 /* Timeout, let the mailbox flush routine
10190 * forcefully release the active mailbox command
10191 */
10192 break;
10193 }
d7069f09
JS
10194 } else
10195 spin_unlock_irq(&phba->hbalock);
10196
3772a991
JS
10197 lpfc_sli_mbox_sys_flush(phba);
10198}
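
/*
 * Editor's usage sketch (illustrative only): callers pick the shutdown mode
 * by context. LPFC_MBX_NO_WAIT appears above; LPFC_MBX_WAIT is assumed to
 * be its graceful counterpart defined alongside it in lpfc_sli.h.
 */
static void
lpfc_shutdown_mbox_example(struct lpfc_hba *phba, bool in_error_recovery)
{
	/* Flush immediately on error attention, wait gracefully otherwise */
	lpfc_sli_mbox_sys_shutdown(phba, in_error_recovery ?
				   LPFC_MBX_NO_WAIT : LPFC_MBX_WAIT);
}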
ed957684 10199
3772a991
JS
10200/**
10201 * lpfc_sli_eratt_read - read sli-3 error attention events
10202 * @phba: Pointer to HBA context.
10203 *
10204 * This function is called to read the SLI3 device error attention registers
10205 * for possible error attention events. The caller must hold the hostlock
10206 * with spin_lock_irq().
10207 *
25985edc 10208 * This function returns 1 when there is Error Attention in the Host Attention
3772a991
JS
10209 * Register and returns 0 otherwise.
10210 **/
10211static int
10212lpfc_sli_eratt_read(struct lpfc_hba *phba)
10213{
10214 uint32_t ha_copy;
b4c02652 10215
3772a991 10216 /* Read chip Host Attention (HA) register */
9940b97b
JS
10217 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10218 goto unplug_err;
10219
3772a991
JS
10220 if (ha_copy & HA_ERATT) {
10221 /* Read host status register to retrieve error event */
9940b97b
JS
10222 if (lpfc_sli_read_hs(phba))
10223 goto unplug_err;
b4c02652 10224
3772a991
JS
10225 /* Check if a deferred error condition is active */
10226 if ((HS_FFER1 & phba->work_hs) &&
10227 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 10228 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 10229 phba->hba_flag |= DEFER_ERATT;
3772a991
JS
10230 /* Clear all interrupt enable conditions */
10231 writel(0, phba->HCregaddr);
10232 readl(phba->HCregaddr);
10233 }
10234
10235 /* Set the driver HA work bitmap */
3772a991
JS
10236 phba->work_ha |= HA_ERATT;
10237 /* Indicate polling handles this ERATT */
10238 phba->hba_flag |= HBA_ERATT_HANDLED;
3772a991
JS
10239 return 1;
10240 }
10241 return 0;
9940b97b
JS
10242
10243unplug_err:
10244 /* Set the driver HS work bitmap */
10245 phba->work_hs |= UNPLUG_ERR;
10246 /* Set the driver HA work bitmap */
10247 phba->work_ha |= HA_ERATT;
10248 /* Indicate polling handles this ERATT */
10249 phba->hba_flag |= HBA_ERATT_HANDLED;
10250 return 1;
b4c02652
JS
10251}
10252
da0436e9
JS
10253/**
10254 * lpfc_sli4_eratt_read - read sli-4 error attention events
10255 * @phba: Pointer to HBA context.
10256 *
10257 * This function is called to read the SLI4 device error attention registers
10258 * for possible error attention events. The caller must hold the hostlock
10259 * with spin_lock_irq().
10260 *
25985edc 10261 * This function returns 1 when there is Error Attention in the Host Attention
da0436e9
JS
10262 * Register and returns 0 otherwise.
10263 **/
10264static int
10265lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10266{
10267 uint32_t uerr_sta_hi, uerr_sta_lo;
2fcee4bf
JS
10268 uint32_t if_type, portsmphr;
10269 struct lpfc_register portstat_reg;
da0436e9 10270
2fcee4bf
JS
10271 /*
10272 * For now, use the SLI4 device internal unrecoverable error
da0436e9
JS
10273 * registers for error attention. This can be changed later.
10274 */
2fcee4bf
JS
10275 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10276 switch (if_type) {
10277 case LPFC_SLI_INTF_IF_TYPE_0:
9940b97b
JS
10278 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10279 &uerr_sta_lo) ||
10280 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10281 &uerr_sta_hi)) {
10282 phba->work_hs |= UNPLUG_ERR;
10283 phba->work_ha |= HA_ERATT;
10284 phba->hba_flag |= HBA_ERATT_HANDLED;
10285 return 1;
10286 }
2fcee4bf
JS
10287 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10288 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10290 "1423 HBA Unrecoverable error: "
10291 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10292 "ue_mask_lo_reg=0x%x, "
10293 "ue_mask_hi_reg=0x%x\n",
10294 uerr_sta_lo, uerr_sta_hi,
10295 phba->sli4_hba.ue_mask_lo,
10296 phba->sli4_hba.ue_mask_hi);
10297 phba->work_status[0] = uerr_sta_lo;
10298 phba->work_status[1] = uerr_sta_hi;
10299 phba->work_ha |= HA_ERATT;
10300 phba->hba_flag |= HBA_ERATT_HANDLED;
10301 return 1;
10302 }
10303 break;
10304 case LPFC_SLI_INTF_IF_TYPE_2:
9940b97b
JS
10305 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10306 &portstat_reg.word0) ||
10307 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10308 &portsmphr)){
10309 phba->work_hs |= UNPLUG_ERR;
10310 phba->work_ha |= HA_ERATT;
10311 phba->hba_flag |= HBA_ERATT_HANDLED;
10312 return 1;
10313 }
2fcee4bf
JS
10314 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10315 phba->work_status[0] =
10316 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10317 phba->work_status[1] =
10318 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 10320 "2885 Port Status Event: "
2fcee4bf
JS
10321 "port status reg 0x%x, "
10322 "port smphr reg 0x%x, "
10323 "error 1=0x%x, error 2=0x%x\n",
10324 portstat_reg.word0,
10325 portsmphr,
10326 phba->work_status[0],
10327 phba->work_status[1]);
10328 phba->work_ha |= HA_ERATT;
10329 phba->hba_flag |= HBA_ERATT_HANDLED;
10330 return 1;
10331 }
10332 break;
10333 case LPFC_SLI_INTF_IF_TYPE_1:
10334 default:
a747c9ce 10335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
10336 "2886 HBA Error Attention on unsupported "
10337 "if type %d.", if_type);
a747c9ce 10338 return 1;
da0436e9 10339 }
2fcee4bf 10340
da0436e9
JS
10341 return 0;
10342}
10343
e59058c4 10344/**
3621a710 10345 * lpfc_sli_check_eratt - check error attention events
9399627f
JS
10346 * @phba: Pointer to HBA context.
10347 *
3772a991 10348 * This function is called from timer soft interrupt context to check HBA's
9399627f
JS
10349 * error attention register bit for error attention events.
10350 *
25985edc 10351 * This function returns 1 when there is Error Attention in the Host Attention
9399627f
JS
10352 * Register and returns 0 otherwise.
10353 **/
10354int
10355lpfc_sli_check_eratt(struct lpfc_hba *phba)
10356{
10357 uint32_t ha_copy;
10358
10359 /* If somebody is waiting to handle an eratt, don't process it
10360 * here. The brdkill function will do this.
10361 */
10362 if (phba->link_flag & LS_IGNORE_ERATT)
10363 return 0;
10364
10365 /* Check if interrupt handler handles this ERATT */
10366 spin_lock_irq(&phba->hbalock);
10367 if (phba->hba_flag & HBA_ERATT_HANDLED) {
10368 /* Interrupt handler has handled ERATT */
10369 spin_unlock_irq(&phba->hbalock);
10370 return 0;
10371 }
10372
a257bf90
JS
10373 /*
10374 * If there is deferred error attention, do not check for error
10375 * attention
10376 */
10377 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10378 spin_unlock_irq(&phba->hbalock);
10379 return 0;
10380 }
10381
3772a991
JS
10382 /* If PCI channel is offline, don't process it */
10383 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 10384 spin_unlock_irq(&phba->hbalock);
3772a991
JS
10385 return 0;
10386 }
10387
10388 switch (phba->sli_rev) {
10389 case LPFC_SLI_REV2:
10390 case LPFC_SLI_REV3:
10391 /* Read chip Host Attention (HA) register */
10392 ha_copy = lpfc_sli_eratt_read(phba);
10393 break;
da0436e9 10394 case LPFC_SLI_REV4:
2fcee4bf 10395 /* Read device Unrecoverable Error (UERR) registers */
da0436e9
JS
10396 ha_copy = lpfc_sli4_eratt_read(phba);
10397 break;
3772a991
JS
10398 default:
10399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10400 "0299 Invalid SLI revision (%d)\n",
10401 phba->sli_rev);
10402 ha_copy = 0;
10403 break;
9399627f
JS
10404 }
10405 spin_unlock_irq(&phba->hbalock);
3772a991
JS
10406
10407 return ha_copy;
10408}
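
/*
 * Editor's usage sketch (illustrative only): how a timer-context poller can
 * consume lpfc_sli_check_eratt(). lpfc_worker_wake_up() is the same wakeup
 * used by the slow-path interrupt handler later in this file.
 */
static void
lpfc_poll_eratt_example(struct lpfc_hba *phba)
{
	/* A non-zero return means HA_ERATT was latched into phba->work_ha */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}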
10409
10410/**
10411 * lpfc_intr_state_check - Check device state for interrupt handling
10412 * @phba: Pointer to HBA context.
10413 *
10414 * This inline routine checks whether a device or its PCI slot is in a state
10415 * in which the interrupt should be handled.
10416 *
10417 * This function returns 0 if the device or the PCI slot is in a state that
10418 * interrupt should be handled, otherwise -EIO.
10419 */
10420static inline int
10421lpfc_intr_state_check(struct lpfc_hba *phba)
10422{
10423 /* If the pci channel is offline, ignore all the interrupts */
10424 if (unlikely(pci_channel_offline(phba->pcidev)))
10425 return -EIO;
10426
10427 /* Update device level interrupt statistics */
10428 phba->sli.slistat.sli_intr++;
10429
10430 /* Ignore all interrupts during initialization. */
10431 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10432 return -EIO;
10433
9399627f
JS
10434 return 0;
10435}
10436
10437/**
3772a991 10438 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
e59058c4
JS
10439 * @irq: Interrupt number.
10440 * @dev_id: The device context pointer.
10441 *
9399627f 10442 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
10443 * service routine when a device with SLI-3 interface spec is enabled with
10444 * MSI-X multi-message interrupt mode and there are slow-path events in
10445 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
10446 * interrupt mode, this function is called as part of the device-level
10447 * interrupt handler. When the PCI slot is in error recovery or the HBA
10448 * is undergoing initialization, the interrupt handler will not process
10449 * the interrupt. The link attention and ELS ring attention events are
10450 * handled by the worker thread. The interrupt handler signals the worker
10451 * thread and returns for these events. This function is called without
10452 * any lock held. It gets the hbalock to access and update SLI data
9399627f
JS
10453 * structures.
10454 *
10455 * This function returns IRQ_HANDLED when interrupt is handled else it
10456 * returns IRQ_NONE.
e59058c4 10457 **/
dea3101e 10458irqreturn_t
3772a991 10459lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 10460{
2e0fef85 10461 struct lpfc_hba *phba;
a747c9ce 10462 uint32_t ha_copy, hc_copy;
dea3101e 10463 uint32_t work_ha_copy;
10464 unsigned long status;
5b75da2f 10465 unsigned long iflag;
dea3101e 10466 uint32_t control;
10467
92d7f7b0 10468 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
10469 struct lpfc_vport *vport;
10470 struct lpfc_nodelist *ndlp;
10471 struct lpfc_dmabuf *mp;
92d7f7b0
JS
10472 LPFC_MBOXQ_t *pmb;
10473 int rc;
10474
dea3101e 10475 /*
10476 * Get the driver's phba structure from the dev_id and
10477 * assume the HBA is not interrupting.
10478 */
9399627f 10479 phba = (struct lpfc_hba *)dev_id;
dea3101e 10480
10481 if (unlikely(!phba))
10482 return IRQ_NONE;
10483
dea3101e 10484 /*
9399627f
JS
10485 * Stuff needs to be attended to when this function is invoked as an
10486 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 10487 */
9399627f 10488 if (phba->intr_type == MSIX) {
3772a991
JS
10489 /* Check device state for handling interrupt */
10490 if (lpfc_intr_state_check(phba))
9399627f
JS
10491 return IRQ_NONE;
10492 /* Need to read HA REG for slow-path events */
5b75da2f 10493 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
10494 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10495 goto unplug_error;
9399627f
JS
10496 /* If somebody is waiting to handle an eratt don't process it
10497 * here. The brdkill function will do this.
10498 */
10499 if (phba->link_flag & LS_IGNORE_ERATT)
10500 ha_copy &= ~HA_ERATT;
10501 /* Check the need for handling ERATT in interrupt handler */
10502 if (ha_copy & HA_ERATT) {
10503 if (phba->hba_flag & HBA_ERATT_HANDLED)
10504 /* ERATT polling has handled ERATT */
10505 ha_copy &= ~HA_ERATT;
10506 else
10507 /* Indicate interrupt handler handles ERATT */
10508 phba->hba_flag |= HBA_ERATT_HANDLED;
10509 }
a257bf90
JS
10510
10511 /*
10512 * If there is deferred error attention, do not check for any
10513 * interrupt.
10514 */
10515 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 10516 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
10517 return IRQ_NONE;
10518 }
10519
9399627f 10520 /* Clear up only attention source related to slow-path */
9940b97b
JS
10521 if (lpfc_readl(phba->HCregaddr, &hc_copy))
10522 goto unplug_error;
10523
a747c9ce
JS
10524 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
10525 HC_LAINT_ENA | HC_ERINT_ENA),
10526 phba->HCregaddr);
9399627f
JS
10527 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
10528 phba->HAregaddr);
a747c9ce 10529 writel(hc_copy, phba->HCregaddr);
9399627f 10530 readl(phba->HAregaddr); /* flush */
5b75da2f 10531 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
10532 } else
10533 ha_copy = phba->ha_copy;
dea3101e 10534
dea3101e 10535 work_ha_copy = ha_copy & phba->work_ha_mask;
10536
9399627f 10537 if (work_ha_copy) {
dea3101e 10538 if (work_ha_copy & HA_LATT) {
10539 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
10540 /*
10541 * Turn off Link Attention interrupts
10542 * until CLEAR_LA done
10543 */
5b75da2f 10544 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 10545 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
9940b97b
JS
10546 if (lpfc_readl(phba->HCregaddr, &control))
10547 goto unplug_error;
dea3101e 10548 control &= ~HC_LAINT_ENA;
10549 writel(control, phba->HCregaddr);
10550 readl(phba->HCregaddr); /* flush */
5b75da2f 10551 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 10552 }
10553 else
10554 work_ha_copy &= ~HA_LATT;
10555 }
10556
9399627f 10557 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
858c9f6c
JS
10558 /*
10559 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10560 * the only slow ring.
10561 */
10562 status = (work_ha_copy &
10563 (HA_RXMASK << (4*LPFC_ELS_RING)));
10564 status >>= (4*LPFC_ELS_RING);
10565 if (status & HA_RXMASK) {
5b75da2f 10566 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
10567 if (lpfc_readl(phba->HCregaddr, &control))
10568 goto unplug_error;
a58cbd52
JS
10569
10570 lpfc_debugfs_slow_ring_trc(phba,
10571 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
10572 control, status,
10573 (uint32_t)phba->sli.slistat.sli_intr);
10574
858c9f6c 10575 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
10576 lpfc_debugfs_slow_ring_trc(phba,
10577 "ISR Disable ring:"
10578 "pwork:x%x hawork:x%x wait:x%x",
10579 phba->work_ha, work_ha_copy,
10580 (uint32_t)((unsigned long)
5e9d9b82 10581 &phba->work_waitq));
a58cbd52 10582
858c9f6c
JS
10583 control &=
10584 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e 10585 writel(control, phba->HCregaddr);
10586 readl(phba->HCregaddr); /* flush */
dea3101e 10587 }
a58cbd52
JS
10588 else {
10589 lpfc_debugfs_slow_ring_trc(phba,
10590 "ISR slow ring: pwork:"
10591 "x%x hawork:x%x wait:x%x",
10592 phba->work_ha, work_ha_copy,
10593 (uint32_t)((unsigned long)
5e9d9b82 10594 &phba->work_waitq));
a58cbd52 10595 }
5b75da2f 10596 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 10597 }
10598 }
5b75da2f 10599 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 10600 if (work_ha_copy & HA_ERATT) {
9940b97b
JS
10601 if (lpfc_sli_read_hs(phba))
10602 goto unplug_error;
a257bf90
JS
10603 /*
10604 * Check if a deferred error condition
10605 * is active
10606 */
10607 if ((HS_FFER1 & phba->work_hs) &&
10608 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0
JS
10609 HS_FFER6 | HS_FFER7 | HS_FFER8) &
10610 phba->work_hs)) {
a257bf90
JS
10611 phba->hba_flag |= DEFER_ERATT;
10612 /* Clear all interrupt enable conditions */
10613 writel(0, phba->HCregaddr);
10614 readl(phba->HCregaddr);
10615 }
10616 }
10617
9399627f 10618 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 10619 pmb = phba->sli.mbox_active;
04c68496 10620 pmbox = &pmb->u.mb;
34b02dcd 10621 mbox = phba->mbox;
858c9f6c 10622 vport = pmb->vport;
92d7f7b0
JS
10623
10624 /* First check out the status word */
10625 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10626 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 10627 spin_unlock_irqrestore(&phba->hbalock, iflag);
92d7f7b0
JS
10628 /*
10629 * Stray Mailbox Interrupt, mbxCommand <cmd>
10630 * mbxStatus <status>
10631 */
09372820 10632 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 10633 LOG_SLI,
e8b62011 10634 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
10635 "Interrupt mbxCommand x%x "
10636 "mbxStatus x%x\n",
e8b62011 10637 (vport ? vport->vpi : 0),
92d7f7b0
JS
10638 pmbox->mbxCommand,
10639 pmbox->mbxStatus);
09372820
JS
10640 /* clear mailbox attention bit */
10641 work_ha_copy &= ~HA_MBATT;
10642 } else {
97eab634 10643 phba->sli.mbox_active = NULL;
5b75da2f 10644 spin_unlock_irqrestore(&phba->hbalock, iflag);
09372820
JS
10645 phba->last_completion_time = jiffies;
10646 del_timer(&phba->sli.mbox_tmo);
09372820
JS
10647 if (pmb->mbox_cmpl) {
10648 lpfc_sli_pcimem_bcopy(mbox, pmbox,
10649 MAILBOX_CMD_SIZE);
7a470277
JS
10650 if (pmb->out_ext_byte_len &&
10651 pmb->context2)
10652 lpfc_sli_pcimem_bcopy(
10653 phba->mbox_ext,
10654 pmb->context2,
10655 pmb->out_ext_byte_len);
09372820
JS
10656 }
10657 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10658 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10659
10660 lpfc_debugfs_disc_trc(vport,
10661 LPFC_DISC_TRC_MBOX_VPORT,
10662 "MBOX dflt rpi: : "
10663 "status:x%x rpi:x%x",
10664 (uint32_t)pmbox->mbxStatus,
10665 pmbox->un.varWords[0], 0);
10666
10667 if (!pmbox->mbxStatus) {
10668 mp = (struct lpfc_dmabuf *)
10669 (pmb->context1);
10670 ndlp = (struct lpfc_nodelist *)
10671 pmb->context2;
10672
10673 /* Reg_LOGIN of dflt RPI was
10674 * successful. Now let's get
10675 * rid of the RPI using the
10676 * same mbox buffer.
10677 */
10678 lpfc_unreg_login(phba,
10679 vport->vpi,
10680 pmbox->un.varWords[0],
10681 pmb);
10682 pmb->mbox_cmpl =
10683 lpfc_mbx_cmpl_dflt_rpi;
10684 pmb->context1 = mp;
10685 pmb->context2 = ndlp;
10686 pmb->vport = vport;
58da1ffb
JS
10687 rc = lpfc_sli_issue_mbox(phba,
10688 pmb,
10689 MBX_NOWAIT);
10690 if (rc != MBX_BUSY)
10691 lpfc_printf_log(phba,
10692 KERN_ERR,
10693 LOG_MBOX | LOG_SLI,
d7c255b2 10694 "0350 rc should have"
6a9c52cf 10695 "been MBX_BUSY\n");
3772a991
JS
10696 if (rc != MBX_NOT_FINISHED)
10697 goto send_current_mbox;
09372820 10698 }
858c9f6c 10699 }
5b75da2f
JS
10700 spin_lock_irqsave(
10701 &phba->pport->work_port_lock,
10702 iflag);
09372820
JS
10703 phba->pport->work_port_events &=
10704 ~WORKER_MBOX_TMO;
5b75da2f
JS
10705 spin_unlock_irqrestore(
10706 &phba->pport->work_port_lock,
10707 iflag);
09372820 10708 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 10709 }
97eab634 10710 } else
5b75da2f 10711 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 10712
92d7f7b0
JS
10713 if ((work_ha_copy & HA_MBATT) &&
10714 (phba->sli.mbox_active == NULL)) {
858c9f6c 10715send_current_mbox:
92d7f7b0 10716 /* Process next mailbox command if there is one */
58da1ffb
JS
10717 do {
10718 rc = lpfc_sli_issue_mbox(phba, NULL,
10719 MBX_NOWAIT);
10720 } while (rc == MBX_NOT_FINISHED);
10721 if (rc != MBX_SUCCESS)
10722 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10723 LOG_SLI, "0349 rc should be "
6a9c52cf 10724 "MBX_SUCCESS\n");
92d7f7b0
JS
10725 }
10726
5b75da2f 10727 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 10728 phba->work_ha |= work_ha_copy;
5b75da2f 10729 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 10730 lpfc_worker_wake_up(phba);
dea3101e 10731 }
9399627f 10732 return IRQ_HANDLED;
9940b97b
JS
10733unplug_error:
10734 spin_unlock_irqrestore(&phba->hbalock, iflag);
10735 return IRQ_HANDLED;
dea3101e 10736
3772a991 10737} /* lpfc_sli_sp_intr_handler */
9399627f
JS
10738
10739/**
3772a991 10740 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
9399627f
JS
10741 * @irq: Interrupt number.
10742 * @dev_id: The device context pointer.
10743 *
10744 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
10745 * service routine when a device with SLI-3 interface spec is enabled with
10746 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10747 * ring event in the HBA. However, when the device is enabled with either
10748 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10749 * device-level interrupt handler. When the PCI slot is in error recovery
10750 * or the HBA is undergoing initialization, the interrupt handler will not
10751 * process the interrupt. The SCSI FCP fast-path ring events are handled in
10752 * the interrupt context. This function is called without any lock held.
10753 * It gets the hbalock to access and update SLI data structures.
9399627f
JS
10754 *
10755 * This function returns IRQ_HANDLED when interrupt is handled else it
10756 * returns IRQ_NONE.
10757 **/
10758irqreturn_t
3772a991 10759lpfc_sli_fp_intr_handler(int irq, void *dev_id)
9399627f
JS
10760{
10761 struct lpfc_hba *phba;
10762 uint32_t ha_copy;
10763 unsigned long status;
5b75da2f 10764 unsigned long iflag;
9399627f
JS
10765
10766 /* Get the driver's phba structure from the dev_id and
10767 * assume the HBA is not interrupting.
10768 */
10769 phba = (struct lpfc_hba *) dev_id;
10770
10771 if (unlikely(!phba))
10772 return IRQ_NONE;
10773
10774 /*
10775 * Stuff needs to be attended to when this function is invoked as an
10776 * individual interrupt handler in MSI-X multi-message interrupt mode
10777 */
10778 if (phba->intr_type == MSIX) {
3772a991
JS
10779 /* Check device state for handling interrupt */
10780 if (lpfc_intr_state_check(phba))
9399627f
JS
10781 return IRQ_NONE;
10782 /* Need to read HA REG for FCP ring and other ring events */
9940b97b
JS
10783 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10784 return IRQ_HANDLED;
9399627f 10785 /* Clear up only attention source related to fast-path */
5b75da2f 10786 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90
JS
10787 /*
10788 * If there is deferred error attention, do not check for
10789 * any interrupt.
10790 */
10791 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 10792 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
10793 return IRQ_NONE;
10794 }
9399627f
JS
10795 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10796 phba->HAregaddr);
10797 readl(phba->HAregaddr); /* flush */
5b75da2f 10798 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
10799 } else
10800 ha_copy = phba->ha_copy;
dea3101e 10801
10802 /*
9399627f 10803 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 10804 */
9399627f
JS
10805 ha_copy &= ~(phba->work_ha_mask);
10806
10807 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 10808 status >>= (4*LPFC_FCP_RING);
858c9f6c 10809 if (status & HA_RXMASK)
dea3101e 10810 lpfc_sli_handle_fast_ring_event(phba,
10811 &phba->sli.ring[LPFC_FCP_RING],
10812 status);
a4bc3379
JS
10813
10814 if (phba->cfg_multi_ring_support == 2) {
10815 /*
9399627f
JS
10816 * Process all events on extra ring. Take the optimized path
10817 * for extra ring IO.
a4bc3379 10818 */
9399627f 10819 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 10820 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 10821 if (status & HA_RXMASK) {
a4bc3379
JS
10822 lpfc_sli_handle_fast_ring_event(phba,
10823 &phba->sli.ring[LPFC_EXTRA_RING],
10824 status);
10825 }
10826 }
dea3101e 10827 return IRQ_HANDLED;
3772a991 10828} /* lpfc_sli_fp_intr_handler */
9399627f
JS
10829
10830/**
3772a991 10831 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
9399627f
JS
10832 * @irq: Interrupt number.
10833 * @dev_id: The device context pointer.
10834 *
3772a991
JS
10835 * This function is the HBA device-level interrupt handler for a device with
10836 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
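
/*
 * A minimal sketch, not part of the upstream driver: each ring owns a
 * 4-bit attention group in the HA register copy, so the handler above
 * recovers a ring's receive-attention status by masking and shifting.
 * The helper below restates that arithmetic; its name is hypothetical
 * and it exists for illustration only.
 */
static inline unsigned long
lpfc_example_ring_rx_status(uint32_t ha_copy, int ring)
{
	/* Shift the ring's 4-bit group down, then keep the RX bits */
	return (ha_copy >> (4 * ring)) & HA_RXMASK;
}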

/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 ELS abort XRI events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	unsigned long iflags;
	uint32_t status;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
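
/*
 * A minimal sketch, not part of the upstream driver: once a DI error
 * WCQE has been folded into sli3_bg.bgstat above, a consumer can decode
 * which BlockGuard check tripped.  The helper name is hypothetical, for
 * illustration only.
 */
static inline void
lpfc_example_decode_bgstat(uint32_t bgstat, uint32_t bghm)
{
	if (bgstat & BGS_GUARD_ERR_MASK)
		pr_info("BlockGuard: guard (CRC/checksum) check failed\n");
	if (bgstat & BGS_APPTAG_ERR_MASK)
		pr_info("BlockGuard: application tag check failed\n");
	if (bgstat & BGS_REFTAG_ERR_MASK)
		pr_info("BlockGuard: reference tag (LBA) check failed\n");
	if (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK)
		/* bghm holds the bytes placed before the failure */
		pr_info("BlockGuard: %u good bytes before error\n", bghm);
}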

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to response iocbq carrying the work-queue completion
 * queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into an asynchronous event entry */
	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, bail out to the consume check */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting the active mailbox pointer needs to be in sync with the
	 * flag clear.
	 */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
			fcp_txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_ELS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid work queue CQE subtype (x%x)\n",
				cq->subtype);
		workposted = false;
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		hrq->RQ_buf_trunc++;
		/* Fall through: the truncated frame is still processed */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
		/* save off the frame for the worker thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event queue.
 * It will check the MajorCode and MinorCode to determine this is for a
 * completion event on a completion queue; if not, an error shall be logged
 * and the routine just returns. Otherwise, it will get to the corresponding
 * completion queue and process all the entries on that completion queue,
 * rearm the completion queue, and then return.
 *
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
			cq->CQ_mbox++;
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			if (cq->subtype == LPFC_FCP)
				workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
								       cqe);
			else
				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
								      cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}

		/* Track the max number of CQEs processed in 1 EQ */
		if (ecount > cq->CQ_max_cqe)
			cq->CQ_max_cqe = ecount;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there is work to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
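
/*
 * A minimal sketch, not part of the upstream driver: the CQ polling
 * loops above hand consumed entries back to the port in batches of
 * cq->entry_repost (entry_count / 8, with a floor of
 * LPFC_QUEUE_MIN_REPOST) so the queue is replenished while the CPU is
 * still draining it, and only the final release rearms the interrupt.
 * The helper name is hypothetical, for illustration only.
 */
static inline void
lpfc_example_drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	int ecount = 0;

	while ((cqe = lpfc_sli4_cq_get(cq))) {
		/* ... dispatch the CQE as the handlers above do ... */
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}
	/* Final release rearms the CQ for the next interrupt */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
}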

/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		      IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t fcp_wqid;

	/* Check for fast-path FCP work queue release */
	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == fcp_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
}

/**
 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static int
lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @qidx: Index of the fast-path FCP IO channel.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine this is for a
 * completion event on a completion queue; if not, an error shall be logged
 * and the routine just returns. Otherwise, it will get to the corresponding
 * completion queue and process all the entries on the completion queue,
 * rearm the completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			 uint32_t qidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Check if this is a Slow path event */
	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hba_eq[qidx]);
		return;
	}

	if (unlikely(!phba->sli4_hba.fcp_cq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3146 Fast-path completion queues "
				"does not exist\n");
		return;
	}
	cq = phba->sli4_hba.fcp_cq[qidx];
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0367 Fast-path completion queue "
					"(%d) does not exist\n", qidx);
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ecount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ecount;

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there is work to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* walk all the EQ entries and drop on the floor */
	while ((eqe = lpfc_sli4_eq_get(eq)))
		;

	/* Clear and re-arm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
 * equal to the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	int fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;
	fcp_eqidx = fcp_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hba_eq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
	if (unlikely(!fpeq))
		return IRQ_NONE;

	if (lpfc_fcp_look_ahead) {
		if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
			lpfc_sli4_eq_clr_intr(fpeq);
		else {
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
			return IRQ_NONE;
		}
	}

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		fpeq->EQ_badstate++;
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (lpfc_fcp_look_ahead)
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
		if (!(++ecount % fpeq->entry_repost))
			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
		fpeq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > fpeq->EQ_max_eqe)
		fpeq->EQ_max_eqe = ecount;

	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;

		if (lpfc_fcp_look_ahead) {
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
			return IRQ_NONE;
		}

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	if (lpfc_fcp_look_ahead)
		atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */
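
/*
 * A minimal sketch, not part of the upstream driver: with
 * lpfc_fcp_look_ahead enabled, the handler above uses fcp_eq_in_use as
 * a single-permit gate so only one context polls a given EQ at a time
 * (this assumes the counter is initialized to 1 elsewhere in the
 * driver).  Hypothetical helpers, for illustration only.
 */
static inline bool
lpfc_example_eq_try_claim(struct lpfc_fcp_eq_hdl *hdl)
{
	/* A 1 -> 0 transition means this context now owns the EQ */
	if (atomic_dec_and_test(&hdl->fcp_eq_in_use))
		return true;
	/* Lost the race: undo the decrement and back off */
	atomic_inc(&hdl->fcp_eq_in_use);
	return false;
}

static inline void
lpfc_example_eq_release_claim(struct lpfc_fcp_eq_hdl *hdl)
{
	/* Return the single permit */
	atomic_inc(&hdl->fcp_eq_in_use);
}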

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	kfree(queue);
	return;
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;
	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		memset(dmabuf->virt, 0, hw_page_size);
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;

	/*
	 * entry_repost is calculated based on the number of entries in the
	 * queue. This works out except for RQs. If buffers are NOT initially
	 * posted for every RQE, entry_repost should be adjusted accordingly.
	 */
	queue->entry_repost = (entry_count >> 3);
	if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
		queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
	queue->phba = phba;

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
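
/*
 * A minimal sketch, not part of the upstream driver: allocating host
 * memory for a 1024-entry, 4-byte-entry event queue.  With a 4KB
 * hardware page this needs one page, and entry_repost works out to
 * 1024 >> 3 = 128.  The helper name and the LPFC_EQE_SIZE_4B entry
 * size are illustrative assumptions.
 */
static struct lpfc_queue *
lpfc_example_alloc_eq(struct lpfc_hba *phba)
{
	return lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE_4B, 1024);
}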

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	struct pci_dev *pdev;
	unsigned long bar_map, bar_map_len;

	if (!phba->pcidev)
		return NULL;
	else
		pdev = phba->pcidev;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		if (!phba->pci_bar0_memmap_p) {
			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
			phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
		}
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		if (!phba->pci_bar2_memmap_p) {
			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
			phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
		}
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		if (!phba->pci_bar4_memmap_p) {
			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
			phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
		}
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

/**
 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
 * @phba: HBA structure that indicates port to create a queue on.
 * @startq: The starting FCP EQ to modify
 *
 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @startq
 * is used to get the starting FCP EQ to change. This function waits for
 * the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt, rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t result;
	int fcp_eqidx;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;

	if (startq >= phba->cfg_fcp_io_channel)
		return 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
	if (result > LPFC_DMULT_CONST)
		dmult = 0;
	else
		dmult = LPFC_DMULT_CONST/result - 1;

	cnt = 0;
	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
	     fcp_eqidx++) {
		eq = phba->sli4_hba.hba_eq[fcp_eqidx];
		if (!eq)
			continue;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;
		cnt++;
		if (cnt >= LPFC_MAX_EQ_DELAY)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
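
/*
 * A minimal sketch, not part of the upstream driver: the delay
 * multiplier computed above spreads cfg_fcp_imax interrupts per second
 * across the io channels, so each EQ's budget is imax / channels and
 * dmult = LPFC_DMULT_CONST / budget - 1 (or 0 when the budget already
 * exceeds LPFC_DMULT_CONST).  Hypothetical helper, for illustration
 * only.
 */
static inline uint16_t
lpfc_example_eq_dmult(uint32_t imax, uint32_t io_channels)
{
	uint32_t per_eq = imax / io_channels;

	if (!per_eq)
		per_eq = 1;	/* avoid divide-by-zero for tiny imax */
	if (per_eq > LPFC_DMULT_CONST)
		return 0;	/* fastest: no extra coalescing delay */
	return LPFC_DMULT_CONST / per_eq - 1;
}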

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue, and waits for the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	/* Calculate delay multiplier from maximum interrupt per second */
	if (imax > LPFC_DMULT_CONST)
		dmult = 0;
	else
		dmult = LPFC_DMULT_CONST/imax - 1;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
12428
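/*
 * Editor's illustrative sketch (not part of the driver): the delay
 * multiplier arithmetic used by lpfc_eq_create() above, pulled out so the
 * mapping from a caller-supplied interrupts-per-second ceiling to the EQ
 * context value is easy to see.  The function name is hypothetical; the
 * imax == 0 guard is an assumption the real caller is expected to satisfy.
 */
static uint16_t lpfc_eq_dmult_sketch(uint32_t imax)
{
	/* a ceiling above LPFC_DMULT_CONST means no moderation (dmult 0) */
	if (!imax || imax > LPFC_DMULT_CONST)
		return 0;
	/* e.g. imax == LPFC_DMULT_CONST / 2 yields a dmult of 1 */
	return LPFC_DMULT_CONST / imax - 1;
}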
12429/**
12430 * lpfc_cq_create - Create a Completion Queue on the HBA
12431 * @phba: HBA structure that indicates port to create a queue on.
12432 * @cq: The queue structure to use to create the completion queue.
12433 * @eq: The event queue to bind this completion queue to.
12434 *
12435 * This function creates a completion queue, as detailed in @cq, on a port,
12436 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12437 *
12438 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12439 * is used to get the entry count and entry size that are necessary to
12440 * determine the number of pages to allocate and use for this queue. The @eq
12441 * is used to indicate which event queue to bind this completion queue to. This
12442 * function will send the CQ_CREATE mailbox command to the HBA to setup the
12443 * completion queue. This function issues the mailbox command synchronously
12444 * and waits for it to finish before continuing.
12445 *
12446 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
12447 * memory this function will return -ENOMEM. If the queue create mailbox command
12448 * fails this function will return -ENXIO.
4f774513
JS
12449 **/
12450uint32_t
12451lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12452 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
12453{
12454 struct lpfc_mbx_cq_create *cq_create;
12455 struct lpfc_dmabuf *dmabuf;
12456 LPFC_MBOXQ_t *mbox;
12457 int rc, length, status = 0;
12458 uint32_t shdr_status, shdr_add_status;
12459 union lpfc_sli4_cfg_shdr *shdr;
49198b37
JS
12460 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12461
2e90f4b5
JS
12462 /* sanity check on queue memory */
12463 if (!cq || !eq)
12464 return -ENODEV;
49198b37
JS
12465 if (!phba->sli4_hba.pc_sli4_params.supported)
12466 hw_page_size = SLI4_PAGE_SIZE;
12467
4f774513
JS
12468 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12469 if (!mbox)
12470 return -ENOMEM;
12471 length = (sizeof(struct lpfc_mbx_cq_create) -
12472 sizeof(struct lpfc_sli4_cfg_mhdr));
12473 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12474 LPFC_MBOX_OPCODE_CQ_CREATE,
12475 length, LPFC_SLI4_MBX_EMBED);
12476 cq_create = &mbox->u.mqe.un.cq_create;
5a6f133e 12477 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
4f774513
JS
12478 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
12479 cq->page_count);
12480 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
12481 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
5a6f133e
JS
12482 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12483 phba->sli4_hba.pc_sli4_params.cqv);
12484 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
c31098ce
JS
12485 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
12486 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
5a6f133e
JS
12487 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
12488 eq->queue_id);
12489 } else {
12490 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
12491 eq->queue_id);
12492 }
4f774513
JS
12493 switch (cq->entry_count) {
12494 default:
12495 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12496 "0361 Unsupported CQ count. (%d)\n",
12497 cq->entry_count);
4f4c1863
JS
12498 if (cq->entry_count < 256) {
12499 status = -EINVAL;
12500 goto out;
12501 }
4f774513
JS
12502 /* otherwise default to smallest count (drop through) */
12503 case 256:
12504 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12505 LPFC_CQ_CNT_256);
12506 break;
12507 case 512:
12508 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12509 LPFC_CQ_CNT_512);
12510 break;
12511 case 1024:
12512 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12513 LPFC_CQ_CNT_1024);
12514 break;
12515 }
12516 list_for_each_entry(dmabuf, &cq->page_list, list) {
49198b37 12517 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
12518 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12519 putPaddrLow(dmabuf->phys);
12520 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12521 putPaddrHigh(dmabuf->phys);
12522 }
12523 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12524
12525 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
12526 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12527 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12528 if (shdr_status || shdr_add_status || rc) {
12529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12530 "2501 CQ_CREATE mailbox failed with "
12531 "status x%x add_status x%x, mbx status x%x\n",
12532 shdr_status, shdr_add_status, rc);
12533 status = -ENXIO;
12534 goto out;
12535 }
12536 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12537 if (cq->queue_id == 0xFFFF) {
12538 status = -ENXIO;
12539 goto out;
12540 }
12541 /* link the cq onto the parent eq child list */
12542 list_add_tail(&cq->list, &eq->child_list);
12543 /* Set up completion queue's type and subtype */
12544 cq->type = type;
12545 cq->subtype = subtype;
2a622bfb 12547 cq->assoc_qid = eq->queue_id;
4f774513
JS
12548 cq->host_index = 0;
12549 cq->hba_index = 0;
4f774513 12550
8fa38513
JS
12551out:
12552 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
12553 return status;
12554}
12555
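/*
 * Editor's illustrative sketch (not part of the driver): the create
 * ordering implied by the parent/child linkage above -- an EQ must exist
 * before a CQ can bind to it.  Queue objects are assumed to come from
 * lpfc_sli4_queue_alloc(); error unwinding is elided and the function
 * name is hypothetical.
 */
static uint32_t lpfc_eq_cq_setup_sketch(struct lpfc_hba *phba,
					struct lpfc_queue *eq,
					struct lpfc_queue *cq)
{
	uint32_t rc;

	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
	if (rc)
		return rc;
	/* a fast-path FCP completion queue, bound to the EQ just created */
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
}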
b19a061a
JS
12556/**
12557 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
12558 * @phba: HBA structure that indicates port to create a queue on.
12559 * @mq: The queue structure to use to create the mailbox queue.
12560 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
12561 * @cq: The completion queue to associate with this mailbox queue.
12562 *
12563 * This function provides fallback (fb) functionality when the
12564 * mq_create_ext fails on older FW generations. Its purpose is identical
12565 * to mq_create_ext otherwise.
12566 *
12567 * This routine cannot fail as all attributes were previously accessed and
12568 * initialized in mq_create_ext.
12569 **/
12570static void
12571lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
12572 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
12573{
12574 struct lpfc_mbx_mq_create *mq_create;
12575 struct lpfc_dmabuf *dmabuf;
12576 int length;
12577
12578 length = (sizeof(struct lpfc_mbx_mq_create) -
12579 sizeof(struct lpfc_sli4_cfg_mhdr));
12580 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12581 LPFC_MBOX_OPCODE_MQ_CREATE,
12582 length, LPFC_SLI4_MBX_EMBED);
12583 mq_create = &mbox->u.mqe.un.mq_create;
12584 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
12585 mq->page_count);
12586 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
12587 cq->queue_id);
12588 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
12589 switch (mq->entry_count) {
12590 case 16:
5a6f133e
JS
12591 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12592 LPFC_MQ_RING_SIZE_16);
b19a061a
JS
12593 break;
12594 case 32:
5a6f133e
JS
12595 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12596 LPFC_MQ_RING_SIZE_32);
b19a061a
JS
12597 break;
12598 case 64:
5a6f133e
JS
12599 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12600 LPFC_MQ_RING_SIZE_64);
b19a061a
JS
12601 break;
12602 case 128:
5a6f133e
JS
12603 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12604 LPFC_MQ_RING_SIZE_128);
b19a061a
JS
12605 break;
12606 }
12607 list_for_each_entry(dmabuf, &mq->page_list, list) {
12608 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12609 putPaddrLow(dmabuf->phys);
12610 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12611 putPaddrHigh(dmabuf->phys);
12612 }
12613}
12614
04c68496
JS
12615/**
12616 * lpfc_mq_create - Create a mailbox Queue on the HBA
12617 * @phba: HBA structure that indicates port to create a queue on.
12618 * @mq: The queue structure to use to create the mailbox queue.
b19a061a
JS
12619 * @cq: The completion queue to associate with this mailbox queue.
12620 * @subtype: The queue's subtype.
04c68496
JS
12621 *
12622 * This function creates a mailbox queue, as detailed in @mq, on a port,
12623 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
12624 *
12625 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12626 * is used to get the entry count and entry size that are necessary to
12627 * determine the number of pages to allocate and use for this queue. This
12628 * function will send the MQ_CREATE mailbox command to the HBA to setup the
12629 * mailbox queue. This function issues the mailbox command synchronously
12630 * and waits for it to finish before continuing.
12631 *
12632 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
12633 * memory this function will return -ENOMEM. If the queue create mailbox command
12634 * fails this function will return -ENXIO.
04c68496 12635 **/
b19a061a 12636int32_t
04c68496
JS
12637lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12638 struct lpfc_queue *cq, uint32_t subtype)
12639{
12640 struct lpfc_mbx_mq_create *mq_create;
b19a061a 12641 struct lpfc_mbx_mq_create_ext *mq_create_ext;
04c68496
JS
12642 struct lpfc_dmabuf *dmabuf;
12643 LPFC_MBOXQ_t *mbox;
12644 int rc, length, status = 0;
12645 uint32_t shdr_status, shdr_add_status;
12646 union lpfc_sli4_cfg_shdr *shdr;
49198b37 12647 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 12648
2e90f4b5
JS
12649 /* sanity check on queue memory */
12650 if (!mq || !cq)
12651 return -ENODEV;
49198b37
JS
12652 if (!phba->sli4_hba.pc_sli4_params.supported)
12653 hw_page_size = SLI4_PAGE_SIZE;
b19a061a 12654
04c68496
JS
12655 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12656 if (!mbox)
12657 return -ENOMEM;
b19a061a 12658 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
04c68496
JS
12659 sizeof(struct lpfc_sli4_cfg_mhdr));
12660 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
b19a061a 12661 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
04c68496 12662 length, LPFC_SLI4_MBX_EMBED);
b19a061a
JS
12663
12664 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
5a6f133e 12665 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
70f3c073
JS
12666 bf_set(lpfc_mbx_mq_create_ext_num_pages,
12667 &mq_create_ext->u.request, mq->page_count);
12668 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
12669 &mq_create_ext->u.request, 1);
12670 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
b19a061a
JS
12671 &mq_create_ext->u.request, 1);
12672 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
12673 &mq_create_ext->u.request, 1);
70f3c073
JS
12674 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
12675 &mq_create_ext->u.request, 1);
12676 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
12677 &mq_create_ext->u.request, 1);
b19a061a 12678 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
5a6f133e
JS
12679 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12680 phba->sli4_hba.pc_sli4_params.mqv);
12681 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
12682 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
12683 cq->queue_id);
12684 else
12685 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
12686 cq->queue_id);
04c68496
JS
12687 switch (mq->entry_count) {
12688 default:
12689 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12690 "0362 Unsupported MQ count. (%d)\n",
12691 mq->entry_count);
4f4c1863
JS
12692 if (mq->entry_count < 16) {
12693 status = -EINVAL;
12694 goto out;
12695 }
04c68496
JS
12696 /* otherwise default to smallest count (drop through) */
12697 case 16:
5a6f133e
JS
12698 bf_set(lpfc_mq_context_ring_size,
12699 &mq_create_ext->u.request.context,
12700 LPFC_MQ_RING_SIZE_16);
04c68496
JS
12701 break;
12702 case 32:
5a6f133e
JS
12703 bf_set(lpfc_mq_context_ring_size,
12704 &mq_create_ext->u.request.context,
12705 LPFC_MQ_RING_SIZE_32);
04c68496
JS
12706 break;
12707 case 64:
5a6f133e
JS
12708 bf_set(lpfc_mq_context_ring_size,
12709 &mq_create_ext->u.request.context,
12710 LPFC_MQ_RING_SIZE_64);
04c68496
JS
12711 break;
12712 case 128:
5a6f133e
JS
12713 bf_set(lpfc_mq_context_ring_size,
12714 &mq_create_ext->u.request.context,
12715 LPFC_MQ_RING_SIZE_128);
04c68496
JS
12716 break;
12717 }
12718 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 12719 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 12720 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 12721 putPaddrLow(dmabuf->phys);
b19a061a 12722 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
04c68496
JS
12723 putPaddrHigh(dmabuf->phys);
12724 }
12725 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
b19a061a
JS
12726 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12727 &mq_create_ext->u.response);
12728 if (rc != MBX_SUCCESS) {
12729 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12730 "2795 MQ_CREATE_EXT failed with "
12731 "status x%x. Failback to MQ_CREATE.\n",
12732 rc);
12733 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12734 mq_create = &mbox->u.mqe.un.mq_create;
12735 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12736 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12737 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12738 &mq_create->u.response);
12739 }
12740
04c68496 12741 /* The IOCTL status is embedded in the mailbox subheader. */
04c68496
JS
12742 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12743 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12744 if (shdr_status || shdr_add_status || rc) {
12745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12746 "2502 MQ_CREATE mailbox failed with "
12747 "status x%x add_status x%x, mbx status x%x\n",
12748 shdr_status, shdr_add_status, rc);
12749 status = -ENXIO;
12750 goto out;
12751 }
04c68496
JS
12752 if (mq->queue_id == 0xFFFF) {
12753 status = -ENXIO;
12754 goto out;
12755 }
12756 mq->type = LPFC_MQ;
2a622bfb 12757 mq->assoc_qid = cq->queue_id;
04c68496
JS
12758 mq->subtype = subtype;
12759 mq->host_index = 0;
12760 mq->hba_index = 0;
12761
12762 /* link the mq onto the parent cq child list */
12763 list_add_tail(&mq->list, &cq->child_list);
12764out:
8fa38513 12765 mempool_free(mbox, phba->mbox_mem_pool);
04c68496
JS
12766 return status;
12767}
12768
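/*
 * Editor's illustrative sketch (not part of the driver): because
 * lpfc_mq_create() above silently retries with the legacy MQ_CREATE when
 * MQ_CREATE_EXT is rejected by older firmware, a caller only binds the
 * mailbox queue to its slow-path CQ and checks one return code.  The
 * function name is hypothetical.
 */
static int32_t lpfc_mq_setup_sketch(struct lpfc_hba *phba,
				    struct lpfc_queue *mq,
				    struct lpfc_queue *mbx_cq)
{
	/* LPFC_MBOX marks the queue as carrying mailbox traffic */
	return lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX);
}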
4f774513
JS
12769/**
12770 * lpfc_wq_create - Create a Work Queue on the HBA
12771 * @phba: HBA structure that indicates port to create a queue on.
12772 * @wq: The queue structure to use to create the work queue.
12773 * @cq: The completion queue to bind this work queue to.
12774 * @subtype: The subtype of the work queue indicating its functionality.
12775 *
12776 * This function creates a work queue, as detailed in @wq, on a port, described
12777 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
12778 *
12779 * The @phba struct is used to send mailbox command to HBA. The @wq struct
12780 * is used to get the entry count and entry size that are necessary to
12781 * determine the number of pages to allocate and use for this queue. The @cq
12782 * is used to indicate which completion queue to bind this work queue to. This
12783 * function will send the WQ_CREATE mailbox command to the HBA to setup the
12784 * work queue. This function issues the mailbox command synchronously and
12785 * waits for it to finish before continuing.
12786 *
12787 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
12788 * memory this function will return -ENOMEM. If the queue create mailbox command
12789 * fails this function will return -ENXIO.
4f774513
JS
12790 **/
12791uint32_t
12792lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12793 struct lpfc_queue *cq, uint32_t subtype)
12794{
12795 struct lpfc_mbx_wq_create *wq_create;
12796 struct lpfc_dmabuf *dmabuf;
12797 LPFC_MBOXQ_t *mbox;
12798 int rc, length, status = 0;
12799 uint32_t shdr_status, shdr_add_status;
12800 union lpfc_sli4_cfg_shdr *shdr;
49198b37 12801 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
5a6f133e 12802 struct dma_address *page;
962bc51b
JS
12803 void __iomem *bar_memmap_p;
12804 uint32_t db_offset;
12805 uint16_t pci_barset;
49198b37 12806
2e90f4b5
JS
12807 /* sanity check on queue memory */
12808 if (!wq || !cq)
12809 return -ENODEV;
49198b37
JS
12810 if (!phba->sli4_hba.pc_sli4_params.supported)
12811 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
12812
12813 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12814 if (!mbox)
12815 return -ENOMEM;
12816 length = (sizeof(struct lpfc_mbx_wq_create) -
12817 sizeof(struct lpfc_sli4_cfg_mhdr));
12818 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12819 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12820 length, LPFC_SLI4_MBX_EMBED);
12821 wq_create = &mbox->u.mqe.un.wq_create;
5a6f133e 12822 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
4f774513
JS
12823 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12824 wq->page_count);
12825 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12826 cq->queue_id);
5a6f133e
JS
12827 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12828 phba->sli4_hba.pc_sli4_params.wqv);
962bc51b 12829
5a6f133e
JS
12830 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12831 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12832 wq->entry_count);
12833 switch (wq->entry_size) {
12834 default:
12835 case 64:
12836 bf_set(lpfc_mbx_wq_create_wqe_size,
12837 &wq_create->u.request_1,
12838 LPFC_WQ_WQE_SIZE_64);
12839 break;
12840 case 128:
12841 bf_set(lpfc_mbx_wq_create_wqe_size,
12842 &wq_create->u.request_1,
12843 LPFC_WQ_WQE_SIZE_128);
12844 break;
12845 }
12846 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12847 (PAGE_SIZE/SLI4_PAGE_SIZE));
12848 page = wq_create->u.request_1.page;
12849 } else {
12850 page = wq_create->u.request.page;
12851 }
4f774513 12852 list_for_each_entry(dmabuf, &wq->page_list, list) {
49198b37 12853 memset(dmabuf->virt, 0, hw_page_size);
5a6f133e
JS
12854 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12855 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
4f774513 12856 }
962bc51b
JS
12857
12858 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
12859 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
12860
4f774513
JS
12861 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12862 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
12863 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12864 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12865 if (shdr_status || shdr_add_status || rc) {
12866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12867 "2503 WQ_CREATE mailbox failed with "
12868 "status x%x add_status x%x, mbx status x%x\n",
12869 shdr_status, shdr_add_status, rc);
12870 status = -ENXIO;
12871 goto out;
12872 }
12873 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12874 if (wq->queue_id == 0xFFFF) {
12875 status = -ENXIO;
12876 goto out;
12877 }
962bc51b
JS
12878 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
12879 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
12880 &wq_create->u.response);
12881 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
12882 (wq->db_format != LPFC_DB_RING_FORMAT)) {
12883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12884 "3265 WQ[%d] doorbell format not "
12885 "supported: x%x\n", wq->queue_id,
12886 wq->db_format);
12887 status = -EINVAL;
12888 goto out;
12889 }
12890 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
12891 &wq_create->u.response);
12892 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
12893 if (!bar_memmap_p) {
12894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12895 "3263 WQ[%d] failed to memmap pci "
12896 "barset:x%x\n", wq->queue_id,
12897 pci_barset);
12898 status = -ENOMEM;
12899 goto out;
12900 }
12901 db_offset = wq_create->u.response.doorbell_offset;
12902 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
12903 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
12904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12905 "3252 WQ[%d] doorbell offset not "
12906 "supported: x%x\n", wq->queue_id,
12907 db_offset);
12908 status = -EINVAL;
12909 goto out;
12910 }
12911 wq->db_regaddr = bar_memmap_p + db_offset;
12912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12913 "3264 WQ[%d]: barset:x%x, offset:x%x\n",
12914 wq->queue_id, pci_barset, db_offset);
12915 } else {
12916 wq->db_format = LPFC_DB_LIST_FORMAT;
12917 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
12918 }
4f774513 12919 wq->type = LPFC_WQ;
2a622bfb 12920 wq->assoc_qid = cq->queue_id;
4f774513
JS
12921 wq->subtype = subtype;
12922 wq->host_index = 0;
12923 wq->hba_index = 0;
ff78d8f9 12924 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
4f774513
JS
12925
12926 /* link the wq onto the parent cq child list */
12927 list_add_tail(&wq->list, &cq->child_list);
12928out:
8fa38513 12929 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
12930 return status;
12931}
12932
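/*
 * Editor's illustrative sketch (not part of the driver): creating a
 * fast-path work queue on an existing CQ.  After a successful return,
 * wq->db_regaddr already points at the negotiated doorbell (the
 * ULP-specific one in dual-chute mode, the default WQ doorbell
 * otherwise), so submitters never test LPFC_DUA_MODE themselves.  The
 * function name is hypothetical.
 */
static uint32_t lpfc_wq_setup_sketch(struct lpfc_hba *phba,
				     struct lpfc_queue *wq,
				     struct lpfc_queue *cq)
{
	/* LPFC_FCP subtype: this WQ will carry SCSI command WQEs */
	return lpfc_wq_create(phba, wq, cq, LPFC_FCP);
}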
73d91e50
JS
12933/**
12934 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12935 * @phba: HBA structure that indicates port to create a queue on.
12936 * @rq: The queue structure to use for the receive queue.
12937 * @qno: The associated HBQ number
12938 *
12940 * For SLI4 we need to adjust the RQ repost value based on
12941 * the number of buffers that are initially posted to the RQ.
12942 */
12943void
12944lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12945{
12946 uint32_t cnt;
12947
2e90f4b5
JS
12948 /* sanity check on queue memory */
12949 if (!rq)
12950 return;
73d91e50
JS
12951 cnt = lpfc_hbq_defs[qno]->entry_count;
12952
12953 /* Recalc repost for RQs based on buffers initially posted */
12954 cnt = (cnt >> 3);
12955 if (cnt < LPFC_QUEUE_MIN_REPOST)
12956 cnt = LPFC_QUEUE_MIN_REPOST;
12957
12958 rq->entry_repost = cnt;
12959}
12960
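/*
 * Editor's worked example (not part of the driver): for a hypothetical
 * HBQ definition with entry_count == 512, the repost threshold above is
 * 512 >> 3 == 64 -- the doorbell is rung after every 64 buffers are
 * consumed -- unless that result is below LPFC_QUEUE_MIN_REPOST, in
 * which case the minimum is used instead.  The wrapper name is
 * hypothetical.
 */
static void lpfc_rq_repost_sketch(struct lpfc_hba *phba,
				  struct lpfc_queue *hdr_rq)
{
	/* LPFC_ELS_HBQ selects the ELS buffer definition in lpfc_hbq_defs */
	lpfc_rq_adjust_repost(phba, hdr_rq, LPFC_ELS_HBQ);
}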
4f774513
JS
12961/**
12962 * lpfc_rq_create - Create a Receive Queue on the HBA
12963 * @phba: HBA structure that indicates port to create a queue on.
12964 * @hrq: The queue structure to use to create the header receive queue.
12965 * @drq: The queue structure to use to create the data receive queue.
12966 * @cq: The completion queue to bind this work queue to.
12967 *
12968 * This function creates a receive buffer queue pair, as detailed in @hrq and
12969 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
12970 * to the HBA.
12971 *
12972 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
12973 * struct is used to get the entry count that is necessary to determine the
12974 * number of pages to use for this queue. The @cq is used to indicate which
12975 * completion queue that buffers posted to these queues will complete on.
12976 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
12977 * receive queue pair. This function issues the mailbox command synchronously
12978 * and waits for it to finish before continuing.
12979 *
12980 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
12981 * memory this function will return -ENOMEM. If the queue create mailbox command
12982 * fails this function will return -ENXIO.
4f774513
JS
12983 **/
12984uint32_t
12985lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12986 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12987{
12988 struct lpfc_mbx_rq_create *rq_create;
12989 struct lpfc_dmabuf *dmabuf;
12990 LPFC_MBOXQ_t *mbox;
12991 int rc, length, status = 0;
12992 uint32_t shdr_status, shdr_add_status;
12993 union lpfc_sli4_cfg_shdr *shdr;
49198b37 12994 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
962bc51b
JS
12995 void __iomem *bar_memmap_p;
12996 uint32_t db_offset;
12997 uint16_t pci_barset;
49198b37 12998
2e90f4b5
JS
12999 /* sanity check on queue memory */
13000 if (!hrq || !drq || !cq)
13001 return -ENODEV;
49198b37
JS
13002 if (!phba->sli4_hba.pc_sli4_params.supported)
13003 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
13004
13005 if (hrq->entry_count != drq->entry_count)
13006 return -EINVAL;
13007 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13008 if (!mbox)
13009 return -ENOMEM;
13010 length = (sizeof(struct lpfc_mbx_rq_create) -
13011 sizeof(struct lpfc_sli4_cfg_mhdr));
13012 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13013 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13014 length, LPFC_SLI4_MBX_EMBED);
13015 rq_create = &mbox->u.mqe.un.rq_create;
5a6f133e
JS
13016 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13017 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13018 phba->sli4_hba.pc_sli4_params.rqv);
13019 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13020 bf_set(lpfc_rq_context_rqe_count_1,
13021 &rq_create->u.request.context,
13022 hrq->entry_count);
13023 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
c31098ce
JS
13024 bf_set(lpfc_rq_context_rqe_size,
13025 &rq_create->u.request.context,
13026 LPFC_RQE_SIZE_8);
13027 bf_set(lpfc_rq_context_page_size,
13028 &rq_create->u.request.context,
13029 (PAGE_SIZE/SLI4_PAGE_SIZE));
5a6f133e
JS
13030 } else {
13031 switch (hrq->entry_count) {
13032 default:
13033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13034 "2535 Unsupported RQ count. (%d)\n",
13035 hrq->entry_count);
4f4c1863
JS
13036 if (hrq->entry_count < 512) {
13037 status = -EINVAL;
13038 goto out;
13039 }
5a6f133e
JS
13040 /* otherwise default to smallest count (drop through) */
13041 case 512:
13042 bf_set(lpfc_rq_context_rqe_count,
13043 &rq_create->u.request.context,
13044 LPFC_RQ_RING_SIZE_512);
13045 break;
13046 case 1024:
13047 bf_set(lpfc_rq_context_rqe_count,
13048 &rq_create->u.request.context,
13049 LPFC_RQ_RING_SIZE_1024);
13050 break;
13051 case 2048:
13052 bf_set(lpfc_rq_context_rqe_count,
13053 &rq_create->u.request.context,
13054 LPFC_RQ_RING_SIZE_2048);
13055 break;
13056 case 4096:
13057 bf_set(lpfc_rq_context_rqe_count,
13058 &rq_create->u.request.context,
13059 LPFC_RQ_RING_SIZE_4096);
13060 break;
13061 }
13062 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13063 LPFC_HDR_BUF_SIZE);
4f774513
JS
13064 }
13065 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13066 cq->queue_id);
13067 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13068 hrq->page_count);
4f774513 13069 list_for_each_entry(dmabuf, &hrq->page_list, list) {
49198b37 13070 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
13071 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13072 putPaddrLow(dmabuf->phys);
13073 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13074 putPaddrHigh(dmabuf->phys);
13075 }
962bc51b
JS
13076 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13077 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13078
4f774513
JS
13079 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13080 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
13081 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13082 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13083 if (shdr_status || shdr_add_status || rc) {
13084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13085 "2504 RQ_CREATE mailbox failed with "
13086 "status x%x add_status x%x, mbx status x%x\n",
13087 shdr_status, shdr_add_status, rc);
13088 status = -ENXIO;
13089 goto out;
13090 }
13091 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13092 if (hrq->queue_id == 0xFFFF) {
13093 status = -ENXIO;
13094 goto out;
13095 }
962bc51b
JS
13096
13097 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13098 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13099 &rq_create->u.response);
13100 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13101 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13103 "3262 RQ [%d] doorbell format not "
13104 "supported: x%x\n", hrq->queue_id,
13105 hrq->db_format);
13106 status = -EINVAL;
13107 goto out;
13108 }
13109
13110 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
13111 &rq_create->u.response);
13112 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13113 if (!bar_memmap_p) {
13114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13115 "3269 RQ[%d] failed to memmap pci "
13116 "barset:x%x\n", hrq->queue_id,
13117 pci_barset);
13118 status = -ENOMEM;
13119 goto out;
13120 }
13121
13122 db_offset = rq_create->u.response.doorbell_offset;
13123 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
13124 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
13125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13126 "3270 RQ[%d] doorbell offset not "
13127 "supported: x%x\n", hrq->queue_id,
13128 db_offset);
13129 status = -EINVAL;
13130 goto out;
13131 }
13132 hrq->db_regaddr = bar_memmap_p + db_offset;
13133 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13134 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
13135 hrq->queue_id, pci_barset, db_offset);
13136 } else {
13137 hrq->db_format = LPFC_DB_RING_FORMAT;
13138 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
13139 }
4f774513 13140 hrq->type = LPFC_HRQ;
2a622bfb 13141 hrq->assoc_qid = cq->queue_id;
4f774513
JS
13142 hrq->subtype = subtype;
13143 hrq->host_index = 0;
13144 hrq->hba_index = 0;
13145
13146 /* now create the data queue */
13147 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13148 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13149 length, LPFC_SLI4_MBX_EMBED);
5a6f133e
JS
13150 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13151 phba->sli4_hba.pc_sli4_params.rqv);
13152 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13153 bf_set(lpfc_rq_context_rqe_count_1,
c31098ce 13154 &rq_create->u.request.context, hrq->entry_count);
5a6f133e 13155 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
c31098ce
JS
13156 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
13157 LPFC_RQE_SIZE_8);
13158 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
13159 (PAGE_SIZE/SLI4_PAGE_SIZE));
5a6f133e
JS
13160 } else {
13161 switch (drq->entry_count) {
13162 default:
13163 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13164 "2536 Unsupported RQ count. (%d)\n",
13165 drq->entry_count);
4f4c1863
JS
13166 if (drq->entry_count < 512) {
13167 status = -EINVAL;
13168 goto out;
13169 }
5a6f133e
JS
13170 /* otherwise default to smallest count (drop through) */
13171 case 512:
13172 bf_set(lpfc_rq_context_rqe_count,
13173 &rq_create->u.request.context,
13174 LPFC_RQ_RING_SIZE_512);
13175 break;
13176 case 1024:
13177 bf_set(lpfc_rq_context_rqe_count,
13178 &rq_create->u.request.context,
13179 LPFC_RQ_RING_SIZE_1024);
13180 break;
13181 case 2048:
13182 bf_set(lpfc_rq_context_rqe_count,
13183 &rq_create->u.request.context,
13184 LPFC_RQ_RING_SIZE_2048);
13185 break;
13186 case 4096:
13187 bf_set(lpfc_rq_context_rqe_count,
13188 &rq_create->u.request.context,
13189 LPFC_RQ_RING_SIZE_4096);
13190 break;
13191 }
13192 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13193 LPFC_DATA_BUF_SIZE);
4f774513
JS
13194 }
13195 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13196 cq->queue_id);
13197 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13198 drq->page_count);
4f774513
JS
13199 list_for_each_entry(dmabuf, &drq->page_list, list) {
13200 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13201 putPaddrLow(dmabuf->phys);
13202 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13203 putPaddrHigh(dmabuf->phys);
13204 }
962bc51b
JS
13205 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13206 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
4f774513
JS
13207 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13208 /* The IOCTL status is embedded in the mailbox subheader. */
13209 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13210 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13211 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13212 if (shdr_status || shdr_add_status || rc) {
13213 status = -ENXIO;
13214 goto out;
13215 }
13216 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13217 if (drq->queue_id == 0xFFFF) {
13218 status = -ENXIO;
13219 goto out;
13220 }
13221 drq->type = LPFC_DRQ;
2a622bfb 13222 drq->assoc_qid = cq->queue_id;
4f774513
JS
13223 drq->subtype = subtype;
13224 drq->host_index = 0;
13225 drq->hba_index = 0;
13226
13227 /* link the header and data RQs onto the parent cq child list */
13228 list_add_tail(&hrq->list, &cq->child_list);
13229 list_add_tail(&drq->list, &cq->child_list);
13230
13231out:
8fa38513 13232 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
13233 return status;
13234}
13235
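/*
 * Editor's illustrative sketch (not part of the driver): creating the
 * header/data receive queue pair.  Both queues must share one entry_count
 * (enforced above) and complete on the same CQ; LPFC_USOL marks them as
 * carrying unsolicited frames.  The function name is hypothetical and
 * error unwinding is elided.
 */
static uint32_t lpfc_rq_pair_setup_sketch(struct lpfc_hba *phba,
					  struct lpfc_queue *hrq,
					  struct lpfc_queue *drq,
					  struct lpfc_queue *cq)
{
	uint32_t rc;

	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
	if (rc)
		return rc;
	/* recompute doorbell thresholds for the buffers initially posted */
	lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);
	return 0;
}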
13236/**
13237 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
13238 * @eq: The queue structure associated with the queue to destroy.
13239 *
13240 * This function destroys a queue, as detailed in @eq, by sending a mailbox
13241 * command, specific to the type of queue, to the HBA.
13242 *
13243 * The @eq struct is used to get the queue ID of the queue to destroy.
13244 *
13245 * On success this function will return a zero. If the queue destroy mailbox
d439d286 13246 * command fails this function will return -ENXIO.
4f774513
JS
13247 **/
13248uint32_t
13249lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13250{
13251 LPFC_MBOXQ_t *mbox;
13252 int rc, length, status = 0;
13253 uint32_t shdr_status, shdr_add_status;
13254 union lpfc_sli4_cfg_shdr *shdr;
13255
2e90f4b5 13256 /* sanity check on queue memory */
4f774513
JS
13257 if (!eq)
13258 return -ENODEV;
13259 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
13260 if (!mbox)
13261 return -ENOMEM;
13262 length = (sizeof(struct lpfc_mbx_eq_destroy) -
13263 sizeof(struct lpfc_sli4_cfg_mhdr));
13264 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13265 LPFC_MBOX_OPCODE_EQ_DESTROY,
13266 length, LPFC_SLI4_MBX_EMBED);
13267 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
13268 eq->queue_id);
13269 mbox->vport = eq->phba->pport;
13270 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13271
13272 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
13273 /* The IOCTL status is embedded in the mailbox subheader. */
13274 shdr = (union lpfc_sli4_cfg_shdr *)
13275 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
13276 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13277 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13278 if (shdr_status || shdr_add_status || rc) {
13279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13280 "2505 EQ_DESTROY mailbox failed with "
13281 "status x%x add_status x%x, mbx status x%x\n",
13282 shdr_status, shdr_add_status, rc);
13283 status = -ENXIO;
13284 }
13285
13286 /* Remove eq from any list */
13287 list_del_init(&eq->list);
8fa38513 13288 mempool_free(mbox, eq->phba->mbox_mem_pool);
4f774513
JS
13289 return status;
13290}
13291
13292/**
13293 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
13294 * @cq: The queue structure associated with the queue to destroy.
13295 *
13296 * This function destroys a queue, as detailed in @cq, by sending a mailbox
13297 * command, specific to the type of queue, to the HBA.
13298 *
13299 * The @cq struct is used to get the queue ID of the queue to destroy.
13300 *
13301 * On success this function will return a zero. If the queue destroy mailbox
d439d286 13302 * command fails this function will return -ENXIO.
4f774513
JS
13303 **/
13304uint32_t
13305lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
13306{
13307 LPFC_MBOXQ_t *mbox;
13308 int rc, length, status = 0;
13309 uint32_t shdr_status, shdr_add_status;
13310 union lpfc_sli4_cfg_shdr *shdr;
13311
2e90f4b5 13312 /* sanity check on queue memory */
4f774513
JS
13313 if (!cq)
13314 return -ENODEV;
13315 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
13316 if (!mbox)
13317 return -ENOMEM;
13318 length = (sizeof(struct lpfc_mbx_cq_destroy) -
13319 sizeof(struct lpfc_sli4_cfg_mhdr));
13320 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13321 LPFC_MBOX_OPCODE_CQ_DESTROY,
13322 length, LPFC_SLI4_MBX_EMBED);
13323 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
13324 cq->queue_id);
13325 mbox->vport = cq->phba->pport;
13326 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13327 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
13328 /* The IOCTL status is embedded in the mailbox subheader. */
13329 shdr = (union lpfc_sli4_cfg_shdr *)
13330 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
13331 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13332 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13333 if (shdr_status || shdr_add_status || rc) {
13334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13335 "2506 CQ_DESTROY mailbox failed with "
13336 "status x%x add_status x%x, mbx status x%x\n",
13337 shdr_status, shdr_add_status, rc);
13338 status = -ENXIO;
13339 }
13340 /* Remove cq from any list */
13341 list_del_init(&cq->list);
8fa38513 13342 mempool_free(mbox, cq->phba->mbox_mem_pool);
4f774513
JS
13343 return status;
13344}
13345
04c68496
JS
13346/**
13347 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
13348 * @mq: The queue structure associated with the queue to destroy.
13349 *
13350 * This function destroys a queue, as detailed in @mq, by sending a mailbox
13351 * command, specific to the type of queue, to the HBA.
13352 *
13353 * The @mq struct is used to get the queue ID of the queue to destroy.
13354 *
13355 * On success this function will return a zero. If the queue destroy mailbox
d439d286 13356 * command fails this function will return -ENXIO.
04c68496
JS
13357 **/
13358uint32_t
13359lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
13360{
13361 LPFC_MBOXQ_t *mbox;
13362 int rc, length, status = 0;
13363 uint32_t shdr_status, shdr_add_status;
13364 union lpfc_sli4_cfg_shdr *shdr;
13365
2e90f4b5 13366 /* sanity check on queue memory */
04c68496
JS
13367 if (!mq)
13368 return -ENODEV;
13369 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
13370 if (!mbox)
13371 return -ENOMEM;
13372 length = (sizeof(struct lpfc_mbx_mq_destroy) -
13373 sizeof(struct lpfc_sli4_cfg_mhdr));
13374 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13375 LPFC_MBOX_OPCODE_MQ_DESTROY,
13376 length, LPFC_SLI4_MBX_EMBED);
13377 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
13378 mq->queue_id);
13379 mbox->vport = mq->phba->pport;
13380 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13381 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
13382 /* The IOCTL status is embedded in the mailbox subheader. */
13383 shdr = (union lpfc_sli4_cfg_shdr *)
13384 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
13385 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13386 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13387 if (shdr_status || shdr_add_status || rc) {
13388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13389 "2507 MQ_DESTROY mailbox failed with "
13390 "status x%x add_status x%x, mbx status x%x\n",
13391 shdr_status, shdr_add_status, rc);
13392 status = -ENXIO;
13393 }
13394 /* Remove mq from any list */
13395 list_del_init(&mq->list);
8fa38513 13396 mempool_free(mbox, mq->phba->mbox_mem_pool);
04c68496
JS
13397 return status;
13398}
13399
4f774513
JS
13400/**
13401 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
13402 * @wq: The queue structure associated with the queue to destroy.
13403 *
13404 * This function destroys a queue, as detailed in @wq, by sending a mailbox
13405 * command, specific to the type of queue, to the HBA.
13406 *
13407 * The @wq struct is used to get the queue ID of the queue to destroy.
13408 *
13409 * On success this function will return a zero. If the queue destroy mailbox
d439d286 13410 * command fails this function will return -ENXIO.
4f774513
JS
13411 **/
13412uint32_t
13413lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
13414{
13415 LPFC_MBOXQ_t *mbox;
13416 int rc, length, status = 0;
13417 uint32_t shdr_status, shdr_add_status;
13418 union lpfc_sli4_cfg_shdr *shdr;
13419
2e90f4b5 13420 /* sanity check on queue memory */
4f774513
JS
13421 if (!wq)
13422 return -ENODEV;
13423 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
13424 if (!mbox)
13425 return -ENOMEM;
13426 length = (sizeof(struct lpfc_mbx_wq_destroy) -
13427 sizeof(struct lpfc_sli4_cfg_mhdr));
13428 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13429 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
13430 length, LPFC_SLI4_MBX_EMBED);
13431 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
13432 wq->queue_id);
13433 mbox->vport = wq->phba->pport;
13434 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13435 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
13436 shdr = (union lpfc_sli4_cfg_shdr *)
13437 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
13438 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13439 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13440 if (shdr_status || shdr_add_status || rc) {
13441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13442 "2508 WQ_DESTROY mailbox failed with "
13443 "status x%x add_status x%x, mbx status x%x\n",
13444 shdr_status, shdr_add_status, rc);
13445 status = -ENXIO;
13446 }
13447 /* Remove wq from any list */
13448 list_del_init(&wq->list);
8fa38513 13449 mempool_free(mbox, wq->phba->mbox_mem_pool);
4f774513
JS
13450 return status;
13451}
13452
13453/**
13454 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
13455 * @hrq/@drq: The header/data receive queue pair to destroy.
13456 *
13457 * This function destroys the receive queue pair, as detailed in @hrq and
13458 * @drq, by sending a mailbox command, specific to the type of queue, to the HBA.
13459 *
13460 * The @hrq and @drq structs are used to get the queue IDs to destroy.
13461 *
13462 * On success this function will return a zero. If the queue destroy mailbox
d439d286 13463 * command fails this function will return -ENXIO.
4f774513
JS
13464 **/
13465uint32_t
13466lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13467 struct lpfc_queue *drq)
13468{
13469 LPFC_MBOXQ_t *mbox;
13470 int rc, length, status = 0;
13471 uint32_t shdr_status, shdr_add_status;
13472 union lpfc_sli4_cfg_shdr *shdr;
13473
2e90f4b5 13474 /* sanity check on queue memory */
4f774513
JS
13475 if (!hrq || !drq)
13476 return -ENODEV;
13477 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
13478 if (!mbox)
13479 return -ENOMEM;
13480 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 13481 sizeof(struct lpfc_sli4_cfg_mhdr));
4f774513
JS
13482 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13483 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
13484 length, LPFC_SLI4_MBX_EMBED);
13485 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13486 hrq->queue_id);
13487 mbox->vport = hrq->phba->pport;
13488 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13489 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
13490 /* The IOCTL status is embedded in the mailbox subheader. */
13491 shdr = (union lpfc_sli4_cfg_shdr *)
13492 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13493 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13494 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13495 if (shdr_status || shdr_add_status || rc) {
13496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13497 "2509 RQ_DESTROY mailbox failed with "
13498 "status x%x add_status x%x, mbx status x%x\n",
13499 shdr_status, shdr_add_status, rc);
13500 if (rc != MBX_TIMEOUT)
13501 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13502 return -ENXIO;
13503 }
13504 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13505 drq->queue_id);
13506 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
13507 shdr = (union lpfc_sli4_cfg_shdr *)
13508 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13509 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13510 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13511 if (shdr_status || shdr_add_status || rc) {
13512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13513 "2510 RQ_DESTROY mailbox failed with "
13514 "status x%x add_status x%x, mbx status x%x\n",
13515 shdr_status, shdr_add_status, rc);
13516 status = -ENXIO;
13517 }
13518 list_del_init(&hrq->list);
13519 list_del_init(&drq->list);
8fa38513 13520 mempool_free(mbox, hrq->phba->mbox_mem_pool);
4f774513
JS
13521 return status;
13522}
13523
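/*
 * Editor's illustrative sketch (not part of the driver): tearing queues
 * down in the reverse of create order.  The destroy mailbox commands do
 * not enforce ordering themselves, so children (WQ, RQ pair) go before
 * their parent CQ, and the CQ before its parent EQ, mirroring the
 * child-list linkage built at create time.  The function name is
 * hypothetical; return codes are ignored for brevity.
 */
static void lpfc_queue_teardown_sketch(struct lpfc_hba *phba,
				       struct lpfc_queue *eq,
				       struct lpfc_queue *cq,
				       struct lpfc_queue *wq,
				       struct lpfc_queue *hrq,
				       struct lpfc_queue *drq)
{
	lpfc_wq_destroy(phba, wq);		/* leaf queues first */
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_cq_destroy(phba, cq);		/* then the parent CQ */
	lpfc_eq_destroy(phba, eq);		/* the EQ last */
}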
13524/**
13525 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
13526 * @phba: pointer to lpfc hba data structure.
13527 * @pdma_phys_addr0: Physical address of the 1st SGL page.
13528 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
13529 * @xritag: the xritag that ties this io to the SGL pages.
13530 *
13531 * This routine will post the sgl pages for the IO that has the xritag
13532 * that is in the iocbq structure. The xritag is assigned during iocbq
13533 * creation and persists for as long as the driver is loaded.
13534 * If the caller has fewer than 256 scatter gather segments to map, then
13535 * pdma_phys_addr1 should be 0.
13536 * If the caller needs to map more than 256 scatter gather segments, then
13537 * pdma_phys_addr1 should be a valid physical address.
13538 * Physical addresses for SGLs must be 64-byte aligned.
13539 * If two SGL pages are mapped, the first one must have 256 entries and
13540 * the second can have between 1 and 256 entries.
13541 *
13542 * Return codes:
13543 * 0 - Success
13544 * -ENXIO, -ENOMEM - Failure
13545 **/
13546int
13547lpfc_sli4_post_sgl(struct lpfc_hba *phba,
13548 dma_addr_t pdma_phys_addr0,
13549 dma_addr_t pdma_phys_addr1,
13550 uint16_t xritag)
13551{
13552 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
13553 LPFC_MBOXQ_t *mbox;
13554 int rc;
13555 uint32_t shdr_status, shdr_add_status;
6d368e53 13556 uint32_t mbox_tmo;
4f774513
JS
13557 union lpfc_sli4_cfg_shdr *shdr;
13558
13559 if (xritag == NO_XRI) {
13560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13561 "0364 Invalid param:\n");
13562 return -EINVAL;
13563 }
13564
13565 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13566 if (!mbox)
13567 return -ENOMEM;
13568
13569 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13570 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13571 sizeof(struct lpfc_mbx_post_sgl_pages) -
fedd3b7b 13572 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
4f774513
JS
13573
13574 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
13575 &mbox->u.mqe.un.post_sgl_pages;
13576 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
13577 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
13578
13579 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
13580 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
13581 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
13582 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
13583
13584 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
13585 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
13586 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
13587 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
13588 if (!phba->sli4_hba.intr_enable)
13589 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6d368e53 13590 else {
a183a15f 13591 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
13592 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13593 }
4f774513
JS
13594 /* The IOCTL status is embedded in the mailbox subheader. */
13595 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
13596 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13598 if (rc != MBX_TIMEOUT)
13599 mempool_free(mbox, phba->mbox_mem_pool);
13600 if (shdr_status || shdr_add_status || rc) {
13601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13602 "2511 POST_SGL mailbox failed with "
13603 "status x%x add_status x%x, mbx status x%x\n",
13604 shdr_status, shdr_add_status, rc);
13605 rc = -ENXIO;
13606 }
13607 return rc;
13608}
4f774513 13609
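/*
 * Editor's illustrative sketch (not part of the driver): posting the SGL
 * page(s) for one XRI per the rules in the comment above.  One page
 * covers up to 256 scatter gather entries; the second physical address is
 * 0 whenever a second page is not needed.  The dmabufs are assumed to be
 * 64-byte-aligned allocations owned by the caller; the function name is
 * hypothetical.
 */
static int lpfc_post_one_sgl_sketch(struct lpfc_hba *phba,
				    struct lpfc_dmabuf *sgl_pg0,
				    struct lpfc_dmabuf *sgl_pg1,
				    uint16_t xritag)
{
	dma_addr_t addr1 = sgl_pg1 ? sgl_pg1->phys : 0;

	return lpfc_sli4_post_sgl(phba, sgl_pg0->phys, addr1, xritag);
}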
6d368e53 13610/**
88a2cfbb 13611 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
13612 * @phba: pointer to lpfc hba data structure.
13613 *
13614 * This routine is invoked to allocate the next available logical xri
13615 * from the driver's xri bitmask, consistent with the SLI-4 interface
13616 * spec, and to mark it as in use.
13617 *
13618 * Returns
13619 * An available xri in the range 0 <= xri < max_xri if successful,
13620 * NO_XRI if no xris are available.
13622 **/
6d368e53
JS
13623uint16_t
13624lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13625{
13626 unsigned long xri;
13627
13628 /*
13629 * Fetch the next logical xri. Because this index is logical,
13630 * the driver starts at 0 each time.
13631 */
13632 spin_lock_irq(&phba->hbalock);
13633 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13634 phba->sli4_hba.max_cfg_param.max_xri, 0);
13635 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13636 spin_unlock_irq(&phba->hbalock);
13637 return NO_XRI;
13638 } else {
13639 set_bit(xri, phba->sli4_hba.xri_bmask);
13640 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 13641 }
6d368e53
JS
13642 spin_unlock_irq(&phba->hbalock);
13643 return xri;
13644}
13645
13646/**
13647 * __lpfc_sli4_free_xri - Release an xri for reuse.
13648 * @phba: pointer to lpfc hba data structure.
13649 * @xri: the xri to release.
13650 *
13651 * This routine is invoked to release an xri to the pool of
13652 * available xris maintained by the driver. The caller must hold
13653 * the hbalock.
13652 **/
13653void
13654__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13655{
13656 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
6d368e53
JS
13657 phba->sli4_hba.max_cfg_param.xri_used--;
13658 }
13659}
13660
13661/**
13662 * lpfc_sli4_free_xri - Release an xri for reuse.
13663 * @phba: pointer to lpfc hba data structure.
13664 * @xri: the xri to release.
13665 *
13666 * This routine is invoked to release an xri to the pool of
13667 * available xris maintained by the driver, taking the hbalock.
13667 **/
13668void
13669lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13670{
13671 spin_lock_irq(&phba->hbalock);
13672 __lpfc_sli4_free_xri(phba, xri);
13673 spin_unlock_irq(&phba->hbalock);
13674}
13675
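/*
 * Editor's illustrative sketch (not part of the driver): the xri
 * lifecycle around the allocator above.  A logical xri is claimed from
 * the bitmask for the life of an exchange and released when the exchange
 * completes; NO_XRI signals pool exhaustion.  The function name is
 * hypothetical.
 */
static int lpfc_xri_cycle_sketch(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return -ENOMEM;		/* all xris in use */
	/* ... tie the xri to an exchange and run the I/O ... */
	lpfc_sli4_free_xri(phba, xri);	/* return it to the bitmask */
	return 0;
}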
4f774513
JS
13676/**
13677 * lpfc_sli4_next_xritag - Get an xritag for the io
13678 * @phba: Pointer to HBA context object.
13679 *
13680 * This function gets an xritag for the iocb. If there is no unused xritag
13681 * it will return NO_XRI (0xffff), which is not a valid xritag.
13682 * The function returns the allocated xritag if successful.
13684 * The caller is not required to hold any lock.
13685 **/
13686uint16_t
13687lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13688{
6d368e53 13689 uint16_t xri_index;
4f774513 13690
6d368e53 13691 xri_index = lpfc_sli4_alloc_xri(phba);
81378052
JS
13692 if (xri_index == NO_XRI)
13693 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13694 "2004 Failed to allocate XRI.last XRITAG is %d"
13695 " Max XRI is %d, Used XRI is %d\n",
13696 xri_index,
13697 phba->sli4_hba.max_cfg_param.max_xri,
13698 phba->sli4_hba.max_cfg_param.xri_used);
13699 return xri_index;
4f774513
JS
13700}
13701
13702/**
6d368e53 13703 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
4f774513 13704 * @phba: pointer to lpfc hba data structure.
8a9d2e80
JS
13705 * @post_sgl_list: pointer to els sgl entry list.
13706 * @post_cnt: number of els sgl entries on the list.
4f774513
JS
13707 *
13708 * This routine is invoked to post a block of the driver's sgl pages to the
13709 * HBA using a non-embedded mailbox command. No lock is held. This routine
13710 * is only called when the driver is loading and after all IO has been
13711 * stopped.
13712 **/
8a9d2e80
JS
13713static int
13714lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13715 struct list_head *post_sgl_list,
13716 int post_cnt)
4f774513 13717{
8a9d2e80 13718 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4f774513
JS
13719 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13720 struct sgl_page_pairs *sgl_pg_pairs;
13721 void *viraddr;
13722 LPFC_MBOXQ_t *mbox;
13723 uint32_t reqlen, alloclen, pg_pairs;
13724 uint32_t mbox_tmo;
8a9d2e80
JS
13725 uint16_t xritag_start = 0;
13726 int rc = 0;
4f774513
JS
13727 uint32_t shdr_status, shdr_add_status;
13728 union lpfc_sli4_cfg_shdr *shdr;
13729
8a9d2e80 13730 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
4f774513 13731 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 13732 if (reqlen > SLI4_PAGE_SIZE) {
4f774513
JS
13733 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13734 "2559 Block sgl registration required DMA "
13735 "size (%d) great than a page\n", reqlen);
13736 return -ENOMEM;
13737 }
13738 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6d368e53 13739 if (!mbox)
4f774513 13740 return -ENOMEM;
4f774513
JS
13741
13742 /* Allocate DMA memory and set up the non-embedded mailbox command */
13743 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13744 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13745 LPFC_SLI4_MBX_NEMBED);
13746
13747 if (alloclen < reqlen) {
13748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13749 "0285 Allocated DMA memory size (%d) is "
13750 "less than the requested DMA memory "
13751 "size (%d)\n", alloclen, reqlen);
13752 lpfc_sli4_mbox_cmd_free(phba, mbox);
13753 return -ENOMEM;
13754 }
4f774513 13755 /* Set up the SGL pages in the non-embedded DMA pages */
6d368e53 13756 viraddr = mbox->sge_array->addr[0];
4f774513
JS
13757 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13758 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13759
8a9d2e80
JS
13760 pg_pairs = 0;
13761 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
4f774513
JS
13762 /* Set up the sge entry */
13763 sgl_pg_pairs->sgl_pg0_addr_lo =
13764 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13765 sgl_pg_pairs->sgl_pg0_addr_hi =
13766 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13767 sgl_pg_pairs->sgl_pg1_addr_lo =
13768 cpu_to_le32(putPaddrLow(0));
13769 sgl_pg_pairs->sgl_pg1_addr_hi =
13770 cpu_to_le32(putPaddrHigh(0));
6d368e53 13771
4f774513
JS
13772 /* Keep the first xritag on the list */
13773 if (pg_pairs == 0)
13774 xritag_start = sglq_entry->sli4_xritag;
13775 sgl_pg_pairs++;
8a9d2e80 13776 pg_pairs++;
4f774513 13777 }
6d368e53
JS
13778
13779 /* Complete initialization and perform endian conversion. */
4f774513 13780 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
8a9d2e80 13781 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
4f774513 13782 sgl->word0 = cpu_to_le32(sgl->word0);
4f774513
JS
13783 if (!phba->sli4_hba.intr_enable)
13784 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13785 else {
a183a15f 13786 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
13787 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13788 }
13789 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13790 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13791 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13792 if (rc != MBX_TIMEOUT)
13793 lpfc_sli4_mbox_cmd_free(phba, mbox);
13794 if (shdr_status || shdr_add_status || rc) {
13795 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13796 "2513 POST_SGL_BLOCK mailbox command failed "
13797 "status x%x add_status x%x mbx status x%x\n",
13798 shdr_status, shdr_add_status, rc);
13799 rc = -ENXIO;
13800 }
13801 return rc;
13802}
13803
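/*
 * Editor's illustrative sketch (not part of the driver): why the
 * POST_SGL_PAGES request above must fit in one SLI4 page.  Each xri
 * consumes one sgl_page_pairs descriptor on top of a fixed header, so the
 * largest block one non-embedded mailbox command can post is bounded as
 * computed here.  The function name is hypothetical.
 */
static uint32_t lpfc_sgl_block_max_sketch(void)
{
	uint32_t overhead = sizeof(union lpfc_sli4_cfg_shdr) +
			    sizeof(uint32_t);

	/* mirrors the reqlen > SLI4_PAGE_SIZE check in the routine above */
	return (SLI4_PAGE_SIZE - overhead) / sizeof(struct sgl_page_pairs);
}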
13804/**
13805 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13806 * @phba: pointer to lpfc hba data structure.
13807 * @sblist: pointer to scsi buffer list.
13808 * @count: number of scsi buffers on the list.
13809 *
13810 * This routine is invoked to post a block of @count scsi sgl pages from a
13811 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
13812 * No lock is held.
13813 *
13814 **/
13815int
8a9d2e80
JS
13816lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13817 struct list_head *sblist,
13818 int count)
4f774513
JS
13819{
13820 struct lpfc_scsi_buf *psb;
13821 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13822 struct sgl_page_pairs *sgl_pg_pairs;
13823 void *viraddr;
13824 LPFC_MBOXQ_t *mbox;
13825 uint32_t reqlen, alloclen, pg_pairs;
13826 uint32_t mbox_tmo;
13827 uint16_t xritag_start = 0;
13828 int rc = 0;
13829 uint32_t shdr_status, shdr_add_status;
13830 dma_addr_t pdma_phys_bpl1;
13831 union lpfc_sli4_cfg_shdr *shdr;
13832
13833 /* Calculate the requested length of the dma memory */
8a9d2e80 13834 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 13835 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 13836 if (reqlen > SLI4_PAGE_SIZE) {
13837 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13838 "0217 Block sgl registration required DMA "
13839 "size (%d) great than a page\n", reqlen);
13840 return -ENOMEM;
13841 }
13842 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13843 if (!mbox) {
13844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13845 "0283 Failed to allocate mbox cmd memory\n");
13846 return -ENOMEM;
13847 }
13848
13849 /* Allocate DMA memory and set up the non-embedded mailbox command */
13850 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13851 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13852 LPFC_SLI4_MBX_NEMBED);
13853
13854 if (alloclen < reqlen) {
13855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13856 "2561 Allocated DMA memory size (%d) is "
13857 "less than the requested DMA memory "
13858 "size (%d)\n", alloclen, reqlen);
13859 lpfc_sli4_mbox_cmd_free(phba, mbox);
13860 return -ENOMEM;
13861 }
6d368e53 13862
4f774513 13863 /* Get the first SGE entry from the non-embedded DMA memory */
13864 viraddr = mbox->sge_array->addr[0];
13865
13866 /* Set up the SGL pages in the non-embedded DMA pages */
13867 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13868 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13869
13870 pg_pairs = 0;
13871 list_for_each_entry(psb, sblist, list) {
13872 /* Set up the sge entry */
13873 sgl_pg_pairs->sgl_pg0_addr_lo =
13874 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13875 sgl_pg_pairs->sgl_pg0_addr_hi =
13876 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13877 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13878 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13879 else
13880 pdma_phys_bpl1 = 0;
13881 sgl_pg_pairs->sgl_pg1_addr_lo =
13882 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13883 sgl_pg_pairs->sgl_pg1_addr_hi =
13884 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13885 /* Keep the first xritag on the list */
13886 if (pg_pairs == 0)
13887 xritag_start = psb->cur_iocbq.sli4_xritag;
13888 sgl_pg_pairs++;
13889 pg_pairs++;
13890 }
13891 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13892 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13893 /* Perform endian conversion if necessary */
13894 sgl->word0 = cpu_to_le32(sgl->word0);
13895
13896 if (!phba->sli4_hba.intr_enable)
13897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13898 else {
a183a15f 13899 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13900 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13901 }
13902 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13903 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13904 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13905 if (rc != MBX_TIMEOUT)
13906 lpfc_sli4_mbox_cmd_free(phba, mbox);
13907 if (shdr_status || shdr_add_status || rc) {
13908 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13909 "2564 POST_SGL_BLOCK mailbox command failed "
13910 "status x%x add_status x%x mbx status x%x\n",
13911 shdr_status, shdr_add_status, rc);
13912 rc = -ENXIO;
13913 }
13914 return rc;
13915}
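
/*
 * Editor's sketch, not driver code: the sizing rule both POST_SGL_PAGES
 * routines above enforce, restated as standalone C. The byte counts are
 * assumed placeholders rather than the real lpfc structure sizes; the
 * point is that the page-pair array plus the config header and trailing
 * word must fit in one SLI4 page for the non-embedded mailbox to work.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SK_SLI4_PAGE_SIZE 4096	/* assumed SLI4 page size */
#define SK_SGL_PG_PAIR_SZ 16	/* two 64-bit page addresses per pair */
#define SK_CFG_SHDR_SZ    16	/* assumed config sub-header size */

static bool sgl_block_fits(size_t count)
{
	size_t reqlen = count * SK_SGL_PG_PAIR_SZ +
			SK_CFG_SHDR_SZ + sizeof(uint32_t);

	return reqlen <= SK_SLI4_PAGE_SIZE;	/* else caller must split */
}
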
13916
13917/**
13918 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13919 * @phba: pointer to lpfc_hba struct that the frame was received on
13920 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13921 *
13922 * This function checks the fields in the @fc_hdr to see if the FC frame is a
13923 * valid type of frame that the LPFC driver will handle. This function will
13924 * return a zero if the frame is a valid frame or a non zero value when the
13925 * frame does not pass the check.
13926 **/
13927static int
13928lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13929{
13930 /* make rctl_names static to save stack space */
13931 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13932 char *type_names[] = FC_TYPE_NAMES_INIT;
13933 struct fc_vft_header *fc_vft_hdr;
546fc854 13934 uint32_t *header = (uint32_t *) fc_hdr;
13935
13936 switch (fc_hdr->fh_r_ctl) {
13937 case FC_RCTL_DD_UNCAT: /* uncategorized information */
13938 case FC_RCTL_DD_SOL_DATA: /* solicited data */
13939 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
13940 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
13941 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
13942 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
13943 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
13944 case FC_RCTL_DD_CMD_STATUS: /* command status */
13945 case FC_RCTL_ELS_REQ: /* extended link services request */
13946 case FC_RCTL_ELS_REP: /* extended link services reply */
13947 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
13948 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
13949 case FC_RCTL_BA_NOP: /* basic link service NOP */
13950 case FC_RCTL_BA_ABTS: /* basic link service abort */
13951 case FC_RCTL_BA_RMC: /* remove connection */
13952 case FC_RCTL_BA_ACC: /* basic accept */
13953 case FC_RCTL_BA_RJT: /* basic reject */
13954 case FC_RCTL_BA_PRMT:
13955 case FC_RCTL_ACK_1: /* acknowledge_1 */
13956 case FC_RCTL_ACK_0: /* acknowledge_0 */
13957 case FC_RCTL_P_RJT: /* port reject */
13958 case FC_RCTL_F_RJT: /* fabric reject */
13959 case FC_RCTL_P_BSY: /* port busy */
13960 case FC_RCTL_F_BSY: /* fabric busy to data frame */
13961 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
13962 case FC_RCTL_LCR: /* link credit reset */
13963 case FC_RCTL_END: /* end */
13964 break;
13965 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
13966 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13967 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13968 return lpfc_fc_frame_check(phba, fc_hdr);
13969 default:
13970 goto drop;
13971 }
13972 switch (fc_hdr->fh_type) {
13973 case FC_TYPE_BLS:
13974 case FC_TYPE_ELS:
13975 case FC_TYPE_FCP:
13976 case FC_TYPE_CT:
13977 break;
13978 case FC_TYPE_IP:
13979 case FC_TYPE_ILS:
13980 default:
13981 goto drop;
13982 }
546fc854 13983
4f774513 13984 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13985 "2538 Received frame rctl:%s type:%s "
13986 "Frame Data:%08x %08x %08x %08x %08x %08x\n",
4f774513 13987 rctl_names[fc_hdr->fh_r_ctl],
13988 type_names[fc_hdr->fh_type],
13989 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13990 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13991 be32_to_cpu(header[4]), be32_to_cpu(header[5]));
13992 return 0;
13993drop:
13994 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13995 "2539 Dropped frame rctl:%s type:%s\n",
13996 rctl_names[fc_hdr->fh_r_ctl],
13997 type_names[fc_hdr->fh_type]);
13998 return 1;
13999}
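
/*
 * Editor's sketch, not driver code: the accept/drop decision above boiled
 * down to two whitelists in userspace C. The SK_* constants mirror the
 * values in <scsi/fc/fc_fs.h> but are redefined so the sketch stands
 * alone; only a few representative R_CTL codes are shown.
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_FC_RCTL_DD_UNSOL_CMD	0x06	/* unsolicited command */
#define SK_FC_RCTL_ELS_REQ	0x22	/* extended link services request */
#define SK_FC_RCTL_BA_ABTS	0x81	/* basic link service abort */
#define SK_FC_TYPE_BLS		0x00
#define SK_FC_TYPE_ELS		0x01
#define SK_FC_TYPE_FCP		0x08
#define SK_FC_TYPE_CT		0x20

static bool frame_acceptable(uint8_t r_ctl, uint8_t type)
{
	switch (r_ctl) {
	case SK_FC_RCTL_DD_UNSOL_CMD:
	case SK_FC_RCTL_ELS_REQ:
	case SK_FC_RCTL_BA_ABTS:
		break;			/* known R_CTL, keep checking */
	default:
		return false;		/* drop the frame */
	}
	switch (type) {
	case SK_FC_TYPE_BLS:
	case SK_FC_TYPE_ELS:
	case SK_FC_TYPE_FCP:
	case SK_FC_TYPE_CT:
		return true;		/* supported upper level protocol */
	default:
		return false;		/* drop the frame */
	}
}
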
14000
14001/**
14002 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
14003 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14004 *
14005 * This function processes the FC header to retrieve the VFI from the VF
14006 * header, if one exists. This function will return the VFI if one exists
14007 * or 0 if no VFT header exists.
14008 **/
14009static uint32_t
14010lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
14011{
14012 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14013
14014 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
14015 return 0;
14016 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
14017}
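
/*
 * Editor's sketch, not driver code: the bf_get()-style extraction used
 * above, generalized. The shift and mask here are illustrative stand-ins,
 * not the actual fc_vft_hdr_vf_id field layout.
 */
#include <stdint.h>

static uint32_t bf_get_sketch(uint32_t word, unsigned int shift, uint32_t mask)
{
	return (word >> shift) & mask;	/* isolate the bitfield */
}

/* e.g. a hypothetical 12-bit field starting at bit 1:
 *	vf_id = bf_get_sketch(word1, 1, 0xfff);
 */
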
14018
14019/**
14020 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
14021 * @phba: Pointer to the HBA structure to search for the vport on
14022 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14023 * @fcfi: The FC Fabric ID that the frame came from
14024 *
14025 * This function searches the @phba for a vport that matches the content of the
14026 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
14027 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
14028 * returns the matching vport pointer or NULL if unable to match frame to a
14029 * vport.
14030 **/
14031static struct lpfc_vport *
14032lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14033 uint16_t fcfi)
14034{
14035 struct lpfc_vport **vports;
14036 struct lpfc_vport *vport = NULL;
14037 int i;
14038 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14039 fc_hdr->fh_d_id[1] << 8 |
14040 fc_hdr->fh_d_id[2]);
939723a4 14041
14042 if (did == Fabric_DID)
14043 return phba->pport;
14044 if ((phba->pport->fc_flag & FC_PT2PT) &&
14045 !(phba->link_state == LPFC_HBA_READY))
14046 return phba->pport;
14047
14048 vports = lpfc_create_vport_work_array(phba);
14049 if (vports != NULL)
14050 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14051 if (phba->fcf.fcfi == fcfi &&
14052 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14053 vports[i]->fc_myDID == did) {
14054 vport = vports[i];
14055 break;
14056 }
14057 }
14058 lpfc_destroy_vport_work_array(phba, vports);
14059 return vport;
14060}
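
/*
 * Editor's sketch, not driver code: how the 24-bit destination ID is
 * assembled from the three fh_d_id bytes above, in plain C.
 */
#include <stdint.h>

static uint32_t did_from_bytes(const uint8_t d_id[3])
{
	/* FC addresses arrive big-endian: domain, area, port */
	return ((uint32_t)d_id[0] << 16) |
	       ((uint32_t)d_id[1] << 8) |
		(uint32_t)d_id[2];
}
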
14061
14062/**
14063 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
14064 * @vport: The vport to work on.
14065 *
14066 * This function updates the receive sequence time stamp for this vport. The
14067 * receive sequence time stamp indicates the time that the last frame of
14068 * the sequence that has been idle for the longest amount of time was received.
14069 * The driver uses this time stamp to determine if any received sequences have
14070 * timed out.
14071 **/
14072void
14073lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14074{
14075 struct lpfc_dmabuf *h_buf;
14076 struct hbq_dmabuf *dmabuf = NULL;
14077
14078 /* get the oldest sequence on the rcv list */
14079 h_buf = list_get_first(&vport->rcv_buffer_list,
14080 struct lpfc_dmabuf, list);
14081 if (!h_buf)
14082 return;
14083 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14084 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14085}
14086
14087/**
14088 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
14089 * @vport: The vport that the received sequences were sent to.
14090 *
14091 * This function cleans up all outstanding received sequences. This is called
14092 * by the driver when a link event or user action invalidates all the received
14093 * sequences.
14094 **/
14095void
14096lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
14097{
14098 struct lpfc_dmabuf *h_buf, *hnext;
14099 struct lpfc_dmabuf *d_buf, *dnext;
14100 struct hbq_dmabuf *dmabuf = NULL;
14101
14102 /* start with the oldest sequence on the rcv list */
14103 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14104 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14105 list_del_init(&dmabuf->hbuf.list);
14106 list_for_each_entry_safe(d_buf, dnext,
14107 &dmabuf->dbuf.list, list) {
14108 list_del_init(&d_buf->list);
14109 lpfc_in_buf_free(vport->phba, d_buf);
14110 }
14111 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14112 }
14113}
14114
14115/**
14116 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
14117 * @vport: The vport that the received sequences were sent to.
14118 *
14119 * This function determines whether any received sequences have timed out by
14120 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
14121 * indicates that there is at least one timed out sequence this routine will
14122 * go through the received sequences one at a time from most inactive to most
14123 * active to determine which ones need to be cleaned up. Once it has determined
14124 * that a sequence needs to be cleaned up it will simply free up the resources
14125 * without sending an abort.
14126 **/
14127void
14128lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
14129{
14130 struct lpfc_dmabuf *h_buf, *hnext;
14131 struct lpfc_dmabuf *d_buf, *dnext;
14132 struct hbq_dmabuf *dmabuf = NULL;
14133 unsigned long timeout;
14134 int abort_count = 0;
14135
14136 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14137 vport->rcv_buffer_time_stamp);
14138 if (list_empty(&vport->rcv_buffer_list) ||
14139 time_before(jiffies, timeout))
14140 return;
14141 /* start with the oldest sequence on the rcv list */
14142 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14143 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14144 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14145 dmabuf->time_stamp);
14146 if (time_before(jiffies, timeout))
14147 break;
14148 abort_count++;
14149 list_del_init(&dmabuf->hbuf.list);
14150 list_for_each_entry_safe(d_buf, dnext,
14151 &dmabuf->dbuf.list, list) {
14152 list_del_init(&d_buf->list);
14153 lpfc_in_buf_free(vport->phba, d_buf);
14154 }
14155 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14156 }
14157 if (abort_count)
14158 lpfc_update_rcv_time_stamp(vport);
14159}
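
/*
 * Editor's sketch, not driver code: the oldest-first timeout walk above
 * in userspace C with wall-clock time standing in for jiffies. All names
 * are hypothetical. Because the list is kept ordered oldest to youngest,
 * the scan can stop at the first sequence still inside the E_D_TOV window.
 */
#include <stddef.h>
#include <time.h>

struct rcv_seq {
	time_t time_stamp;	/* arrival time of the sequence's last frame */
};

/* Returns how many leading (oldest) sequences timed out. */
static size_t count_timed_out(const struct rcv_seq *seqs, size_t n,
			      time_t now, double edtov_secs)
{
	size_t timed_out = 0;

	for (size_t i = 0; i < n; i++) {
		if (difftime(now, seqs[i].time_stamp) < edtov_secs)
			break;		/* everything younger is alive too */
		timed_out++;		/* real code frees its buffers here */
	}
	return timed_out;
}
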
14160
14161/**
14162 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which this frame was received
14163 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
14164 *
14165 * This function searches through the existing incomplete sequences that have
14166 * been sent to this @vport. If the frame matches one of the incomplete
14167 * sequences then the dbuf in the @dmabuf is added to the list of frames that
14168 * make up that sequence. If no sequence is found that matches this frame then
14169 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
14170 * This function returns a pointer to the first dmabuf in the sequence list that
14171 * the frame was linked to.
14172 **/
14173static struct hbq_dmabuf *
14174lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14175{
14176 struct fc_frame_header *new_hdr;
14177 struct fc_frame_header *temp_hdr;
14178 struct lpfc_dmabuf *d_buf;
14179 struct lpfc_dmabuf *h_buf;
14180 struct hbq_dmabuf *seq_dmabuf = NULL;
14181 struct hbq_dmabuf *temp_dmabuf = NULL;
14182
4d9ab994 14183 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 14184 dmabuf->time_stamp = jiffies;
14185 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14186 /* Use the hdr_buf to find the sequence that this frame belongs to */
14187 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14188 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14189 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14190 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14191 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14192 continue;
14193 /* found a pending sequence that matches this frame */
14194 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14195 break;
14196 }
14197 if (!seq_dmabuf) {
14198 /*
14199 * This indicates first frame received for this sequence.
14200 * Queue the buffer on the vport's rcv_buffer_list.
14201 */
14202 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 14203 lpfc_update_rcv_time_stamp(vport);
14204 return dmabuf;
14205 }
14206 temp_hdr = seq_dmabuf->hbuf.virt;
14207 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
14208 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14209 list_del_init(&seq_dmabuf->hbuf.list);
14210 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14211 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 14212 lpfc_update_rcv_time_stamp(vport);
14213 return dmabuf;
14214 }
14215 /* move this sequence to the tail to indicate a young sequence */
14216 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
14217 seq_dmabuf->time_stamp = jiffies;
14218 lpfc_update_rcv_time_stamp(vport);
14219 if (list_empty(&seq_dmabuf->dbuf.list)) {
14220 temp_hdr = dmabuf->hbuf.virt;
14221 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14222 return seq_dmabuf;
14223 }
14224 /* find the correct place in the sequence to insert this frame */
14225 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
14226 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14227 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14228 /*
14229 * If the frame's sequence count is greater than the frame on
14230 * the list then insert the frame right after this frame
14231 */
14232 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14233 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14234 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14235 return seq_dmabuf;
14236 }
14237 }
14238 return NULL;
14239}
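
/*
 * Editor's sketch, not driver code: the ordered-insert step above reduced
 * to an array of SEQ_CNT values. As in the reverse list walk, the new
 * frame lands right after the first entry with a smaller count, keeping
 * the sequence sorted; duplicates are rejected.
 */
#include <stdint.h>
#include <string.h>

#define SK_MAX_FRAMES 64

struct sk_seq {
	uint16_t seq_cnt[SK_MAX_FRAMES];
	int count;
};

/* Returns 0 on success, -1 if the sequence is full or the frame is a dup. */
static int sk_insert_frame(struct sk_seq *s, uint16_t new_cnt)
{
	int i;

	if (s->count >= SK_MAX_FRAMES)
		return -1;
	for (i = s->count - 1; i >= 0; i--) {	/* scan from the tail */
		if (s->seq_cnt[i] == new_cnt)
			return -1;		/* duplicate frame */
		if (s->seq_cnt[i] < new_cnt)
			break;			/* insert right after i */
	}
	memmove(&s->seq_cnt[i + 2], &s->seq_cnt[i + 1],
		(size_t)(s->count - (i + 1)) * sizeof(uint16_t));
	s->seq_cnt[i + 1] = new_cnt;
	s->count++;
	return 0;
}
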
14240
14241/**
14242 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14243 * @vport: pointer to a virtual port
14244 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14245 *
14246 * This function tries to abort the partially assembled sequence described
14247 * by the information from the basic abort @dmabuf. It checks whether such
14248 * a partially assembled sequence is held by the driver. If so, it shall free up all
14249 * the frames from the partially assembled sequence.
14250 *
14251 * Return
14252 * true -- if there is matching partially assembled sequence present and all
14253 * the frames freed with the sequence;
14254 * false -- if there is no matching partially assembled sequence present so
14255 * nothing got aborted in the lower layer driver
14256 **/
14257static bool
14258lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14259 struct hbq_dmabuf *dmabuf)
14260{
14261 struct fc_frame_header *new_hdr;
14262 struct fc_frame_header *temp_hdr;
14263 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
14264 struct hbq_dmabuf *seq_dmabuf = NULL;
14265
14266 /* Use the hdr_buf to find the sequence that matches this frame */
14267 INIT_LIST_HEAD(&dmabuf->dbuf.list);
14268 INIT_LIST_HEAD(&dmabuf->hbuf.list);
14269 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14270 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14271 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14272 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14273 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14274 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14275 continue;
14276 /* found a pending sequence that matches this frame */
14277 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14278 break;
14279 }
14280
14281 /* Free up all the frames from the partially assembled sequence */
14282 if (seq_dmabuf) {
14283 list_for_each_entry_safe(d_buf, n_buf,
14284 &seq_dmabuf->dbuf.list, list) {
14285 list_del_init(&d_buf->list);
14286 lpfc_in_buf_free(vport->phba, d_buf);
14287 }
14288 return true;
14289 }
14290 return false;
14291}
14292
14293/**
14294 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
14295 * @vport: pointer to a virtual port
14296 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14297 *
14298 * This function tries to abort the assembled sequence at the upper level
14299 * protocol, described by the information from the basic abort @dmabuf. It
14300 * checks whether such a pending context exists at the upper level protocol.
14301 * If so, it shall clean up the pending context.
14302 *
14303 * Return
14304 * true -- if there is matching pending context of the sequence cleaned
14305 * at ulp;
14306 * false -- if there is no matching pending context of the sequence present
14307 * at ulp.
14308 **/
14309static bool
14310lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14311{
14312 struct lpfc_hba *phba = vport->phba;
14313 int handled;
14314
14315 /* Accepting abort at ulp with SLI4 only */
14316 if (phba->sli_rev < LPFC_SLI_REV4)
14317 return false;
14318
14319 /* Give all interested upper level protocols a chance to handle the abort */
14320 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
14321 if (handled)
14322 return true;
14323
14324 return false;
14325}
14326
6669f9bb 14327/**
546fc854 14328 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14329 * @phba: Pointer to HBA context object.
14330 * @cmd_iocbq: pointer to the command iocbq structure.
14331 * @rsp_iocbq: pointer to the response iocbq structure.
14332 *
546fc854 14333 * This function handles the sequence abort response iocb command complete
14334 * event. It properly releases the memory allocated to the sequence abort
14335 * response iocb.
14336 **/
14337static void
546fc854 14338lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14339 struct lpfc_iocbq *cmd_iocbq,
14340 struct lpfc_iocbq *rsp_iocbq)
14341{
14342 struct lpfc_nodelist *ndlp;
14343
14344 if (cmd_iocbq) {
14345 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
14346 lpfc_nlp_put(ndlp);
14347 lpfc_nlp_not_used(ndlp);
6669f9bb 14348 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 14349 }
14350
14351 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14352 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
14353 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14354 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
14355 rsp_iocbq->iocb.ulpStatus,
14356 rsp_iocbq->iocb.un.ulpWord[4]);
14357}
14358
14359/**
14360 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
14361 * @phba: Pointer to HBA context object.
14362 * @xri: xri id in transaction.
14363 *
14364 * This function validates that the xri maps to the known range of XRIs allocated and
14365 * used by the driver.
14366 **/
7851fe2c 14367uint16_t
14368lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14369 uint16_t xri)
14370{
14371 int i;
14372
14373 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
14374 if (xri == phba->sli4_hba.xri_ids[i])
14375 return i;
14376 }
14377 return NO_XRI;
14378}
14379
6669f9bb 14380/**
546fc854 14381 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14382 * @vport: Pointer to the vport on which the aborted sequence was received.
14383 * @fc_hdr: pointer to a FC frame header.
 * @aborted: true if the sequence was successfully aborted, false otherwise.
14384 *
546fc854 14385 * This function sends a basic response to a previous unsol sequence abort
14386 * event after aborting the sequence handling.
14387 **/
14388static void
14389lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
14390 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 14391{
6dd9e31c 14392 struct lpfc_hba *phba = vport->phba;
14393 struct lpfc_iocbq *ctiocb = NULL;
14394 struct lpfc_nodelist *ndlp;
ee0f4fe1 14395 uint16_t oxid, rxid, xri, lxri;
5ffc266e 14396 uint32_t sid, fctl;
6669f9bb 14397 IOCB_t *icmd;
546fc854 14398 int rc;
14399
14400 if (!lpfc_is_link_up(phba))
14401 return;
14402
14403 sid = sli4_sid_from_fc_hdr(fc_hdr);
14404 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 14405 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 14406
6dd9e31c 14407 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 14408 if (!ndlp) {
14409 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
14410 if (!ndlp) {
14411 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14412 "1268 Failed to allocate ndlp for "
14413 "oxid:x%x SID:x%x\n", oxid, sid);
14414 return;
14415 }
14416 lpfc_nlp_init(vport, ndlp, sid);
14417 /* Put ndlp onto pport node list */
14418 lpfc_enqueue_node(vport, ndlp);
14419 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
14420 /* re-setup ndlp without removing from node list */
14421 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
14422 if (!ndlp) {
14423 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14424 "3275 Failed to active ndlp found "
14425 "for oxid:x%x SID:x%x\n", oxid, sid);
14426 return;
14427 }
14428 }
14429
546fc854 14430 /* Allocate buffer for rsp iocb */
14431 ctiocb = lpfc_sli_get_iocbq(phba);
14432 if (!ctiocb)
14433 return;
14434
14435 /* Extract the F_CTL field from FC_HDR */
14436 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
14437
6669f9bb 14438 icmd = &ctiocb->iocb;
6669f9bb 14439 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 14440 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
14441 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
14442 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
14443 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
14444
14445 /* Fill in the rest of iocb fields */
14446 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
14447 icmd->ulpBdeCount = 0;
14448 icmd->ulpLe = 1;
14449 icmd->ulpClass = CLASS3;
6d368e53 14450 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 14451 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 14452
14453 ctiocb->iocb_cmpl = NULL;
14454 ctiocb->vport = phba->pport;
546fc854 14455 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 14456 ctiocb->sli4_lxritag = NO_XRI;
14457 ctiocb->sli4_xritag = NO_XRI;
14458
14459 if (fctl & FC_FC_EX_CTX)
14460 /* Exchange responder sent the abort so we
14461 * own the oxid.
14462 */
14463 xri = oxid;
14464 else
14465 xri = rxid;
14466 lxri = lpfc_sli4_xri_inrange(phba, xri);
14467 if (lxri != NO_XRI)
14468 lpfc_set_rrq_active(phba, ndlp, lxri,
14469 (xri == oxid) ? rxid : oxid, 0);
14470 /* For BA_ABTS from exchange responder, if the logical xri with
14471 * the oxid maps to the FCP XRI range, the port no longer has
14472 * that exchange context, send a BLS_RJT. Override the IOCB for
14473 * a BA_RJT.
14474 */
14475 if ((fctl & FC_FC_EX_CTX) &&
14476 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
14477 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14478 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14479 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14480 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14481 }
14482
14483 /* If BA_ABTS failed to abort a partially assembled receive sequence,
14484 * the driver no longer has that exchange, send a BLS_RJT. Override
14485 * the IOCB for a BA_RJT.
546fc854 14486 */
6dd9e31c 14487 if (aborted == false) {
14488 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14489 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14490 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14491 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14492 }
6669f9bb 14493
14494 if (fctl & FC_FC_EX_CTX) {
14495 /* ABTS sent by responder to CT exchange, construction
14496 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
14497 * field and RX_ID from ABTS for RX_ID field.
14498 */
546fc854 14499 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14500 } else {
14501 /* ABTS sent by initiator to CT exchange, construction
14502 * of BA_ACC will need to allocate a new XRI as for the
f09c3acc 14503 * XRI_TAG field.
5ffc266e 14504 */
546fc854 14505 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 14506 }
f09c3acc 14507 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 14508 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 14509
546fc854 14510 /* Xmit CT abts response on exchange <xid> */
14511 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
14512 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14513 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14514
14515 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14516 if (rc == IOCB_ERROR) {
14517 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
14518 "2925 Failed to issue CT ABTS RSP x%x on "
14519 "xri x%x, Data x%x\n",
14520 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14521 phba->link_state);
14522 lpfc_nlp_put(ndlp);
14523 ctiocb->context1 = NULL;
14524 lpfc_sli_release_iocbq(phba, ctiocb);
14525 }
14526}
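
/*
 * Editor's sketch, not driver code: the BA_ACC/BA_RJT choice above as a
 * small decision table. The three inputs are hypothetical booleans that
 * summarize the checks the routine makes before overriding the IOCB.
 */
#include <stdbool.h>

enum sk_bls_rsp { SK_BLS_BA_ACC, SK_BLS_BA_RJT };

static enum sk_bls_rsp sk_pick_bls_response(bool from_responder,
					    bool xri_in_els_range,
					    bool aborted)
{
	if (from_responder && !xri_in_els_range)
		return SK_BLS_BA_RJT;	/* port no longer owns the exchange */
	if (!aborted)
		return SK_BLS_BA_RJT;	/* nothing was actually aborted */
	return SK_BLS_BA_ACC;		/* abort carried out, accept it */
}
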
14527
14528/**
14529 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14530 * @vport: Pointer to the vport on which this sequence was received
14531 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14532 *
14533 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14534 * receive sequence is only partially assembled by the driver, it shall abort
14535 * the partially assembled frames for the sequence. Otherwise, if the
14536 * unsolicited receive sequence has been completely assembled and passed to
14537 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
14538 * unsolicited sequence as aborted. After that, it will issue a BA_ACC or
14539 * BA_RJT response for the abort, as appropriate.
14540 **/
14541void
14542lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14543 struct hbq_dmabuf *dmabuf)
14544{
14545 struct lpfc_hba *phba = vport->phba;
14546 struct fc_frame_header fc_hdr;
5ffc266e 14547 uint32_t fctl;
6dd9e31c 14548 bool aborted;
6669f9bb 14549
14550 /* Make a copy of fc_hdr before the dmabuf being released */
14551 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 14552 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 14553
5ffc266e 14554 if (fctl & FC_FC_EX_CTX) {
14555 /* ABTS by responder to exchange, no cleanup needed */
14556 aborted = true;
5ffc266e 14557 } else {
14558 /* ABTS by initiator to exchange, need to do cleanup */
14559 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14560 if (aborted == false)
14561 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 14562 }
14563 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14564
14565 /* Respond with BA_ACC or BA_RJT accordingly */
14566 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
14567}
14568
14569/**
14570 * lpfc_seq_complete - Indicates if a sequence is complete
14571 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14572 *
14573 * This function checks the sequence, starting with the frame described by
14574 * @dmabuf, to see if all the frames associated with this sequence are present.
14575 * The frames associated with this sequence are linked to the @dmabuf using
14576 * the dbuf list. This function looks for three major things: 1) that the
14577 * first frame has a sequence count of zero; 2) that there is a frame with
14578 * the last-frame-of-sequence bit set; and 3) that there are no holes in the
14579 * sequence count. The function will return 1 when the sequence is complete,
 * otherwise it will return 0.
14580 **/
14581static int
14582lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14583{
14584 struct fc_frame_header *hdr;
14585 struct lpfc_dmabuf *d_buf;
14586 struct hbq_dmabuf *seq_dmabuf;
14587 uint32_t fctl;
14588 int seq_count = 0;
14589
14590 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14591 /* make sure the first frame of the sequence has a sequence count of zero */
14592 if (hdr->fh_seq_cnt != seq_count)
14593 return 0;
14594 fctl = (hdr->fh_f_ctl[0] << 16 |
14595 hdr->fh_f_ctl[1] << 8 |
14596 hdr->fh_f_ctl[2]);
14597 /* If last frame of sequence we can return success. */
14598 if (fctl & FC_FC_END_SEQ)
14599 return 1;
14600 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14601 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14602 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14603 /* If there is a hole in the sequence count then fail. */
eeead811 14604 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14605 return 0;
14606 fctl = (hdr->fh_f_ctl[0] << 16 |
14607 hdr->fh_f_ctl[1] << 8 |
14608 hdr->fh_f_ctl[2]);
14609 /* If last frame of sequence we can return success. */
14610 if (fctl & FC_FC_END_SEQ)
14611 return 1;
14612 }
14613 return 0;
14614}
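
/*
 * Editor's sketch, not driver code: the completeness test above against a
 * sorted array of frames. It encodes the same three conditions -- frame
 * zero present, no holes in SEQ_CNT, and an end-of-sequence flag seen.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sk_frame {
	uint16_t seq_cnt;	/* ascending, maintained on insert */
	bool end_of_seq;	/* F_CTL End_Sequence bit */
};

static bool sk_seq_complete(const struct sk_frame *f, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (f[i].seq_cnt != (uint16_t)i)
			return false;	/* hole (or missing frame zero) */
		if (f[i].end_of_seq)
			return true;	/* last frame of sequence seen */
	}
	return false;			/* no end-of-sequence frame yet */
}
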
14615
14616/**
14617 * lpfc_prep_seq - Prep sequence for ULP processing
14618 * @vport: Pointer to the vport on which this sequence was received
14619 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14620 *
14621 * This function takes a sequence, described by a list of frames, and creates
14622 * a list of iocbq structures to describe the sequence. This iocbq list will be
14623 * used to issue to the generic unsolicited sequence handler. This routine
14624 * returns a pointer to the first iocbq in the list. If the function is unable
14625 * to allocate an iocbq then it throws out the received frames that were not
14626 * able to be described and return a pointer to the first iocbq. If unable to
14627 * allocate any iocbqs (including the first) this function will return NULL.
14628 **/
14629static struct lpfc_iocbq *
14630lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14631{
7851fe2c 14632 struct hbq_dmabuf *hbq_buf;
14633 struct lpfc_dmabuf *d_buf, *n_buf;
14634 struct lpfc_iocbq *first_iocbq, *iocbq;
14635 struct fc_frame_header *fc_hdr;
14636 uint32_t sid;
7851fe2c 14637 uint32_t len, tot_len;
eeead811 14638 struct ulp_bde64 *pbde;
14639
14640 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14641 /* remove from receive buffer list */
14642 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 14643 lpfc_update_rcv_time_stamp(vport);
4f774513 14644 /* get the Remote Port's SID */
6669f9bb 14645 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 14646 tot_len = 0;
14647 /* Get an iocbq struct to fill in. */
14648 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14649 if (first_iocbq) {
14650 /* Initialize the first IOCB. */
8fa38513 14651 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 14652 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14653
14654 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14655 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14656 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14657 first_iocbq->iocb.un.rcvels.parmRo =
14658 sli4_did_from_fc_hdr(fc_hdr);
14659 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14660 } else
14661 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14662 first_iocbq->iocb.ulpContext = NO_XRI;
14663 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14664 be16_to_cpu(fc_hdr->fh_ox_id);
14665 /* iocbq is prepped for internal consumption. Physical vpi. */
14666 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14667 vport->phba->vpi_ids[vport->vpi];
14668 /* put the first buffer into the first IOCBq */
14669 first_iocbq->context2 = &seq_dmabuf->dbuf;
14670 first_iocbq->context3 = NULL;
14671 first_iocbq->iocb.ulpBdeCount = 1;
14672 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14673 LPFC_DATA_BUF_SIZE;
14674 first_iocbq->iocb.un.rcvels.remoteID = sid;
7851fe2c 14675 tot_len = bf_get(lpfc_rcqe_length,
4d9ab994 14676 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
7851fe2c 14677 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14678 }
14679 iocbq = first_iocbq;
14680 /*
14681 * Each IOCBq can have two Buffers assigned, so go through the list
14682 * of buffers for this sequence and save two buffers in each IOCBq
14683 */
14684 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14685 if (!iocbq) {
14686 lpfc_in_buf_free(vport->phba, d_buf);
14687 continue;
14688 }
14689 if (!iocbq->context3) {
14690 iocbq->context3 = d_buf;
14691 iocbq->iocb.ulpBdeCount++;
14692 pbde = (struct ulp_bde64 *)
14693 &iocbq->iocb.unsli3.sli3Words[4];
14694 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14695
14696 /* We need to get the size out of the right CQE */
14697 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14698 len = bf_get(lpfc_rcqe_length,
14699 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14700 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14701 tot_len += len;
14702 } else {
14703 iocbq = lpfc_sli_get_iocbq(vport->phba);
14704 if (!iocbq) {
14705 if (first_iocbq) {
14706 first_iocbq->iocb.ulpStatus =
14707 IOSTAT_FCP_RSP_ERROR;
14708 first_iocbq->iocb.un.ulpWord[4] =
14709 IOERR_NO_RESOURCES;
14710 }
14711 lpfc_in_buf_free(vport->phba, d_buf);
14712 continue;
14713 }
14714 iocbq->context2 = d_buf;
14715 iocbq->context3 = NULL;
14716 iocbq->iocb.ulpBdeCount = 1;
14717 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14718 LPFC_DATA_BUF_SIZE;
14719
14720 /* We need to get the size out of the right CQE */
14721 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14722 len = bf_get(lpfc_rcqe_length,
14723 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14724 tot_len += len;
14725 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14726
14727 iocbq->iocb.un.rcvels.remoteID = sid;
14728 list_add_tail(&iocbq->list, &first_iocbq->list);
14729 }
14730 }
14731 return first_iocbq;
14732}
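
/*
 * Editor's sketch, not driver code: the two-buffers-per-iocb packing
 * above as a standalone loop over generic buffer pointers. A descriptor
 * is reused while it has room, and a fresh one is started when full.
 */
#include <stddef.h>

struct sk_desc {
	const void *buf[2];
	int nbufs;
};

/* Returns descriptors used, or -1 if descs is too small
 * (n buffers need (n + 1) / 2 descriptors). */
static int sk_pack_two_per_desc(const void **bufs, size_t n,
				struct sk_desc *descs, size_t ndescs)
{
	size_t d = 0;

	for (size_t i = 0; i < n; i++) {
		if (d > 0 && descs[d - 1].nbufs < 2) {
			descs[d - 1].buf[descs[d - 1].nbufs++] = bufs[i];
			continue;
		}
		if (d == ndescs)
			return -1;	/* out of descriptors */
		descs[d].buf[0] = bufs[i];
		descs[d].nbufs = 1;
		d++;
	}
	return (int)d;
}
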
14733
14734static void
14735lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14736 struct hbq_dmabuf *seq_dmabuf)
14737{
14738 struct fc_frame_header *fc_hdr;
14739 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14740 struct lpfc_hba *phba = vport->phba;
14741
14742 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14743 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14744 if (!iocbq) {
14745 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14746 "2707 Ring %d handler: Failed to allocate "
14747 "iocb Rctl x%x Type x%x received\n",
14748 LPFC_ELS_RING,
14749 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14750 return;
14751 }
14752 if (!lpfc_complete_unsol_iocb(phba,
14753 &phba->sli.ring[LPFC_ELS_RING],
14754 iocbq, fc_hdr->fh_r_ctl,
14755 fc_hdr->fh_type))
6d368e53 14756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14757 "2540 Ring %d handler: unexpected Rctl "
14758 "x%x Type x%x received\n",
14759 LPFC_ELS_RING,
14760 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14761
14762 /* Free iocb created in lpfc_prep_seq */
14763 list_for_each_entry_safe(curr_iocb, next_iocb,
14764 &iocbq->list, list) {
14765 list_del_init(&curr_iocb->list);
14766 lpfc_sli_release_iocbq(phba, curr_iocb);
14767 }
14768 lpfc_sli_release_iocbq(phba, iocbq);
14769}
14770
14771/**
14772 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14773 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
14774 *
14775 * This function is called with no lock held. This function processes all
14776 * the received buffers and gives them to upper layers when a received buffer
14777 * indicates that it is the final frame in the sequence. The interrupt
14778 * service routine processes received buffers at interrupt contexts and adds
14779 * received dma buffers to the rb_pend_list queue and signals the worker thread.
14780 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14781 * appropriate receive function when the final frame in a sequence is received.
14782 **/
14783void
14784lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14785 struct hbq_dmabuf *dmabuf)
4f774513 14786{
4d9ab994 14787 struct hbq_dmabuf *seq_dmabuf;
14788 struct fc_frame_header *fc_hdr;
14789 struct lpfc_vport *vport;
14790 uint32_t fcfi;
939723a4 14791 uint32_t did;
4f774513 14792
4f774513 14793 /* Process each received buffer */
14794 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14795 /* check to see if this a valid type of frame */
14796 if (lpfc_fc_frame_check(phba, fc_hdr)) {
14797 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14798 return;
14799 }
14800 if ((bf_get(lpfc_cqe_code,
14801 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
14802 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14803 &dmabuf->cq_event.cqe.rcqe_cmpl);
14804 else
14805 fcfi = bf_get(lpfc_rcqe_fcf_id,
14806 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 14807
4d9ab994 14808 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
939723a4 14809 if (!vport) {
14810 /* throw out the frame */
14811 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14812 return;
14813 }
14814
14815 /* d_id this frame is directed to */
14816 did = sli4_did_from_fc_hdr(fc_hdr);
14817
14818 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14819 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14820 (did != Fabric_DID)) {
14821 /*
14822 * Throw out the frame if we are not pt2pt.
14823 * The pt2pt protocol allows for discovery frames
14824 * to be received without a registered VPI.
14825 */
14826 if (!(vport->fc_flag & FC_PT2PT) ||
14827 (phba->link_state == LPFC_HBA_READY)) {
14828 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14829 return;
14830 }
14831 }
14832
14833 /* Handle the basic abort sequence (BA_ABTS) event */
14834 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14835 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14836 return;
14837 }
14838
14839 /* Link this frame */
14840 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14841 if (!seq_dmabuf) {
14842 /* unable to add frame to vport - throw it out */
14843 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14844 return;
14845 }
14846 /* If not last frame in sequence continue processing frames. */
def9c7a9 14847 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 14848 return;
def9c7a9 14849
14850 /* Send the complete sequence to the upper layer protocol */
14851 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 14852}
14853
14854/**
14855 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14856 * @phba: pointer to lpfc hba data structure.
14857 *
14858 * This routine is invoked to post rpi header templates to the
14859 * HBA consistent with the SLI-4 interface spec. This routine
14860 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14861 * SLI4_PAGE_SIZE / 64 rpi context headers (each header is 64 bytes).
14862 *
14863 * This routine does not require any locks. Its use is expected
14864 * to be during driver load or reset recovery, when driver execution
14865 * is sequential.
14866 *
14867 * Return codes
af901ca1 14868 * 0 - successful
d439d286 14869 * -EIO - The mailbox failed to complete successfully.
14870 * When this error occurs, the driver is not guaranteed
14871 * to have any rpi regions posted to the device and
14872 * must either attempt to repost the regions or take a
14873 * fatal error.
14874 **/
14875int
14876lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14877{
14878 struct lpfc_rpi_hdr *rpi_page;
14879 uint32_t rc = 0;
14880 uint16_t lrpi = 0;
14881
14882 /* SLI4 ports that support extents do not require RPI headers. */
14883 if (!phba->sli4_hba.rpi_hdrs_in_use)
14884 goto exit;
14885 if (phba->sli4_hba.extents_in_use)
14886 return -EIO;
6fb120a7 14887
6fb120a7 14888 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14889 /*
14890 * Assign the rpi headers a physical rpi only if the driver
14891 * has not initialized those resources. A port reset only
14892 * needs the headers posted.
14893 */
14894 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14895 LPFC_RPI_RSRC_RDY)
14896 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14897
14898 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14899 if (rc != MBX_SUCCESS) {
14900 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14901 "2008 Error %d posting all rpi "
14902 "headers\n", rc);
14903 rc = -EIO;
14904 break;
14905 }
14906 }
14907
14908 exit:
14909 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14910 LPFC_RPI_RSRC_RDY);
14911 return rc;
14912}
14913
14914/**
14915 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14916 * @phba: pointer to lpfc hba data structure.
14917 * @rpi_page: pointer to the rpi memory region.
14918 *
14919 * This routine is invoked to post a single rpi header to the
14920 * HBA consistent with the SLI-4 interface spec. This memory region
14921 * maps up to 64 rpi context regions.
14922 *
14923 * Return codes
af901ca1 14924 * 0 - successful
14925 * -ENOMEM - No available memory
14926 * -EIO - The mailbox failed to complete successfully.
14927 **/
14928int
14929lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14930{
14931 LPFC_MBOXQ_t *mboxq;
14932 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14933 uint32_t rc = 0;
14934 uint32_t shdr_status, shdr_add_status;
14935 union lpfc_sli4_cfg_shdr *shdr;
14936
14937 /* SLI4 ports that support extents do not require RPI headers. */
14938 if (!phba->sli4_hba.rpi_hdrs_in_use)
14939 return rc;
14940 if (phba->sli4_hba.extents_in_use)
14941 return -EIO;
14942
14943 /* The port is notified of the header region via a mailbox command. */
14944 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14945 if (!mboxq) {
14946 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14947 "2001 Unable to allocate memory for issuing "
14948 "SLI_CONFIG_SPECIAL mailbox command\n");
14949 return -ENOMEM;
14950 }
14951
14952 /* Post all rpi memory regions to the port. */
14953 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14954 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14955 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14956 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14957 sizeof(struct lpfc_sli4_cfg_mhdr),
14958 LPFC_SLI4_MBX_EMBED);
14959
14960
14961 /* Post the physical rpi to the port for this rpi header. */
14962 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14963 rpi_page->start_rpi);
14964 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14965 hdr_tmpl, rpi_page->page_count);
14966
14967 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14968 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 14969 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14970 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14971 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14972 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14973 if (rc != MBX_TIMEOUT)
14974 mempool_free(mboxq, phba->mbox_mem_pool);
14975 if (shdr_status || shdr_add_status || rc) {
14976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14977 "2514 POST_RPI_HDR mailbox failed with "
14978 "status x%x add_status x%x, mbx status x%x\n",
14979 shdr_status, shdr_add_status, rc);
14980 rc = -ENXIO;
14981 }
14982 return rc;
14983}
14984
14985/**
14986 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14987 * @phba: pointer to lpfc hba data structure.
14988 *
14989 * This routine is invoked to allocate the next available rpi from the
14990 * driver's rpi bitmask. If the pool of unused rpis runs low, it also
14991 * posts another rpi header page to the port.
14993 *
14994 * Returns
af901ca1 14995 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
14996 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
14997 **/
14998int
14999lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15000{
15001 unsigned long rpi;
15002 uint16_t max_rpi, rpi_limit;
15003 uint16_t rpi_remaining, lrpi = 0;
15004 struct lpfc_rpi_hdr *rpi_hdr;
15005
15006 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
15007 rpi_limit = phba->sli4_hba.next_rpi;
15008
15009 /*
15010 * Fetch the next logical rpi. Because this index is logical,
15011 * the driver starts at 0 each time.
15012 */
15013 spin_lock_irq(&phba->hbalock);
15014 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
15015 if (rpi >= rpi_limit)
15016 rpi = LPFC_RPI_ALLOC_ERROR;
15017 else {
15018 set_bit(rpi, phba->sli4_hba.rpi_bmask);
15019 phba->sli4_hba.max_cfg_param.rpi_used++;
15020 phba->sli4_hba.rpi_count++;
15021 }
15022
15023 /*
15024 * Don't try to allocate more rpi header regions if the device limit
6d368e53 15025 * has been exhausted.
15026 */
15027 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
15028 (phba->sli4_hba.rpi_count >= max_rpi)) {
15029 spin_unlock_irq(&phba->hbalock);
15030 return rpi;
15031 }
15032
15033 /*
15034 * RPI header postings are not required for SLI4 ports capable of
15035 * extents.
15036 */
15037 if (!phba->sli4_hba.rpi_hdrs_in_use) {
15038 spin_unlock_irq(&phba->hbalock);
15039 return rpi;
15040 }
15041
15042 /*
15043 * If the driver is running low on rpi resources, allocate another
15044 * page now. Note that the next_rpi value is used because
15045 * it represents how many are actually in use whereas max_rpi notes
15046 * how many are supported max by the device.
15047 */
6d368e53 15048 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15049 spin_unlock_irq(&phba->hbalock);
15050 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15051 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15052 if (!rpi_hdr) {
15053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15054 "2002 Error Could not grow rpi "
15055 "count\n");
15056 } else {
15057 lrpi = rpi_hdr->start_rpi;
15058 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15059 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15060 }
15061 }
15062
15063 return rpi;
15064}
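
/*
 * Editor's sketch, not driver code: a userspace analogue of the rpi
 * bitmask allocator above. -1 plays the role of LPFC_RPI_ALLOC_ERROR;
 * the real routine additionally grows the rpi header pool when the
 * count of free rpis drops below a low-water mark.
 */
#include <limits.h>

#define SK_RPI_LIMIT 256
static unsigned long sk_rpi_bmask[SK_RPI_LIMIT /
				  (sizeof(unsigned long) * CHAR_BIT)];

static int sk_alloc_rpi(void)
{
	const unsigned int bits = sizeof(unsigned long) * CHAR_BIT;

	for (unsigned int rpi = 0; rpi < SK_RPI_LIMIT; rpi++) {
		unsigned long mask = 1UL << (rpi % bits);

		if (!(sk_rpi_bmask[rpi / bits] & mask)) {
			sk_rpi_bmask[rpi / bits] |= mask;  /* mark in use */
			return (int)rpi;
		}
	}
	return -1;	/* exhausted */
}

static void sk_free_rpi(int rpi)
{
	const unsigned int bits = sizeof(unsigned long) * CHAR_BIT;

	if (rpi >= 0 && rpi < SK_RPI_LIMIT)
		sk_rpi_bmask[rpi / bits] &= ~(1UL << (rpi % bits));
}
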
15065
15066/**
15067 * __lpfc_sli4_free_rpi - Release an rpi for reuse (hbalock held by caller).
15068 * @phba: pointer to lpfc hba data structure.
15069 *
15070 * This routine is invoked to release an rpi to the pool of
15071 * available rpis maintained by the driver.
15072 **/
15073void
15074__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15075{
15076 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15077 phba->sli4_hba.rpi_count--;
15078 phba->sli4_hba.max_cfg_param.rpi_used--;
15079 }
15080}
15081
15082/**
15083 * lpfc_sli4_free_rpi - Release an rpi for reuse.
15084 * @phba: pointer to lpfc hba data structure.
15085 *
15086 * This routine is invoked to release an rpi to the pool of
15087 * available rpis maintained by the driver.
15088 **/
15089void
15090lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15091{
15092 spin_lock_irq(&phba->hbalock);
d7c47992 15093 __lpfc_sli4_free_rpi(phba, rpi);
15094 spin_unlock_irq(&phba->hbalock);
15095}
15096
15097/**
15098 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
15099 * @phba: pointer to lpfc hba data structure.
15100 *
15101 * This routine is invoked to remove the memory region that
15102 * provided rpi via a bitmask.
15103 **/
15104void
15105lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
15106{
15107 kfree(phba->sli4_hba.rpi_bmask);
15108 kfree(phba->sli4_hba.rpi_ids);
15109 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
15110}
15111
15112/**
15113 * lpfc_sli4_resume_rpi - Resume the rpi associated with a remote node
15114 * @ndlp: pointer to the lpfc nodelist data structure.
 * @cmpl: optional mailbox completion handler.
 * @arg: argument passed back through the mailbox context.
15115 *
15116 * This routine is invoked to issue a RESUME_RPI mailbox command to the
15117 * port for the rpi associated with @ndlp.
15118 **/
15119int
15120lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
15121 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
15122{
15123 LPFC_MBOXQ_t *mboxq;
15124 struct lpfc_hba *phba = ndlp->phba;
15125 int rc;
15126
15127 /* The port is notified of the rpi resume request via a mailbox command. */
15128 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15129 if (!mboxq)
15130 return -ENOMEM;
15131
15132 /* Construct the RESUME_RPI mailbox command for this rpi. */
15133 lpfc_resume_rpi(mboxq, ndlp);
15134 if (cmpl) {
15135 mboxq->mbox_cmpl = cmpl;
15136 mboxq->context1 = arg;
15137 mboxq->context2 = ndlp;
15138 } else
15139 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 15140 mboxq->vport = ndlp->vport;
15141 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15142 if (rc == MBX_NOT_FINISHED) {
15143 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15144 "2010 Resume RPI Mailbox failed "
15145 "status %d, mbxStatus x%x\n", rc,
15146 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15147 mempool_free(mboxq, phba->mbox_mem_pool);
15148 return -EIO;
15149 }
15150 return 0;
15151}
15152
15153/**
15154 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 15155 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 15156 *
76a95d75 15157 * This routine is invoked to activate a vpi with the port.
15158 *
15159 * Returns:
15160 * 0 success
15161 * -Evalue otherwise
15162 **/
15163int
76a95d75 15164lpfc_sli4_init_vpi(struct lpfc_vport *vport)
15165{
15166 LPFC_MBOXQ_t *mboxq;
15167 int rc = 0;
6a9c52cf 15168 int retval = MBX_SUCCESS;
6fb120a7 15169 uint32_t mbox_tmo;
76a95d75 15170 struct lpfc_hba *phba = vport->phba;
15171 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15172 if (!mboxq)
15173 return -ENOMEM;
76a95d75 15174 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 15175 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 15176 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 15177 if (rc != MBX_SUCCESS) {
76a95d75 15178 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
15179 "2022 INIT VPI Mailbox failed "
15180 "status %d, mbxStatus x%x\n", rc,
15181 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 15182 retval = -EIO;
6fb120a7 15183 }
6a9c52cf 15184 if (rc != MBX_TIMEOUT)
76a95d75 15185 mempool_free(mboxq, vport->phba->mbox_mem_pool);
15186
15187 return retval;
15188}
15189
15190/**
15191 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
15192 * @phba: pointer to lpfc hba data structure.
15193 * @mboxq: Pointer to mailbox object.
15194 *
15195 * This routine is invoked to manually add a single FCF record. The caller
15196 * must pass a completely initialized FCF_Record. This routine takes
15197 * care of the nonembedded mailbox operations.
15198 **/
15199static void
15200lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
15201{
15202 void *virt_addr;
15203 union lpfc_sli4_cfg_shdr *shdr;
15204 uint32_t shdr_status, shdr_add_status;
15205
15206 virt_addr = mboxq->sge_array->addr[0];
15207 /* The IOCTL status is embedded in the mailbox subheader. */
15208 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
15209 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15210 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15211
15212 if ((shdr_status || shdr_add_status) &&
15213 (shdr_status != STATUS_FCF_IN_USE))
15214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15215 "2558 ADD_FCF_RECORD mailbox failed with "
15216 "status x%x add_status x%x\n",
15217 shdr_status, shdr_add_status);
15218
15219 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15220}
15221
15222/**
15223 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
15224 * @phba: pointer to lpfc hba data structure.
15225 * @fcf_record: pointer to the initialized fcf record to add.
15226 *
15227 * This routine is invoked to manually add a single FCF record. The caller
15228 * must pass a completely initialized FCF_Record. This routine takes
15229 * care of the nonembedded mailbox operations.
15230 **/
15231int
15232lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
15233{
15234 int rc = 0;
15235 LPFC_MBOXQ_t *mboxq;
15236 uint8_t *bytep;
15237 void *virt_addr;
15238 dma_addr_t phys_addr;
15239 struct lpfc_mbx_sge sge;
15240 uint32_t alloc_len, req_len;
15241 uint32_t fcfindex;
15242
15243 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15244 if (!mboxq) {
15245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15246 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
15247 return -ENOMEM;
15248 }
15249
15250 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
15251 sizeof(uint32_t);
15252
15253 /* Allocate DMA memory and set up the non-embedded mailbox command */
15254 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15255 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
15256 req_len, LPFC_SLI4_MBX_NEMBED);
15257 if (alloc_len < req_len) {
15258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15259 "2523 Allocated DMA memory size (x%x) is "
15260 "less than the requested DMA memory "
15261 "size (x%x)\n", alloc_len, req_len);
15262 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15263 return -ENOMEM;
15264 }
15265
15266 /*
15267 * Get the first SGE entry from the non-embedded DMA memory. This
15268 * routine only uses a single SGE.
15269 */
15270 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
15271 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
15272 virt_addr = mboxq->sge_array->addr[0];
15273 /*
15274 * Configure the FCF record for FCFI 0. This is the driver's
15275 * hardcoded default and gets used in nonFIP mode.
15276 */
15277 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
15278 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
15279 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
15280
15281 /*
15282 * Copy the fcf_index and the FCF Record Data. The data starts after
15283 * the FCoE header plus word10. The data copy needs to be endian
15284 * correct.
15285 */
15286 bytep += sizeof(uint32_t);
15287 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
15288 mboxq->vport = phba->pport;
15289 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
15290 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15291 if (rc == MBX_NOT_FINISHED) {
15292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15293 "2515 ADD_FCF_RECORD mailbox failed with "
15294 "status 0x%x\n", rc);
15295 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15296 rc = -EIO;
15297 } else
15298 rc = 0;
15299
15300 return rc;
15301}
15302
15303/**
15304 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
15305 * @phba: pointer to lpfc hba data structure.
15306 * @fcf_record: pointer to the fcf record to write the default data.
15307 * @fcf_index: FCF table entry index.
15308 *
15309 * This routine is invoked to build the driver's default FCF record. The
15310 * values used are hardcoded. This routine handles memory initialization.
15311 *
15312 **/
15313void
15314lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
15315 struct fcf_record *fcf_record,
15316 uint16_t fcf_index)
15317{
15318 memset(fcf_record, 0, sizeof(struct fcf_record));
15319 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
15320 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
15321 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
15322 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
15323 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
15324 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
15325 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
15326 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
15327 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
15328 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
15329 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
15330 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
15331 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
0c287589 15332 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
15333 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
15334 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
15335 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
15336 /* Set the VLAN bit map */
15337 if (phba->valid_vlan) {
15338 fcf_record->vlan_bitmap[phba->vlan_id / 8]
15339 = 1 << (phba->vlan_id % 8);
15340 }
15341}
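/*
 * Usage sketch (illustrative only, not driver code): a caller installing
 * the driver's default FCF entry at index 0 would pair the two routines
 * above, e.g.:
 *
 *	struct fcf_record fcf_record;
 *	int rc;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 *	if (rc)
 *		handle the -ENOMEM or -EIO failure
 */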
15342
15343/**
0c9ab6f5 15344 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
15345 * @phba: pointer to lpfc hba data structure.
15346 * @fcf_index: FCF table entry offset.
15347 *
15348 * This routine is invoked to scan the entire FCF table by reading each FCF
15349 * record and processing it one at a time, starting from the @fcf_index
15350 * for initial FCF discovery or fast FCF failover rediscovery.
15351 *
25985edc 15352 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5 15353 * otherwise.
15354 **/
15355int
0c9ab6f5 15356lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15357{
15358 int rc = 0, error;
15359 LPFC_MBOXQ_t *mboxq;
6fb120a7 15360
32b9793f 15361 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
80c17849 15362 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
15363 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15364 if (!mboxq) {
15365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15366 "2000 Failed to allocate mbox for "
15367 "READ_FCF cmd\n");
4d9ab994 15368 error = -ENOMEM;
0c9ab6f5 15369 goto fail_fcf_scan;
6fb120a7 15370 }
ecfd03c6 15371 /* Construct the read FCF record mailbox command */
0c9ab6f5 15372 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15373 if (rc) {
15374 error = -EINVAL;
0c9ab6f5 15375 goto fail_fcf_scan;
6fb120a7 15376 }
ecfd03c6 15377 /* Issue the mailbox command asynchronously */
6fb120a7 15378 mboxq->vport = phba->pport;
0c9ab6f5 15379 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
15380
15381 spin_lock_irq(&phba->hbalock);
15382 phba->hba_flag |= FCF_TS_INPROG;
15383 spin_unlock_irq(&phba->hbalock);
15384
6fb120a7 15385 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
ecfd03c6 15386 if (rc == MBX_NOT_FINISHED)
6fb120a7 15387 error = -EIO;
ecfd03c6 15388 else {
15389 /* Reset eligible FCF count for new scan */
15390 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
999d813f 15391 phba->fcf.eligible_fcf_cnt = 0;
6fb120a7 15392 error = 0;
32b9793f 15393 }
0c9ab6f5 15394fail_fcf_scan:
15395 if (error) {
15396 if (mboxq)
15397 lpfc_sli4_mbox_cmd_free(phba, mboxq);
a93ff37a 15398 /* FCF scan failed, clear FCF_TS_INPROG flag */
4d9ab994 15399 spin_lock_irq(&phba->hbalock);
a93ff37a 15400 phba->hba_flag &= ~FCF_TS_INPROG;
15401 spin_unlock_irq(&phba->hbalock);
15402 }
15403 return error;
15404}
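/*
 * Usage sketch (illustrative only): an initial scan of the whole FCF table
 * starts from the first entry; later records are walked from the
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec() completion, not by the caller:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		scan was not started; FCF_TS_INPROG is already cleared
 */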
a0c87cbd 15405
0c9ab6f5 15406/**
a93ff37a 15407 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
15408 * @phba: pointer to lpfc hba data structure.
15409 * @fcf_index: FCF table entry offset.
15410 *
15411 * This routine is invoked to read an FCF record indicated by @fcf_index
a93ff37a 15412 * and to use it for FLOGI roundrobin FCF failover.
0c9ab6f5 15413 *
25985edc 15414 * Return 0 if the mailbox command is submitted successfully, non-zero
15415 * otherwise.
15416 **/
15417int
15418lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15419{
15420 int rc = 0, error;
15421 LPFC_MBOXQ_t *mboxq;
15422
15423 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15424 if (!mboxq) {
15425 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15426 "2763 Failed to allocate mbox for "
15427 "READ_FCF cmd\n");
15428 error = -ENOMEM;
15429 goto fail_fcf_read;
15430 }
15431 /* Construct the read FCF record mailbox command */
15432 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15433 if (rc) {
15434 error = -EINVAL;
15435 goto fail_fcf_read;
15436 }
15437 /* Issue the mailbox command asynchronously */
15438 mboxq->vport = phba->pport;
15439 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
15440 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15441 if (rc == MBX_NOT_FINISHED)
15442 error = -EIO;
15443 else
15444 error = 0;
15445
15446fail_fcf_read:
15447 if (error && mboxq)
15448 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15449 return error;
15450}
15451
15452/**
15453 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
15454 * @phba: pointer to lpfc hba data structure.
15455 * @fcf_index: FCF table entry offset.
15456 *
15457 * This routine is invoked to read an FCF record indicated by @fcf_index to
a93ff37a 15458 * determine whether it's eligible for FLOGI roundrobin failover list.
0c9ab6f5 15459 *
25985edc 15460 * Return 0 if the mailbox command is submitted successfully, non-zero
15461 * otherwise.
15462 **/
15463int
15464lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15465{
15466 int rc = 0, error;
15467 LPFC_MBOXQ_t *mboxq;
15468
15469 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15470 if (!mboxq) {
15471 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15472 "2758 Failed to allocate mbox for "
15473 "READ_FCF cmd\n");
15474 error = -ENOMEM;
15475 goto fail_fcf_read;
15476 }
15477 /* Construct the read FCF record mailbox command */
15478 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15479 if (rc) {
15480 error = -EINVAL;
15481 goto fail_fcf_read;
15482 }
15483 /* Issue the mailbox command asynchronously */
15484 mboxq->vport = phba->pport;
15485 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15486 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15487 if (rc == MBX_NOT_FINISHED)
15488 error = -EIO;
15489 else
15490 error = 0;
15491
15492fail_fcf_read:
15493 if (error && mboxq)
15494 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15495 return error;
15496}
15497
15498/**
15499 * lpfc_check_next_fcf_pri_level
15500 * @phba: pointer to the lpfc_hba struct for this port.
15501 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
15502 * routine when the rr_bmask is empty. The FCF indices are put into the
15503 * rr_bmask based on their priority level, starting from the highest priority
15504 * down to the lowest. The most likely FCF candidate will be in the highest
15505 * priority group. When this routine is called it searches the fcf_pri list
15506 * for the next lowest priority group and repopulates the rr_bmask with only
15507 * those fcf_indexes.
15508 * Returns:
15509 * 1=success 0=failure
15510 **/
15511int
15512lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15513{
15514 uint16_t next_fcf_pri;
15515 uint16_t last_index;
15516 struct lpfc_fcf_pri *fcf_pri;
15517 int rc;
15518 int ret = 0;
15519
15520 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15521 LPFC_SLI4_FCF_TBL_INDX_MAX);
15522 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15523 "3060 Last IDX %d\n", last_index);
15524
15525 /* Verify the priority list has 2 or more entries */
15526 spin_lock_irq(&phba->hbalock);
15527 if (list_empty(&phba->fcf.fcf_pri_list) ||
15528 list_is_singular(&phba->fcf.fcf_pri_list)) {
15529 spin_unlock_irq(&phba->hbalock);
15530 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15531 "3061 Last IDX %d\n", last_index);
15532 return 0; /* Empty rr list */
15533 }
15534 spin_unlock_irq(&phba->hbalock);
15535
15536 next_fcf_pri = 0;
15537 /*
15538 * Clear the rr_bmask and set all of the bits that are at this
15539 * priority.
15540 */
15541 memset(phba->fcf.fcf_rr_bmask, 0,
15542 sizeof(*phba->fcf.fcf_rr_bmask));
15543 spin_lock_irq(&phba->hbalock);
15544 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15545 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15546 continue;
15547 /*
15548 * The first priority whose FLOGI has not failed
15549 * will be the highest.
15550 */
15551 if (!next_fcf_pri)
15552 next_fcf_pri = fcf_pri->fcf_rec.priority;
15553 spin_unlock_irq(&phba->hbalock);
15554 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15555 rc = lpfc_sli4_fcf_rr_index_set(phba,
15556 fcf_pri->fcf_rec.fcf_index);
15557 if (rc)
15558 return 0;
15559 }
15560 spin_lock_irq(&phba->hbalock);
15561 }
15562 /*
15563 * If next_fcf_pri was not set above and the list is not empty, then
15564 * FLOGI has failed on all of them. So reset the FLOGI failed flag
4907cb7b 15565 * and start at the beginning.
15566 */
15567 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
15568 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15569 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
15570 /*
15571 * The first priority whose FLOGI has not failed
15572 * will be the highest.
15573 */
15574 if (!next_fcf_pri)
15575 next_fcf_pri = fcf_pri->fcf_rec.priority;
15576 spin_unlock_irq(&phba->hbalock);
15577 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15578 rc = lpfc_sli4_fcf_rr_index_set(phba,
15579 fcf_pri->fcf_rec.fcf_index);
15580 if (rc)
15581 return 0;
15582 }
15583 spin_lock_irq(&phba->hbalock);
15584 }
15585 } else
15586 ret = 1;
15587 spin_unlock_irq(&phba->hbalock);
15588
15589 return ret;
15590}
15591/**
15592 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
15593 * @phba: pointer to lpfc hba data structure.
15594 *
15595 * This routine is to get the next eligible FCF record index in a round
15596 * robin fashion. If the next eligible FCF record index equals to the
a93ff37a 15597 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
0c9ab6f5
JS
15598 * shall be returned, otherwise, the next eligible FCF record's index
15599 * shall be returned.
15600 **/
15601uint16_t
15602lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15603{
15604 uint16_t next_fcf_index;
15605
421c6622 15606initial_priority:
3804dc84 15607 /* Search start from next bit of currently registered FCF index */
15608 next_fcf_index = phba->fcf.current_rec.fcf_indx;
15609
7d791df7 15610next_priority:
15611 /* Determine the next fcf index to check */
15612 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
15613 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15614 LPFC_SLI4_FCF_TBL_INDX_MAX,
15615 next_fcf_index);
15616
0c9ab6f5 15617 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
15618 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15619 /*
15620 * If we have wrapped then we need to clear the bits that
15621 * have been tested so that we can detect when we should
15622 * change the priority level.
15623 */
15624 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15625 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
15626 }
15627
15628
15629 /* Check roundrobin failover list empty condition */
15630 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15631 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15632 /*
15633 * If the next fcf index is not found, check if there are lower
15634 * priority level fcf's in the fcf_priority list.
15635 * Set up the rr_bmask with all of the available fcf bits
15636 * at that level and continue the selection process.
15637 */
15638 if (lpfc_check_next_fcf_pri_level(phba))
421c6622 15639 goto initial_priority;
15640 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15641 "2844 No roundrobin failover FCF available\n");
15642 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15643 return LPFC_FCOE_FCF_NEXT_NONE;
15644 else {
15645 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15646 "3063 Only FCF available idx %d, flag %x\n",
15647 next_fcf_index,
15648 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15649 return next_fcf_index;
15650 }
15651 }
15652
15653 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15654 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15655 LPFC_FCF_FLOGI_FAILED)
15656 goto next_priority;
15657
3804dc84 15658 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15659 "2845 Get next roundrobin failover FCF (x%x)\n",
15660 next_fcf_index);
15661
15662 return next_fcf_index;
15663}
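/*
 * Usage sketch (illustrative only): a roundrobin failover consumer treats
 * LPFC_FCOE_FCF_NEXT_NONE as "no eligible FCF left" and otherwise feeds
 * the returned index to lpfc_sli4_fcf_rr_read_fcf_rec():
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		give up the failover attempt
 *	else
 *		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */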
15664
15665/**
15666 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15667 * @phba: pointer to lpfc hba data structure.
15668 *
15669 * This routine sets the FCF record index in to the eligible bmask for
a93ff37a 15670 * roundrobin failover search. It checks to make sure that the index
15671 * does not go beyond the range of the driver allocated bmask dimension
15672 * before setting the bit.
15673 *
15674 * Returns 0 if the index bit successfully set, otherwise, it returns
15675 * -EINVAL.
15676 **/
15677int
15678lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15679{
15680 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15681 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15682 "2610 FCF (x%x) reached driver's book "
15683 "keeping dimension:x%x\n",
15684 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15685 return -EINVAL;
15686 }
15687 /* Set the eligible FCF record index bmask */
15688 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15689
3804dc84 15690 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 15691 "2790 Set FCF (x%x) to roundrobin FCF failover "
15692 "bmask\n", fcf_index);
15693
15694 return 0;
15695}
15696
15697/**
3804dc84 15698 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
15699 * @phba: pointer to lpfc hba data structure.
15700 *
15701 * This routine clears the FCF record index from the eligible bmask for
a93ff37a 15702 * roundrobin failover search. It checks to make sure that the index
15703 * does not go beyond the range of the driver allocated bmask dimension
15704 * before clearing the bit.
15705 **/
15706void
15707lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15708{
7d791df7 15709 struct lpfc_fcf_pri *fcf_pri;
15710 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15711 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15712 "2762 FCF (x%x) reached driver's book "
15713 "keeping dimension:x%x\n",
15714 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15715 return;
15716 }
15717 /* Clear the eligible FCF record index bmask */
15718 spin_lock_irq(&phba->hbalock);
15719 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15720 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15721 list_del_init(&fcf_pri->list);
15722 break;
15723 }
15724 }
15725 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 15726 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15727
15728 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 15729 "2791 Clear FCF (x%x) from roundrobin failover "
3804dc84 15730 "bmask\n", fcf_index);
15731}
15732
15733/**
15734 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15735 * @phba: pointer to lpfc hba data structure.
15736 *
15737 * This routine is the completion routine for the rediscover FCF table mailbox
15738 * command. If the mailbox command returned failure, it will try to stop the
15739 * FCF rediscover wait timer.
15740 **/
15741void
15742lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15743{
15744 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15745 uint32_t shdr_status, shdr_add_status;
15746
15747 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15748
15749 shdr_status = bf_get(lpfc_mbox_hdr_status,
15750 &redisc_fcf->header.cfg_shdr.response);
15751 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15752 &redisc_fcf->header.cfg_shdr.response);
15753 if (shdr_status || shdr_add_status) {
0c9ab6f5 15754 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15755 "2746 Requesting for FCF rediscovery failed "
15756 "status x%x add_status x%x\n",
15757 shdr_status, shdr_add_status);
0c9ab6f5 15758 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
fc2b989b 15759 spin_lock_irq(&phba->hbalock);
0c9ab6f5 15760 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
15761 spin_unlock_irq(&phba->hbalock);
15762 /*
15763 * CVL event triggered FCF rediscover request failed,
15764 * last resort to re-try current registered FCF entry.
15765 */
15766 lpfc_retry_pport_discovery(phba);
15767 } else {
15768 spin_lock_irq(&phba->hbalock);
0c9ab6f5 15769 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
15770 spin_unlock_irq(&phba->hbalock);
15771 /*
15772 * DEAD FCF event triggered FCF rediscover request
15773 * failed, last resort to fail over as a link down
15774 * to FCF registration.
15775 */
15776 lpfc_sli4_fcf_dead_failthrough(phba);
15777 }
15778 } else {
15779 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 15780 "2775 Start FCF rediscover quiescent timer\n");
15781 /*
15782 * Start the FCF rediscovery wait timer for pending FCF
15783 * before rescanning the FCF record table.
15784 */
15785 lpfc_fcf_redisc_wait_start_timer(phba);
0c9ab6f5 15786 }
15787
15788 mempool_free(mbox, phba->mbox_mem_pool);
15789}
15790
15791/**
3804dc84 15792 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
15793 * @phba: pointer to lpfc hba data structure.
15794 *
15795 * This routine is invoked to request for rediscovery of the entire FCF table
15796 * by the port.
15797 **/
15798int
15799lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15800{
15801 LPFC_MBOXQ_t *mbox;
15802 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15803 int rc, length;
15804
15805 /* Cancel retry delay timers to all vports before FCF rediscover */
15806 lpfc_cancel_all_vport_retry_delay_timer(phba);
15807
15808 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15809 if (!mbox) {
15810 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15811 "2745 Failed to allocate mbox for "
15812 "requesting FCF rediscover.\n");
15813 return -ENOMEM;
15814 }
15815
15816 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15817 sizeof(struct lpfc_sli4_cfg_mhdr));
15818 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15819 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15820 length, LPFC_SLI4_MBX_EMBED);
15821
15822 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15823 /* Set count to 0 for invalidating the entire FCF database */
15824 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15825
15826 /* Issue the mailbox command asynchronously */
15827 mbox->vport = phba->pport;
15828 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15829 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15830
15831 if (rc == MBX_NOT_FINISHED) {
15832 mempool_free(mbox, phba->mbox_mem_pool);
15833 return -EIO;
15834 }
15835 return 0;
15836}
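/*
 * Usage sketch (illustrative only): a caller simply requests the
 * rediscovery; lpfc_mbx_cmpl_redisc_fcf_table() then either starts the
 * quiescent wait timer (success) or takes one of the failthrough paths:
 *
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 *	if (rc)
 *		the request itself failed (-ENOMEM or -EIO)
 */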
15837
15838/**
15839 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15840 * @phba: pointer to lpfc hba data structure.
15841 *
15842 * This function is the failover routine as a last resort to the FCF DEAD
15843 * event when the driver has failed to perform fast FCF failover.
15844 **/
15845void
15846lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15847{
15848 uint32_t link_state;
15849
15850 /*
15851 * Last resort as FCF DEAD event failover will treat this as
15852 * a link down, but save the link state because we don't want
15853 * it to be changed to Link Down unless it is already down.
15854 */
15855 link_state = phba->link_state;
15856 lpfc_linkdown(phba);
15857 phba->link_state = link_state;
15858
15859 /* Unregister FCF if no devices connected to it */
15860 lpfc_unregister_unused_fcf(phba);
15861}
15862
a0c87cbd 15863/**
026abb87 15864 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
a0c87cbd 15865 * @phba: pointer to lpfc hba data structure.
026abb87 15866 * @rgn23_data: pointer to configure region 23 data.
a0c87cbd 15867 *
15868 * This function gets SLI3 port configure region 23 data through the memory dump
15869 * mailbox command. When it successfully retrieves data, the size of the data
15870 * will be returned, otherwise, 0 will be returned.
a0c87cbd 15871 **/
15872static uint32_t
15873lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15874{
15875 LPFC_MBOXQ_t *pmb = NULL;
15876 MAILBOX_t *mb;
026abb87 15877 uint32_t offset = 0;
15878 int rc;
15879
15880 if (!rgn23_data)
15881 return 0;
15882
15883 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15884 if (!pmb) {
15885 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15886 "2600 failed to allocate mailbox memory\n");
15887 return 0;
15888 }
15889 mb = &pmb->u.mb;
15890
15891 do {
15892 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15893 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15894
15895 if (rc != MBX_SUCCESS) {
15896 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15897 "2601 failed to read config "
15898 "region 23, rc 0x%x Status 0x%x\n",
15899 rc, mb->mbxStatus);
15900 mb->un.varDmp.word_cnt = 0;
15901 }
15902 /*
15903 * dump mem may return a zero when finished or we got a
15904 * mailbox error, either way we are done.
15905 */
15906 if (mb->un.varDmp.word_cnt == 0)
15907 break;
15908 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15909 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15910
15911 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15912 rgn23_data + offset,
15913 mb->un.varDmp.word_cnt);
15914 offset += mb->un.varDmp.word_cnt;
15915 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15916
15917 mempool_free(pmb, phba->mbox_mem_pool);
15918 return offset;
15919}
15920
15921/**
15922 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15923 * @phba: pointer to lpfc hba data structure.
15924 * @rgn23_data: pointer to configure region 23 data.
15925 *
15926 * This function gets SLI4 port configure region 23 data through the memory dump
15927 * mailbox command. When it successfully retrieves data, the size of the data
15928 * will be returned, otherwise, 0 will be returned.
15929 **/
15930static uint32_t
15931lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15932{
15933 LPFC_MBOXQ_t *mboxq = NULL;
15934 struct lpfc_dmabuf *mp = NULL;
15935 struct lpfc_mqe *mqe;
15936 uint32_t data_length = 0;
15937 int rc;
15938
15939 if (!rgn23_data)
15940 return 0;
15941
15942 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15943 if (!mboxq) {
15944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15945 "3105 failed to allocate mailbox memory\n");
15946 return 0;
15947 }
15948
15949 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15950 goto out;
15951 mqe = &mboxq->u.mqe;
15952 mp = (struct lpfc_dmabuf *) mboxq->context1;
15953 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15954 if (rc)
15955 goto out;
15956 data_length = mqe->un.mb_words[5];
15957 if (data_length == 0)
15958 goto out;
15959 if (data_length > DMP_RGN23_SIZE) {
15960 data_length = 0;
15961 goto out;
15962 }
15963 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15964out:
15965 mempool_free(mboxq, phba->mbox_mem_pool);
15966 if (mp) {
15967 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15968 kfree(mp);
15969 }
15970 return data_length;
15971}
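/*
 * Usage sketch (illustrative only): both region 23 helpers share one
 * contract -- the caller supplies a DMP_RGN23_SIZE buffer and gets back
 * the number of valid bytes, with 0 meaning no usable data. This is the
 * pattern lpfc_sli_read_link_ste() below relies on:
 *
 *	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	if (rgn23_data)
 *		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
 */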
15972
15973/**
15974 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15975 * @phba: pointer to lpfc hba data structure.
15976 *
15977 * This function reads region 23 and parses the TLV for port status to
15978 * decide if the user disabled the port. If the TLV indicates the
15979 * port is disabled, the hba_flag is set accordingly.
15980 **/
15981void
15982lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15983{
15984 uint8_t *rgn23_data = NULL;
15985 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15986 uint32_t offset = 0;
15987
15988 /* Get adapter Region 23 data */
15989 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15990 if (!rgn23_data)
15991 goto out;
15992
15993 if (phba->sli_rev < LPFC_SLI_REV4)
15994 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
15995 else {
15996 if_type = bf_get(lpfc_sli_intf_if_type,
15997 &phba->sli4_hba.sli_intf);
15998 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
15999 goto out;
16000 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
16001 }
16002
16003 if (!data_size)
16004 goto out;
16005
16006 /* Check the region signature first */
16007 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
16008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16009 "2619 Config region 23 has bad signature\n");
16010 goto out;
16011 }
16012 offset += 4;
16013
16014 /* Check the data structure version */
16015 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
16016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16017 "2620 Config region 23 has bad version\n");
16018 goto out;
16019 }
16020 offset += 4;
16021
16022 /* Parse TLV entries in the region */
16023 while (offset < data_size) {
16024 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
16025 break;
16026 /*
16027 * If the TLV is not driver specific TLV or driver id is
16028 * not linux driver id, skip the record.
16029 */
16030 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
16031 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
16032 (rgn23_data[offset + 3] != 0)) {
16033 offset += rgn23_data[offset + 1] * 4 + 4;
16034 continue;
16035 }
16036
16037 /* Driver found a driver specific TLV in the config region */
16038 sub_tlv_len = rgn23_data[offset + 1] * 4;
16039 offset += 4;
16040 tlv_offset = 0;
16041
16042 /*
16043 * Search for configured port state sub-TLV.
16044 */
16045 while ((offset < data_size) &&
16046 (tlv_offset < sub_tlv_len)) {
16047 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
16048 offset += 4;
16049 tlv_offset += 4;
16050 break;
16051 }
16052 if (rgn23_data[offset] != PORT_STE_TYPE) {
16053 offset += rgn23_data[offset + 1] * 4 + 4;
16054 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
16055 continue;
16056 }
16057
16058 /* This HBA contains PORT_STE configured */
16059 if (!rgn23_data[offset + 2])
16060 phba->hba_flag |= LINK_DISABLED;
16061
16062 goto out;
16063 }
16064 }
026abb87 16065
a0c87cbd 16066out:
16067 kfree(rgn23_data);
16068 return;
16069}
695a814e 16070
16071/**
16072 * lpfc_wr_object - write an object to the firmware
16073 * @phba: HBA structure that indicates port to create a queue on.
16074 * @dmabuf_list: list of dmabufs to write to the port.
16075 * @size: the total byte value of the objects to write to the port.
16076 * @offset: the current offset to be used to start the transfer.
16077 *
16078 * This routine will create a wr_object mailbox command to send to the port.
16079 * the mailbox command will be constructed using the dma buffers described in
16080 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
16081 * BDEs that the imbedded mailbox can support. The @offset variable will be
16082 * used to indicate the starting offset of the transfer and will also return
16083 * the offset after the write object mailbox has completed. @size is used to
16084 * determine the end of the object and whether the eof bit should be set.
16085 *
16086 * Return 0 is successful and offset will contain the the new offset to use
16087 * for the next write.
16088 * Return negative value for error cases.
16089 **/
16090int
16091lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
16092 uint32_t size, uint32_t *offset)
16093{
16094 struct lpfc_mbx_wr_object *wr_object;
16095 LPFC_MBOXQ_t *mbox;
16096 int rc = 0, i = 0;
16097 uint32_t shdr_status, shdr_add_status;
16098 uint32_t mbox_tmo;
16099 union lpfc_sli4_cfg_shdr *shdr;
16100 struct lpfc_dmabuf *dmabuf;
16101 uint32_t written = 0;
16102
16103 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16104 if (!mbox)
16105 return -ENOMEM;
16106
16107 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16108 LPFC_MBOX_OPCODE_WRITE_OBJECT,
16109 sizeof(struct lpfc_mbx_wr_object) -
16110 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16111
16112 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
16113 wr_object->u.request.write_offset = *offset;
16114 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
16115 wr_object->u.request.object_name[0] =
16116 cpu_to_le32(wr_object->u.request.object_name[0]);
16117 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
16118 list_for_each_entry(dmabuf, dmabuf_list, list) {
16119 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
16120 break;
16121 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
16122 wr_object->u.request.bde[i].addrHigh =
16123 putPaddrHigh(dmabuf->phys);
16124 if (written + SLI4_PAGE_SIZE >= size) {
16125 wr_object->u.request.bde[i].tus.f.bdeSize =
16126 (size - written);
16127 written += (size - written);
16128 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
16129 } else {
16130 wr_object->u.request.bde[i].tus.f.bdeSize =
16131 SLI4_PAGE_SIZE;
16132 written += SLI4_PAGE_SIZE;
16133 }
16134 i++;
16135 }
16136 wr_object->u.request.bde_count = i;
16137 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
16138 if (!phba->sli4_hba.intr_enable)
16139 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16140 else {
a183a15f 16141 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16142 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16143 }
16144 /* The IOCTL status is embedded in the mailbox subheader. */
16145 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
16146 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16147 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16148 if (rc != MBX_TIMEOUT)
16149 mempool_free(mbox, phba->mbox_mem_pool);
16150 if (shdr_status || shdr_add_status || rc) {
16151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16152 "3025 Write Object mailbox failed with "
16153 "status x%x add_status x%x, mbx status x%x\n",
16154 shdr_status, shdr_add_status, rc);
16155 rc = -ENXIO;
16156 } else
16157 *offset += wr_object->u.response.actual_write_length;
16158 return rc;
16159}
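/*
 * Usage sketch (illustrative only): a firmware download loop would issue
 * one lpfc_wr_object() call per chunk of dma buffers, letting @offset
 * advance by the actual_write_length the port reports on each pass:
 *
 *	uint32_t offset = 0;
 *
 *	build dmabuf_list covering the next chunk of the image;
 *	rc = lpfc_wr_object(phba, &dmabuf_list, image_size, &offset);
 *	repeat until offset reaches image_size or rc is negative
 *
 * (image_size and dmabuf_list here are assumed caller-side names.)
 */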
16160
16161/**
16162 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
16163 * @vport: pointer to vport data structure.
16164 *
16165 * This function iterates through the mailboxq and cleans up all REG_LOGIN
16166 * and REG_VPI mailbox commands associated with the vport. This function
16167 * is called when the driver wants to restart discovery of the vport due to
16168 * a Clear Virtual Link event.
16169 **/
16170void
16171lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
16172{
16173 struct lpfc_hba *phba = vport->phba;
16174 LPFC_MBOXQ_t *mb, *nextmb;
16175 struct lpfc_dmabuf *mp;
78730cfe 16176 struct lpfc_nodelist *ndlp;
d439d286 16177 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 16178 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 16179 LIST_HEAD(mbox_cmd_list);
63e801ce 16180 uint8_t restart_loop;
695a814e 16181
d439d286 16182 /* Clean up internally queued mailbox commands with the vport */
16183 spin_lock_irq(&phba->hbalock);
16184 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
16185 if (mb->vport != vport)
16186 continue;
16187
16188 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
16189 (mb->u.mb.mbxCommand != MBX_REG_VPI))
16190 continue;
16191
16192 list_del(&mb->list);
16193 list_add_tail(&mb->list, &mbox_cmd_list);
16194 }
16195 /* Clean up active mailbox command with the vport */
16196 mb = phba->sli.mbox_active;
16197 if (mb && (mb->vport == vport)) {
16198 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
16199 (mb->u.mb.mbxCommand == MBX_REG_VPI))
16200 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16201 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16202 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
16203 /* Put reference count for delayed processing */
16204 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
16205 /* Unregister the RPI when mailbox complete */
16206 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
16207 }
16208 }
16209 /* Cleanup any mailbox completions which are not yet processed */
16210 do {
16211 restart_loop = 0;
16212 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
16213 /*
16214 * If this mailox is already processed or it is
16215 * for another vport ignore it.
16216 */
16217 if ((mb->vport != vport) ||
16218 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
16219 continue;
16220
16221 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
16222 (mb->u.mb.mbxCommand != MBX_REG_VPI))
16223 continue;
16224
16225 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16226 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16227 ndlp = (struct lpfc_nodelist *)mb->context2;
16228 /* Unregister the RPI when mailbox complete */
16229 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
16230 restart_loop = 1;
16231 spin_unlock_irq(&phba->hbalock);
16232 spin_lock(shost->host_lock);
16233 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16234 spin_unlock(shost->host_lock);
16235 spin_lock_irq(&phba->hbalock);
16236 break;
16237 }
16238 }
16239 } while (restart_loop);
16240
16241 spin_unlock_irq(&phba->hbalock);
16242
16243 /* Release the cleaned-up mailbox commands */
16244 while (!list_empty(&mbox_cmd_list)) {
16245 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
16246 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16247 mp = (struct lpfc_dmabuf *) (mb->context1);
16248 if (mp) {
16249 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
16250 kfree(mp);
16251 }
78730cfe 16252 ndlp = (struct lpfc_nodelist *) mb->context2;
d439d286 16253 mb->context2 = NULL;
78730cfe 16254 if (ndlp) {
ec21b3b0 16255 spin_lock(shost->host_lock);
589a52d6 16256 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 16257 spin_unlock(shost->host_lock);
78730cfe 16258 lpfc_nlp_put(ndlp);
78730cfe 16259 }
695a814e 16260 }
16261 mempool_free(mb, phba->mbox_mem_pool);
16262 }
16263
16264 /* Release the ndlp with the cleaned-up active mailbox command */
16265 if (act_mbx_ndlp) {
16266 spin_lock(shost->host_lock);
16267 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16268 spin_unlock(shost->host_lock);
16269 lpfc_nlp_put(act_mbx_ndlp);
695a814e 16270 }
16271}
16272
16273/**
16274 * lpfc_drain_txq - Drain the txq
16275 * @phba: Pointer to HBA context object.
16276 *
16277 * This function attempts to submit IOCBs on the txq
16278 * to the adapter. For SLI4 adapters, the txq contains
16279 * ELS IOCBs that have been deferred because there
16280 * are no SGLs. This congestion can occur with large
16281 * vport counts during node discovery.
16282 **/
16283
16284uint32_t
16285lpfc_drain_txq(struct lpfc_hba *phba)
16286{
16287 LIST_HEAD(completions);
16288 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
16289	struct lpfc_iocbq *piocbq = NULL;
16290 unsigned long iflags = 0;
16291 char *fail_msg = NULL;
16292 struct lpfc_sglq *sglq;
16293 union lpfc_wqe wqe;
0e9bb8d7 16294 int txq_cnt = 0;
16295
16296 spin_lock_irqsave(&phba->hbalock, iflags);
16297 list_for_each_entry(piocbq, &pring->txq, list) {
16298 txq_cnt++;
16299 }
16300
16301 if (txq_cnt > pring->txq_max)
16302 pring->txq_max = txq_cnt;
16303
16304 spin_unlock_irqrestore(&phba->hbalock, iflags);
16305
0e9bb8d7 16306 while (!list_empty(&pring->txq)) {
16307 spin_lock_irqsave(&phba->hbalock, iflags);
16308
19ca7609 16309 piocbq = lpfc_sli_ringtx_get(phba, pring);
16310 if (!piocbq) {
16311 spin_unlock_irqrestore(&phba->hbalock, iflags);
16312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16313 "2823 txq empty and txq_cnt is %d\n ",
0e9bb8d7 16314 txq_cnt);
16315 break;
16316 }
19ca7609 16317 sglq = __lpfc_sli_get_sglq(phba, piocbq);
2a9bf3d0 16318 if (!sglq) {
19ca7609 16319 __lpfc_sli_ringtx_put(phba, pring, piocbq);
16320 spin_unlock_irqrestore(&phba->hbalock, iflags);
16321 break;
2a9bf3d0 16322 }
0e9bb8d7 16323 txq_cnt--;
16324
16325 /* The xri and iocb resources secured,
16326 * attempt to issue request
16327 */
6d368e53 16328 piocbq->sli4_lxritag = sglq->sli4_lxritag;
16329 piocbq->sli4_xritag = sglq->sli4_xritag;
16330 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
16331 fail_msg = "to convert bpl to sgl";
16332 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
16333 fail_msg = "to convert iocb to wqe";
16334 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
16335 fail_msg = " - Wq is full";
16336 else
16337 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
16338
16339 if (fail_msg) {
16340 /* Failed means we can't issue and need to cancel */
16341 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16342 "2822 IOCB failed %s iotag 0x%x "
16343 "xri 0x%x\n",
16344 fail_msg,
16345 piocbq->iotag, piocbq->sli4_xritag);
16346 list_add_tail(&piocbq->list, &completions);
16347 }
16348 spin_unlock_irqrestore(&phba->hbalock, iflags);
16349 }
16350
16351 /* Cancel all the IOCBs that cannot be issued */
16352 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16353 IOERR_SLI_ABORTED);
16354
0e9bb8d7 16355 return txq_cnt;
2a9bf3d0 16356}
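/*
 * Usage sketch (illustrative only): lpfc_drain_txq() is a retry helper.
 * It is typically called again once XRI/SGL resources are freed, and its
 * return value is the number of IOCBs still waiting on the txq:
 *
 *	if (lpfc_drain_txq(phba))
 *		txq still congested; it will drain on a later call
 */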