scsi: lpfc: Fix build error
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring,
                                   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
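
/*
 * Informational sketch (not part of the driver): since the copy above moves
 * 64-bit words, @cnt is a byte count that must be a multiple of
 * sizeof(uint64_t). A 64-byte WQE therefore takes eight iterations:
 *
 *	union lpfc_wqe wqe;				(hypothetical local)
 *	lpfc_sli4_pcimem_bcopy(&wqe, dest, sizeof(wqe));  (8 x 8-byte copies)
 *
 * On builds that are not 64-bit little-endian, the #else branch falls back
 * to lpfc_sli_pcimem_bcopy(), which also byte-swaps each word to match SLI
 * endianness.
 */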

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful; -EBUSY if no entries are available on @q; -ENOMEM if the queue
 * memory is not set up; or -EINVAL if the doorbell format is unknown.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                            q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                            q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
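
/*
 * Usage sketch (hypothetical, for illustration only): a caller posts a WQE
 * while holding hbalock and treats -EBUSY as "ring full, defer the work":
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		rc = IOCB_BUSY;		(leave the command on the txq)
 */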

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}
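
/*
 * Note on the valid-bit protocol (informational): on pre-eqav hardware the
 * host clears lpfc_eqe_valid after consuming an EQE, while on eqav-capable
 * HBAs the entry is left untouched and q->qe_valid is toggled each time
 * host_index wraps, so "valid" alternates between 1 and 0 on successive
 * passes over the ring. __lpfc_sli4_consume_eqe() below implements both
 * schemes, which is why the comparison above is against q->qe_valid rather
 * than a constant.
 */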

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue whose interrupts will be disabled.
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue whose interrupts will be disabled.
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                        (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe;
        uint32_t count = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        eq->queue_claimed = 0;

rearm_and_exit:
        /* Always clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

        return count;
}
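
/*
 * Worked example (values are illustrative, not the driver defaults): with
 * eq->notify_interval = 16 and eq->max_proc_limit = 64, the loop above
 * consumes EQEs one at a time, rings the doorbell with LPFC_QUEUE_NOARM
 * after every 16 entries to hand them back to the HBA, and breaks out after
 * 64 entries so one busy EQ cannot monopolize the CPU. The final doorbell
 * write at rearm_and_exit then releases any remainder and re-arms the EQ in
 * a single operation.
 */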

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
                        (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
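
/*
 * Usage note (informational): passing count == 0 with arm == true is valid
 * and simply re-arms the CQ without releasing any entries, e.g.
 *
 *	lpfc_sli4_write_cq_db(phba, cq, 0, LPFC_QUEUE_REARM);
 *
 * The (count == 0 && !arm) sanity check above only rejects the combination
 * that would make the doorbell write a no-op.
 */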

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the hrqe was copied to if
 * successful; -EBUSY if no entries are available; -EINVAL if the queues are
 * mismatched; or -ENOMEM if the queue memory is not set up.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
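
/*
 * Usage sketch (hypothetical): the header and data RQs advance in lockstep,
 * so a buffer post always supplies one RQE for each queue, built from the
 * paired DMA buffers of a receive buffer (rqb is an assumed local):
 *
 *	hrqe.address_lo = putPaddrLow(rqb->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(rqb->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(rqb->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(rqb->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * A negative rc means the buffer was not posted and must be returned to its
 * pool.
 */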

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq)
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        lockdep_assert_held(&phba->hbalock);
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
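
/*
 * Lifecycle sketch (informational): a caller that aborts an exchange marks
 * the xri so it is not reused until RATOV has elapsed, e.g.
 *
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
 *
 * sets the bit in ndlp->active_rrqs_xri_bitmap and queues the rrq with a
 * stop time of jiffies + (fc_ratov + 1) seconds. When the rrq timer fires,
 * lpfc_handle_rrq_active() either sends the RRQ ELS or calls
 * lpfc_clr_rrq_active() to clear the bit and free the rrq.
 */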

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the
 * newly allocated sglq object; otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        lockdep_assert_held(&phba->hbalock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                    ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                                struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}
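
/*
 * Note (informational): the loop above is effectively a rotating search.
 * Each sglq whose xri still has an RRQ pending for this DID is appended
 * back to the tail of lpfc_els_sgl_list, and if the search comes back
 * around to start_sglq then every free xri is RRQ-blocked, so NULL is
 * returned rather than risking reuse of an exchange inside RATOV.
 */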

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the
 * newly allocated sglq object; otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                        (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1304
1305
1306 /**
1307  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1308  * @phba: Pointer to HBA context object.
1309  * @iocbq: Pointer to driver iocb object.
1310  *
1311  * This function is called with hbalock held to release driver
1312  * iocb object to the iocb pool. The iotag in the iocb object
1313  * does not change for each use of the iocb object. This function
1314  * clears all other fields of the iocb object when it is freed.
1315  **/
1316 static void
1317 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1318 {
1319         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1320
1321         lockdep_assert_held(&phba->hbalock);
1322
1323         /*
1324          * Clean all volatile data fields, preserve iotag and node struct.
1325          */
1326         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1327         iocbq->sli4_xritag = NO_XRI;
1328         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1329 }
1330
1331 /**
1332  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1333  * @phba: Pointer to HBA context object.
1334  * @iocbq: Pointer to driver iocb object.
1335  *
1336  * This function is called with hbalock held to release driver
1337  * iocb object to the iocb pool. The iotag in the iocb object
1338  * does not change for each use of the iocb object. This function
1339  * clears all other fields of the iocb object when it is freed.
1340  **/
1341 static void
1342 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1343 {
1344         lockdep_assert_held(&phba->hbalock);
1345
1346         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1347         phba->iocb_cnt--;
1348 }
1349
1350 /**
1351  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1352  * @phba: Pointer to HBA context object.
1353  * @iocbq: Pointer to driver iocb object.
1354  *
1355  * This function is called with no lock held to release the iocb to
1356  * iocb pool.
1357  **/
1358 void
1359 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1360 {
1361         unsigned long iflags;
1362
1363         /*
1364          * Clean all volatile data fields, preserve iotag and node struct.
1365          */
1366         spin_lock_irqsave(&phba->hbalock, iflags);
1367         __lpfc_sli_release_iocbq(phba, iocbq);
1368         spin_unlock_irqrestore(&phba->hbalock, iflags);
1369 }
1370
1371 /**
1372  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1373  * @phba: Pointer to HBA context object.
1374  * @iocblist: List of IOCBs.
1375  * @ulpstatus: ULP status in IOCB command field.
1376  * @ulpWord4: ULP word-4 in IOCB command field.
1377  *
1378  * This function is called with a list of IOCBs to cancel. It cancels each IOCB
1379  * on the list by invoking the completion callback function associated with the
1380  * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
1381  * fields.
1382  **/
1383 void
1384 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1385                       uint32_t ulpstatus, uint32_t ulpWord4)
1386 {
1387         struct lpfc_iocbq *piocb;
1388
1389         while (!list_empty(iocblist)) {
1390                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1391                 if (!piocb->iocb_cmpl) {
1392                         lpfc_sli_release_iocbq(phba, piocb);
1393                 } else {
1394                         piocb->iocb.ulpStatus = ulpstatus;
1395                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1396                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1397                 }
1398         }
1399         return;
1400 }
1401
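/*
 * Illustrative usage sketch only, not part of the driver ("completions"
 * is a placeholder local list): callers typically splice the iocbs to be
 * failed onto a local list and cancel them with a local reject status,
 * e.g.:
 *
 *	LIST_HEAD(completions);
 *
 *	... splice pending iocbs onto completions ...
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */
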
1402 /**
1403  * lpfc_sli_iocb_cmd_type - Get the iocb type
1404  * @iocb_cmnd: iocb command code.
1405  *
1406  * This function is called by ring event handler function to get the iocb type.
1407  * This function translates the iocb command to an iocb command type used to
1408  * decide the final disposition of each completed IOCB.
1409  * The function returns
1410  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1411  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1412  * LPFC_ABORT_IOCB   if it is an abort iocb
1413  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1414  *
1415  * The caller is not required to hold any lock.
1416  **/
1417 static lpfc_iocb_type
1418 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1419 {
1420         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1421
1422         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1423                 return LPFC_UNKNOWN_IOCB;
1424
1425         switch (iocb_cmnd) {
1426         case CMD_XMIT_SEQUENCE_CR:
1427         case CMD_XMIT_SEQUENCE_CX:
1428         case CMD_XMIT_BCAST_CN:
1429         case CMD_XMIT_BCAST_CX:
1430         case CMD_ELS_REQUEST_CR:
1431         case CMD_ELS_REQUEST_CX:
1432         case CMD_CREATE_XRI_CR:
1433         case CMD_CREATE_XRI_CX:
1434         case CMD_GET_RPI_CN:
1435         case CMD_XMIT_ELS_RSP_CX:
1436         case CMD_GET_RPI_CR:
1437         case CMD_FCP_IWRITE_CR:
1438         case CMD_FCP_IWRITE_CX:
1439         case CMD_FCP_IREAD_CR:
1440         case CMD_FCP_IREAD_CX:
1441         case CMD_FCP_ICMND_CR:
1442         case CMD_FCP_ICMND_CX:
1443         case CMD_FCP_TSEND_CX:
1444         case CMD_FCP_TRSP_CX:
1445         case CMD_FCP_TRECEIVE_CX:
1446         case CMD_FCP_AUTO_TRSP_CX:
1447         case CMD_ADAPTER_MSG:
1448         case CMD_ADAPTER_DUMP:
1449         case CMD_XMIT_SEQUENCE64_CR:
1450         case CMD_XMIT_SEQUENCE64_CX:
1451         case CMD_XMIT_BCAST64_CN:
1452         case CMD_XMIT_BCAST64_CX:
1453         case CMD_ELS_REQUEST64_CR:
1454         case CMD_ELS_REQUEST64_CX:
1455         case CMD_FCP_IWRITE64_CR:
1456         case CMD_FCP_IWRITE64_CX:
1457         case CMD_FCP_IREAD64_CR:
1458         case CMD_FCP_IREAD64_CX:
1459         case CMD_FCP_ICMND64_CR:
1460         case CMD_FCP_ICMND64_CX:
1461         case CMD_FCP_TSEND64_CX:
1462         case CMD_FCP_TRSP64_CX:
1463         case CMD_FCP_TRECEIVE64_CX:
1464         case CMD_GEN_REQUEST64_CR:
1465         case CMD_GEN_REQUEST64_CX:
1466         case CMD_XMIT_ELS_RSP64_CX:
1467         case DSSCMD_IWRITE64_CR:
1468         case DSSCMD_IWRITE64_CX:
1469         case DSSCMD_IREAD64_CR:
1470         case DSSCMD_IREAD64_CX:
1471                 type = LPFC_SOL_IOCB;
1472                 break;
1473         case CMD_ABORT_XRI_CN:
1474         case CMD_ABORT_XRI_CX:
1475         case CMD_CLOSE_XRI_CN:
1476         case CMD_CLOSE_XRI_CX:
1477         case CMD_XRI_ABORTED_CX:
1478         case CMD_ABORT_MXRI64_CN:
1479         case CMD_XMIT_BLS_RSP64_CX:
1480                 type = LPFC_ABORT_IOCB;
1481                 break;
1482         case CMD_RCV_SEQUENCE_CX:
1483         case CMD_RCV_ELS_REQ_CX:
1484         case CMD_RCV_SEQUENCE64_CX:
1485         case CMD_RCV_ELS_REQ64_CX:
1486         case CMD_ASYNC_STATUS:
1487         case CMD_IOCB_RCV_SEQ64_CX:
1488         case CMD_IOCB_RCV_ELS64_CX:
1489         case CMD_IOCB_RCV_CONT64_CX:
1490         case CMD_IOCB_RET_XRI64_CX:
1491                 type = LPFC_UNSOL_IOCB;
1492                 break;
1493         case CMD_IOCB_XMIT_MSEQ64_CR:
1494         case CMD_IOCB_XMIT_MSEQ64_CX:
1495         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1496         case CMD_IOCB_RCV_ELS_LIST64_CX:
1497         case CMD_IOCB_CLOSE_EXTENDED_CN:
1498         case CMD_IOCB_ABORT_EXTENDED_CN:
1499         case CMD_IOCB_RET_HBQE64_CN:
1500         case CMD_IOCB_FCP_IBIDIR64_CR:
1501         case CMD_IOCB_FCP_IBIDIR64_CX:
1502         case CMD_IOCB_FCP_ITASKMGT64_CX:
1503         case CMD_IOCB_LOGENTRY_CN:
1504         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1505                 printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
1506                                 __func__, iocb_cmnd);
1507                 type = LPFC_UNKNOWN_IOCB;
1508                 break;
1509         default:
1510                 type = LPFC_UNKNOWN_IOCB;
1511                 break;
1512         }
1513
1514         return type;
1515 }
1516
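/*
 * Illustrative usage sketch only, not part of the driver (irsp stands
 * for a response IOCB taken off the ring): the ring event handlers
 * dispatch on the returned type roughly as follows:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:
 *		... look up and complete the originating command ...
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		... hand the sequence to the registered ULP handler ...
 *		break;
 *	default:
 *		... log and drop ...
 *	}
 */
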
1517 /**
1518  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1519  * @phba: Pointer to HBA context object.
1520  *
1521  * This function is called from SLI initialization code
1522  * to configure every ring of the HBA's SLI interface. The
1523  * caller is not required to hold any lock. This function issues
1524  * a config_ring mailbox command for each ring.
1525  * This function returns zero if successful else returns a negative
1526  * error code.
1527  **/
1528 static int
1529 lpfc_sli_ring_map(struct lpfc_hba *phba)
1530 {
1531         struct lpfc_sli *psli = &phba->sli;
1532         LPFC_MBOXQ_t *pmb;
1533         MAILBOX_t *pmbox;
1534         int i, rc, ret = 0;
1535
1536         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1537         if (!pmb)
1538                 return -ENOMEM;
1539         pmbox = &pmb->u.mb;
1540         phba->link_state = LPFC_INIT_MBX_CMDS;
1541         for (i = 0; i < psli->num_rings; i++) {
1542                 lpfc_config_ring(phba, i, pmb);
1543                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1544                 if (rc != MBX_SUCCESS) {
1545                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1546                                         "0446 Adapter failed to init (%d), "
1547                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1548                                         "ring %d\n",
1549                                         rc, pmbox->mbxCommand,
1550                                         pmbox->mbxStatus, i);
1551                         phba->link_state = LPFC_HBA_ERROR;
1552                         ret = -ENXIO;
1553                         break;
1554                 }
1555         }
1556         mempool_free(pmb, phba->mbox_mem_pool);
1557         return ret;
1558 }
1559
1560 /**
1561  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1562  * @phba: Pointer to HBA context object.
1563  * @pring: Pointer to driver SLI ring object.
1564  * @piocb: Pointer to the driver iocb object.
1565  *
1566  * This function is called with hbalock held. The function adds the
1567  * new iocb to txcmplq of the given ring. This function always returns
1568  * 0. If this function is called for ELS ring, this function checks if
1569  * there is a vport associated with the ELS command. This function also
1570  * starts els_tmofunc timer if this is an ELS command.
1571  **/
1572 static int
1573 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1574                         struct lpfc_iocbq *piocb)
1575 {
1576         lockdep_assert_held(&phba->hbalock);
1577
1578         BUG_ON(!piocb);
1579
1580         list_add_tail(&piocb->list, &pring->txcmplq);
1581         piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1582         pring->txcmplq_cnt++;
1583
1584         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1585            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1586            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1587                 BUG_ON(!piocb->vport);
1588                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1589                         mod_timer(&piocb->vport->els_tmofunc,
1590                                   jiffies +
1591                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1592         }
1593
1594         return 0;
1595 }
1596
1597 /**
1598  * lpfc_sli_ringtx_get - Get first element of the txq
1599  * @phba: Pointer to HBA context object.
1600  * @pring: Pointer to driver SLI ring object.
1601  *
1602  * This function is called with hbalock held to get next
1603  * iocb in txq of the given ring. If there is any iocb in
1604  * the txq, the function returns first iocb in the list after
1605  * removing the iocb from the list, else it returns NULL.
1606  **/
1607 struct lpfc_iocbq *
1608 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1609 {
1610         struct lpfc_iocbq *cmd_iocb;
1611
1612         lockdep_assert_held(&phba->hbalock);
1613
1614         list_remove_head(&pring->txq, cmd_iocb, struct lpfc_iocbq, list);
1615         return cmd_iocb;
1616 }
1617
1618 /**
1619  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1620  * @phba: Pointer to HBA context object.
1621  * @pring: Pointer to driver SLI ring object.
1622  *
1623  * This function is called with hbalock held and the caller must post the
1624  * iocb without releasing the lock. If the caller releases the lock,
1625  * iocb slot returned by the function is not guaranteed to be available.
1626  * The function returns pointer to the next available iocb slot if there
1627  * is available slot in the ring, else it returns NULL.
1628  * If the get index of the ring is ahead of the put index, the function
1629  * will post an error attention event to the worker thread to take the
1630  * HBA to offline state.
1631  **/
1632 static IOCB_t *
1633 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1634 {
1635         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1636         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1637
1638         lockdep_assert_held(&phba->hbalock);
1639
1640         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1641            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1642                 pring->sli.sli3.next_cmdidx = 0;
1643
1644         if (unlikely(pring->sli.sli3.local_getidx ==
1645                 pring->sli.sli3.next_cmdidx)) {
1646
1647                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1648
1649                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1650                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1651                                         "0315 Ring %d issue: portCmdGet %d "
1652                                         "is bigger than cmd ring %d\n",
1653                                         pring->ringno,
1654                                         pring->sli.sli3.local_getidx,
1655                                         max_cmd_idx);
1656
1657                         phba->link_state = LPFC_HBA_ERROR;
1658                         /*
1659                          * All error attention handlers are posted to
1660                          * worker thread
1661                          */
1662                         phba->work_ha |= HA_ERATT;
1663                         phba->work_hs = HS_FFER3;
1664
1665                         lpfc_worker_wake_up(phba);
1666
1667                         return NULL;
1668                 }
1669
1670                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1671                         return NULL;
1672         }
1673
1674         return lpfc_cmd_iocb(phba, pring);
1675 }
1676
1677 /**
1678  * lpfc_sli_next_iotag - Get an iotag for the iocb
1679  * @phba: Pointer to HBA context object.
1680  * @iocbq: Pointer to driver iocb object.
1681  *
1682  * This function gets an iotag for the iocb. If there is no unused iotag and
1683  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1684  * array and assigns a new iotag.
1685  * The function returns the allocated iotag if successful, else returns zero.
1686  * Zero is not a valid iotag.
1687  * The caller is not required to hold any lock.
1688  **/
1689 uint16_t
1690 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1691 {
1692         struct lpfc_iocbq **new_arr;
1693         struct lpfc_iocbq **old_arr;
1694         size_t new_len;
1695         struct lpfc_sli *psli = &phba->sli;
1696         uint16_t iotag;
1697
1698         spin_lock_irq(&phba->hbalock);
1699         iotag = psli->last_iotag;
1700         if (++iotag < psli->iocbq_lookup_len) {
1701                 psli->last_iotag = iotag;
1702                 psli->iocbq_lookup[iotag] = iocbq;
1703                 spin_unlock_irq(&phba->hbalock);
1704                 iocbq->iotag = iotag;
1705                 return iotag;
1706         } else if (psli->iocbq_lookup_len < (0xffff
1707                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1708                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1709                 spin_unlock_irq(&phba->hbalock);
1710                 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1711                                   GFP_KERNEL);
1712                 if (new_arr) {
1713                         spin_lock_irq(&phba->hbalock);
1714                         old_arr = psli->iocbq_lookup;
1715                         if (new_len <= psli->iocbq_lookup_len) {
1716                                 /* highly improbable case */
1717                                 kfree(new_arr);
1718                                 iotag = psli->last_iotag;
1719                                 if (++iotag < psli->iocbq_lookup_len) {
1720                                         psli->last_iotag = iotag;
1721                                         psli->iocbq_lookup[iotag] = iocbq;
1722                                         spin_unlock_irq(&phba->hbalock);
1723                                         iocbq->iotag = iotag;
1724                                         return iotag;
1725                                 }
1726                                 spin_unlock_irq(&phba->hbalock);
1727                                 return 0;
1728                         }
1729                         if (psli->iocbq_lookup)
1730                                 memcpy(new_arr, old_arr,
1731                                        ((psli->last_iotag  + 1) *
1732                                         sizeof(struct lpfc_iocbq *)));
1733                         psli->iocbq_lookup = new_arr;
1734                         psli->iocbq_lookup_len = new_len;
1735                         psli->last_iotag = iotag;
1736                         psli->iocbq_lookup[iotag] = iocbq;
1737                         spin_unlock_irq(&phba->hbalock);
1738                         iocbq->iotag = iotag;
1739                         kfree(old_arr);
1740                         return iotag;
1741                 }
1742         } else
1743                 spin_unlock_irq(&phba->hbalock);
1744
1745         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1746                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1747                         psli->last_iotag);
1748
1749         return 0;
1750 }
1751
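/*
 * Illustrative usage sketch only, not part of the driver (iocbq_entry
 * stands for a freshly allocated iocbq being added to the pool):
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
 *	if (iotag == 0) {
 *		kfree(iocbq_entry);
 *		... abort pool setup: no iotag could be assigned ...
 *	}
 */
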
1752 /**
1753  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1754  * @phba: Pointer to HBA context object.
1755  * @pring: Pointer to driver SLI ring object.
1756  * @iocb: Pointer to iocb slot in the ring.
1757  * @nextiocb: Pointer to driver iocb object which need to be
1758  *            posted to firmware.
1759  *
1760  * This function is called with hbalock held to post a new iocb to
1761  * the firmware. This function copies the new iocb to ring iocb slot and
1762  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1763  * a completion call back for this iocb else the function will free the
1764  * iocb object.
1765  **/
1766 static void
1767 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1768                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1769 {
1770         lockdep_assert_held(&phba->hbalock);
1771         /*
1772          * Set up an iotag
1773          */
1774         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1775
1776
1777         if (pring->ringno == LPFC_ELS_RING) {
1778                 lpfc_debugfs_slow_ring_trc(phba,
1779                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1780                         *(((uint32_t *) &nextiocb->iocb) + 4),
1781                         *(((uint32_t *) &nextiocb->iocb) + 6),
1782                         *(((uint32_t *) &nextiocb->iocb) + 7));
1783         }
1784
1785         /*
1786          * Issue iocb command to adapter
1787          */
1788         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1789         wmb();
1790         pring->stats.iocb_cmd++;
1791
1792         /*
1793          * If there is no completion routine to call, we can release the
1794          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1795          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1796          */
1797         if (nextiocb->iocb_cmpl)
1798                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1799         else
1800                 __lpfc_sli_release_iocbq(phba, nextiocb);
1801
1802         /*
1803          * Let the HBA know what IOCB slot will be the next one the
1804          * driver will put a command into.
1805          */
1806         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1807         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1808 }
1809
1810 /**
1811  * lpfc_sli_update_full_ring - Update the chip attention register
1812  * @phba: Pointer to HBA context object.
1813  * @pring: Pointer to driver SLI ring object.
1814  *
1815  * The caller is not required to hold any lock for calling this function.
1816  * This function updates the chip attention bits for the ring to inform firmware
1817  * that there is pending work to be done for this ring and requests an
1818  * interrupt when there is space available in the ring. This function is
1819  * called when the driver is unable to post more iocbs to the ring due
1820  * to unavailability of space in the ring.
1821  **/
1822 static void
1823 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1824 {
1825         int ringno = pring->ringno;
1826
1827         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1828
1829         wmb();
1830
1831         /*
1832          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1833          * The HBA will tell us when an IOCB entry is available.
1834          */
1835         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1836         readl(phba->CAregaddr); /* flush */
1837
1838         pring->stats.iocb_cmd_full++;
1839 }
1840
1841 /**
1842  * lpfc_sli_update_ring - Update chip attention register
1843  * @phba: Pointer to HBA context object.
1844  * @pring: Pointer to driver SLI ring object.
1845  *
1846  * This function updates the chip attention register bit for the
1847  * given ring to inform HBA that there is more work to be done
1848  * in this ring. The caller is not required to hold any lock.
1849  **/
1850 static void
1851 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1852 {
1853         int ringno = pring->ringno;
1854
1855         /*
1856          * Tell the HBA that there is work to do in this ring.
1857          */
1858         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1859                 wmb();
1860                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1861                 readl(phba->CAregaddr); /* flush */
1862         }
1863 }
1864
1865 /**
1866  * lpfc_sli_resume_iocb - Process iocbs in the txq
1867  * @phba: Pointer to HBA context object.
1868  * @pring: Pointer to driver SLI ring object.
1869  *
1870  * This function is called with hbalock held to post pending iocbs
1871  * in the txq to the firmware. This function is called when driver
1872  * detects space available in the ring.
1873  **/
1874 static void
1875 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1876 {
1877         IOCB_t *iocb;
1878         struct lpfc_iocbq *nextiocb;
1879
1880         lockdep_assert_held(&phba->hbalock);
1881
1882         /*
1883          * Check to see if:
1884          *  (a) there is anything on the txq to send
1885          *  (b) link is up
1886          *  (c) link attention events can be processed (fcp ring only)
1887          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1888          */
1889
1890         if (lpfc_is_link_up(phba) &&
1891             (!list_empty(&pring->txq)) &&
1892             (pring->ringno != LPFC_FCP_RING ||
1893              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1894
1895                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1896                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1897                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1898
1899                 if (iocb)
1900                         lpfc_sli_update_ring(phba, pring);
1901                 else
1902                         lpfc_sli_update_full_ring(phba, pring);
1903         }
1904
1905         return;
1906 }
1907
1908 /**
1909  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1910  * @phba: Pointer to HBA context object.
1911  * @hbqno: HBQ number.
1912  *
1913  * This function is called with hbalock held to get the next
1914  * available slot for the given HBQ. If there is free slot
1915  * available for the HBQ it will return pointer to the next available
1916  * HBQ entry else it will return NULL.
1917  **/
1918 static struct lpfc_hbq_entry *
1919 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1920 {
1921         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1922
1923         lockdep_assert_held(&phba->hbalock);
1924
1925         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1926             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1927                 hbqp->next_hbqPutIdx = 0;
1928
1929         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1930                 uint32_t raw_index = phba->hbq_get[hbqno];
1931                 uint32_t getidx = le32_to_cpu(raw_index);
1932
1933                 hbqp->local_hbqGetIdx = getidx;
1934
1935                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1936                         lpfc_printf_log(phba, KERN_ERR,
1937                                         LOG_SLI | LOG_VPORT,
1938                                         "1802 HBQ %d: local_hbqGetIdx "
1939                                         "%u is > than hbqp->entry_count %u\n",
1940                                         hbqno, hbqp->local_hbqGetIdx,
1941                                         hbqp->entry_count);
1942
1943                         phba->link_state = LPFC_HBA_ERROR;
1944                         return NULL;
1945                 }
1946
1947                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1948                         return NULL;
1949         }
1950
1951         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1952                         hbqp->hbqPutIdx;
1953 }
1954
1955 /**
1956  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1957  * @phba: Pointer to HBA context object.
1958  *
1959  * This function is called with no lock held to free all the
1960  * hbq buffers while uninitializing the SLI interface. It also
1961  * frees the HBQ buffers returned by the firmware but not yet
1962  * processed by the upper layers.
1963  **/
1964 void
1965 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1966 {
1967         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1968         struct hbq_dmabuf *hbq_buf;
1969         unsigned long flags;
1970         int i, hbq_count;
1971
1972         hbq_count = lpfc_sli_hbq_count();
1973         /* Return all memory used by all HBQs */
1974         spin_lock_irqsave(&phba->hbalock, flags);
1975         for (i = 0; i < hbq_count; ++i) {
1976                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1977                                 &phba->hbqs[i].hbq_buffer_list, list) {
1978                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1979                         list_del(&hbq_buf->dbuf.list);
1980                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1981                 }
1982                 phba->hbqs[i].buffer_count = 0;
1983         }
1984
1985         /* Mark the HBQs not in use */
1986         phba->hbq_in_use = 0;
1987         spin_unlock_irqrestore(&phba->hbalock, flags);
1988 }
1989
1990 /**
1991  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1992  * @phba: Pointer to HBA context object.
1993  * @hbqno: HBQ number.
1994  * @hbq_buf: Pointer to HBQ buffer.
1995  *
1996  * This function is called with the hbalock held to post an
1997  * hbq buffer to the firmware. If the function finds an empty
1998  * slot in the HBQ, it will post the buffer. The function will return
1999  * zero if it successfully posts the buffer, else it will
2000  * return an error.
2001  **/
2002 static int
2003 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2004                          struct hbq_dmabuf *hbq_buf)
2005 {
2006         lockdep_assert_held(&phba->hbalock);
2007         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2008 }
2009
2010 /**
2011  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2012  * @phba: Pointer to HBA context object.
2013  * @hbqno: HBQ number.
2014  * @hbq_buf: Pointer to HBQ buffer.
2015  *
2016  * This function is called with the hbalock held to post a hbq buffer to the
2017  * firmware. If the function finds an empty slot in the HBQ, it will post the
2018  * buffer and place it on the hbq_buffer_list. The function will return zero if
2019  * it successfully post the buffer else it will return an error.
2020  **/
2021 static int
2022 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2023                             struct hbq_dmabuf *hbq_buf)
2024 {
2025         struct lpfc_hbq_entry *hbqe;
2026         dma_addr_t physaddr = hbq_buf->dbuf.phys;
2027
2028         lockdep_assert_held(&phba->hbalock);
2029         /* Get next HBQ entry slot to use */
2030         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2031         if (hbqe) {
2032                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2033
2034                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2035                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2036                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2037                 hbqe->bde.tus.f.bdeFlags = 0;
2038                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2039                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2040                 /* Sync SLIM */
2041                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2042                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2043                 /* flush */
2044                 readl(phba->hbq_put + hbqno);
2045                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2046                 return 0;
2047         } else
2048                 return -ENOMEM;
2049 }
2050
2051 /**
2052  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2053  * @phba: Pointer to HBA context object.
2054  * @hbqno: HBQ number.
2055  * @hbq_buf: Pointer to HBQ buffer.
2056  *
2057  * This function is called with the hbalock held to post an RQE to the SLI4
2058  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2059  * the hbq_buffer_list and return zero, otherwise it will return an error.
2060  **/
2061 static int
2062 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2063                             struct hbq_dmabuf *hbq_buf)
2064 {
2065         int rc;
2066         struct lpfc_rqe hrqe;
2067         struct lpfc_rqe drqe;
2068         struct lpfc_queue *hrq;
2069         struct lpfc_queue *drq;
2070
2071         if (hbqno != LPFC_ELS_HBQ)
2072                 return 1;
2073         hrq = phba->sli4_hba.hdr_rq;
2074         drq = phba->sli4_hba.dat_rq;
2075
2076         lockdep_assert_held(&phba->hbalock);
2077         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2078         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2079         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2080         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2081         rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2082         if (rc < 0)
2083                 return rc;
2084         hbq_buf->tag = (rc | (hbqno << 16));
2085         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2086         return 0;
2087 }
2088
2089 /* HBQ for ELS and CT traffic. */
2090 static struct lpfc_hbq_init lpfc_els_hbq = {
2091         .rn = 1,
2092         .entry_count = 256,
2093         .mask_count = 0,
2094         .profile = 0,
2095         .ring_mask = (1 << LPFC_ELS_RING),
2096         .buffer_count = 0,
2097         .init_count = 40,
2098         .add_count = 40,
2099 };
2100
2101 /* Array of HBQs */
2102 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2103         &lpfc_els_hbq,
2104 };
2105
2106 /**
2107  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2108  * @phba: Pointer to HBA context object.
2109  * @hbqno: HBQ number.
2110  * @count: Number of HBQ buffers to be posted.
2111  *
2112  * This function is called with no lock held to post more hbq buffers to the
2113  * given HBQ. The function returns the number of HBQ buffers successfully
2114  * posted.
2115  **/
2116 static int
2117 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2118 {
2119         uint32_t i, posted = 0;
2120         unsigned long flags;
2121         struct hbq_dmabuf *hbq_buffer;
2122         LIST_HEAD(hbq_buf_list);
2123         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2124                 return 0;
2125
2126         if ((phba->hbqs[hbqno].buffer_count + count) >
2127             lpfc_hbq_defs[hbqno]->entry_count)
2128                 count = lpfc_hbq_defs[hbqno]->entry_count -
2129                                         phba->hbqs[hbqno].buffer_count;
2130         if (!count)
2131                 return 0;
2132         /* Allocate HBQ entries */
2133         for (i = 0; i < count; i++) {
2134                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2135                 if (!hbq_buffer)
2136                         break;
2137                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2138         }
2139         /* Check whether HBQ is still in use */
2140         spin_lock_irqsave(&phba->hbalock, flags);
2141         if (!phba->hbq_in_use)
2142                 goto err;
2143         while (!list_empty(&hbq_buf_list)) {
2144                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2145                                  dbuf.list);
2146                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2147                                       (hbqno << 16));
2148                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2149                         phba->hbqs[hbqno].buffer_count++;
2150                         posted++;
2151                 } else
2152                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2153         }
2154         spin_unlock_irqrestore(&phba->hbalock, flags);
2155         return posted;
2156 err:
2157         spin_unlock_irqrestore(&phba->hbalock, flags);
2158         while (!list_empty(&hbq_buf_list)) {
2159                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2160                                  dbuf.list);
2161                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2162         }
2163         return 0;
2164 }
2165
2166 /**
2167  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2168  * @phba: Pointer to HBA context object.
2169  * @qno: HBQ number.
2170  *
2171  * This function posts more buffers to the HBQ. This function
2172  * is called with no lock held. The function returns the number of HBQ entries
2173  * successfully posted.
2174  **/
2175 int
2176 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2177 {
2178         if (phba->sli_rev == LPFC_SLI_REV4)
2179                 return 0;
2180         else
2181                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2182                                          lpfc_hbq_defs[qno]->add_count);
2183 }
2184
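/*
 * Illustrative usage sketch only, not part of the driver: once
 * unsolicited buffers have been consumed, the ELS HBQ is typically
 * replenished with:
 *
 *	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 */
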
2185 /**
2186  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2187  * @phba: Pointer to HBA context object.
2188  * @qno:  HBQ queue number.
2189  *
2190  * This function is called from SLI initialization code path with
2191  * no lock held to post initial HBQ buffers to firmware. The
2192  * function returns the number of HBQ entries successfully posted.
2193  **/
2194 static int
2195 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2196 {
2197         if (phba->sli_rev == LPFC_SLI_REV4)
2198                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2199                                         lpfc_hbq_defs[qno]->entry_count);
2200         else
2201                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2202                                          lpfc_hbq_defs[qno]->init_count);
2203 }
2204
2205 /**
2206  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off an hbq list
2207  * @rb_list: Pointer to the hbq buffer list to take the first
2208  *          buffer from.
2209  *
2210  * This function removes the first hbq buffer on an hbq list and returns a
2211  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2212  **/
2213 static struct hbq_dmabuf *
2214 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2215 {
2216         struct lpfc_dmabuf *d_buf;
2217
2218         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2219         if (!d_buf)
2220                 return NULL;
2221         return container_of(d_buf, struct hbq_dmabuf, dbuf);
2222 }
2223
2224 /**
2225  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2226  * @phba: Pointer to HBA context object.
2227  * @hrq: Pointer to the header receive queue.
2228  *
2229  * This function removes the first RQ buffer on an RQ buffer list and returns a
2230  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2231  **/
2232 static struct rqb_dmabuf *
2233 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2234 {
2235         struct lpfc_dmabuf *h_buf;
2236         struct lpfc_rqb *rqbp;
2237
2238         rqbp = hrq->rqbp;
2239         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2240                          struct lpfc_dmabuf, list);
2241         if (!h_buf)
2242                 return NULL;
2243         rqbp->buffer_count--;
2244         return container_of(h_buf, struct rqb_dmabuf, hbuf);
2245 }
2246
2247 /**
2248  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2249  * @phba: Pointer to HBA context object.
2250  * @tag: Tag of the hbq buffer.
2251  *
2252  * This function searches for the hbq buffer associated with the given tag in
2253  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2254  * otherwise it returns NULL.
2255  **/
2256 static struct hbq_dmabuf *
2257 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2258 {
2259         struct lpfc_dmabuf *d_buf;
2260         struct hbq_dmabuf *hbq_buf;
2261         uint32_t hbqno;
2262
2263         hbqno = tag >> 16;
2264         if (hbqno >= LPFC_MAX_HBQS)
2265                 return NULL;
2266
2267         spin_lock_irq(&phba->hbalock);
2268         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2269                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2270                 if (hbq_buf->tag == tag) {
2271                         spin_unlock_irq(&phba->hbalock);
2272                         return hbq_buf;
2273                 }
2274         }
2275         spin_unlock_irq(&phba->hbalock);
2276         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2277                         "1803 Bad hbq tag. Data: x%x x%x\n",
2278                         tag, phba->hbqs[tag >> 16].buffer_count);
2279         return NULL;
2280 }
2281
2282 /**
2283  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2284  * @phba: Pointer to HBA context object.
2285  * @hbq_buffer: Pointer to HBQ buffer.
2286  *
2287  * This function is called with hbalock held. This function gives back
2288  * the hbq buffer to firmware. If the HBQ does not have space to
2289  * post the buffer, it will free the buffer.
2290  **/
2291 void
2292 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2293 {
2294         uint32_t hbqno;
2295
2296         if (hbq_buffer) {
2297                 hbqno = hbq_buffer->tag >> 16;
2298                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2299                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2300         }
2301 }
2302
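/*
 * Illustrative usage sketch only, not part of the driver (hbq_entry and
 * flags are placeholder locals): a consumer of an unsolicited buffer
 * hands it back under hbalock, e.g.:
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_free_hbq(phba, hbq_entry);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */
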
2303 /**
2304  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2305  * @mbxCommand: mailbox command code.
2306  *
2307  * This function is called by the mailbox event handler function to verify
2308  * that the completed mailbox command is a legitimate mailbox command. If the
2309  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2310  * and the mailbox event handler will take the HBA offline.
2311  **/
2312 static int
2313 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2314 {
2315         uint8_t ret;
2316
2317         switch (mbxCommand) {
2318         case MBX_LOAD_SM:
2319         case MBX_READ_NV:
2320         case MBX_WRITE_NV:
2321         case MBX_WRITE_VPARMS:
2322         case MBX_RUN_BIU_DIAG:
2323         case MBX_INIT_LINK:
2324         case MBX_DOWN_LINK:
2325         case MBX_CONFIG_LINK:
2326         case MBX_CONFIG_RING:
2327         case MBX_RESET_RING:
2328         case MBX_READ_CONFIG:
2329         case MBX_READ_RCONFIG:
2330         case MBX_READ_SPARM:
2331         case MBX_READ_STATUS:
2332         case MBX_READ_RPI:
2333         case MBX_READ_XRI:
2334         case MBX_READ_REV:
2335         case MBX_READ_LNK_STAT:
2336         case MBX_REG_LOGIN:
2337         case MBX_UNREG_LOGIN:
2338         case MBX_CLEAR_LA:
2339         case MBX_DUMP_MEMORY:
2340         case MBX_DUMP_CONTEXT:
2341         case MBX_RUN_DIAGS:
2342         case MBX_RESTART:
2343         case MBX_UPDATE_CFG:
2344         case MBX_DOWN_LOAD:
2345         case MBX_DEL_LD_ENTRY:
2346         case MBX_RUN_PROGRAM:
2347         case MBX_SET_MASK:
2348         case MBX_SET_VARIABLE:
2349         case MBX_UNREG_D_ID:
2350         case MBX_KILL_BOARD:
2351         case MBX_CONFIG_FARP:
2352         case MBX_BEACON:
2353         case MBX_LOAD_AREA:
2354         case MBX_RUN_BIU_DIAG64:
2355         case MBX_CONFIG_PORT:
2356         case MBX_READ_SPARM64:
2357         case MBX_READ_RPI64:
2358         case MBX_REG_LOGIN64:
2359         case MBX_READ_TOPOLOGY:
2360         case MBX_WRITE_WWN:
2361         case MBX_SET_DEBUG:
2362         case MBX_LOAD_EXP_ROM:
2363         case MBX_ASYNCEVT_ENABLE:
2364         case MBX_REG_VPI:
2365         case MBX_UNREG_VPI:
2366         case MBX_HEARTBEAT:
2367         case MBX_PORT_CAPABILITIES:
2368         case MBX_PORT_IOV_CONTROL:
2369         case MBX_SLI4_CONFIG:
2370         case MBX_SLI4_REQ_FTRS:
2371         case MBX_REG_FCFI:
2372         case MBX_UNREG_FCFI:
2373         case MBX_REG_VFI:
2374         case MBX_UNREG_VFI:
2375         case MBX_INIT_VPI:
2376         case MBX_INIT_VFI:
2377         case MBX_RESUME_RPI:
2378         case MBX_READ_EVENT_LOG_STATUS:
2379         case MBX_READ_EVENT_LOG:
2380         case MBX_SECURITY_MGMT:
2381         case MBX_AUTH_PORT:
2382         case MBX_ACCESS_VDATA:
2383                 ret = mbxCommand;
2384                 break;
2385         default:
2386                 ret = MBX_SHUTDOWN;
2387                 break;
2388         }
2389         return ret;
2390 }
2391
2392 /**
2393  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2394  * @phba: Pointer to HBA context object.
2395  * @pmboxq: Pointer to mailbox command.
2396  *
2397  * This is completion handler function for mailbox commands issued from
2398  * lpfc_sli_issue_mbox_wait function. This function is called by the
2399  * mailbox event handler function with no lock held. This function
2400  * will wake up thread waiting on the wait queue pointed by context1
2401  * of the mailbox.
2402  **/
2403 void
2404 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2405 {
2406         unsigned long drvr_flag;
2407         struct completion *pmbox_done;
2408
2409         /*
2410          * If pmbox_done is NULL, the driver thread gave up waiting and
2411          * continued running.
2412          */
2413         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2414         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2415         pmbox_done = (struct completion *)pmboxq->context3;
2416         if (pmbox_done)
2417                 complete(pmbox_done);
2418         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2419         return;
2420 }
2421
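/*
 * Illustrative usage sketch only, not part of the driver (pmboxq and
 * timeout are placeholder locals): synchronous callers reach this
 * handler via lpfc_sli_issue_mbox_wait(), which stores its completion
 * in context3 and then waits, e.g.:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, timeout);
 *	if (rc == MBX_TIMEOUT)
 *		... wait timed out; the mailbox may still complete later ...
 */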
2422
2423 /**
2424  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2425  * @phba: Pointer to HBA context object.
2426  * @pmb: Pointer to mailbox object.
2427  *
2428  * This function is the default mailbox completion handler. It
2429  * frees the memory resources associated with the completed mailbox
2430  * command. If the completed command is a REG_LOGIN mailbox command,
2431  * this function will issue a UREG_LOGIN to re-claim the RPI.
2432  **/
2433 void
2434 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2435 {
2436         struct lpfc_vport  *vport = pmb->vport;
2437         struct lpfc_dmabuf *mp;
2438         struct lpfc_nodelist *ndlp;
2439         struct Scsi_Host *shost;
2440         uint16_t rpi, vpi;
2441         int rc;
2442
2443         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2444
2445         if (mp) {
2446                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2447                 kfree(mp);
2448         }
2449
2450         /*
2451          * If a REG_LOGIN succeeded after the node is destroyed or the node
2452          * is in re-discovery, the driver needs to clean up the RPI.
2453          */
2454         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2455             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2456             !pmb->u.mb.mbxStatus) {
2457                 rpi = pmb->u.mb.un.varWords[0];
2458                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2459                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2460                 pmb->vport = vport;
2461                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2462                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2463                 if (rc != MBX_NOT_FINISHED)
2464                         return;
2465         }
2466
2467         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2468                 !(phba->pport->load_flag & FC_UNLOADING) &&
2469                 !pmb->u.mb.mbxStatus) {
2470                 shost = lpfc_shost_from_vport(vport);
2471                 spin_lock_irq(shost->host_lock);
2472                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2473                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2474                 spin_unlock_irq(shost->host_lock);
2475         }
2476
2477         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2478                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2479                 lpfc_nlp_put(ndlp);
2480                 pmb->ctx_buf = NULL;
2481                 pmb->ctx_ndlp = NULL;
2482         }
2483
2484         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2485                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2486
2487                 /* Check to see if there are any deferred events to process */
2488                 if (ndlp) {
2489                         lpfc_printf_vlog(
2490                                 vport,
2491                                 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2492                                 "1438 UNREG cmpl deferred mbox x%x "
2493                                 "on NPort x%x Data: x%x x%x %p\n",
2494                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2495                                 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2496
2497                         if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2498                             (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2499                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2500                                 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2501                                 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2502                         } else {
2503                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2504                         }
2505                         pmb->ctx_ndlp = NULL;
2506                 }
2507         }
2508
2509         /* Check security permission status on INIT_LINK mailbox command */
2510         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2511             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2512                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2513                                 "2860 SLI authentication is required "
2514                                 "for INIT_LINK but has not done yet\n");
2515
2516         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2517                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2518         else
2519                 mempool_free(pmb, phba->mbox_mem_pool);
2520 }
2521 /**
2522  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2523  * @phba: Pointer to HBA context object.
2524  * @pmb: Pointer to mailbox object.
2525  *
2526  * This function is the unreg rpi mailbox completion handler. It
2527  * frees the memory resources associated with the completed mailbox
2528  * command. An additional reference is put on the ndlp to prevent
2529  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2530  * the unreg mailbox command completes; this routine puts that
2531  * reference back.
2532  *
2533  **/
2534 void
2535 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2536 {
2537         struct lpfc_vport  *vport = pmb->vport;
2538         struct lpfc_nodelist *ndlp;
2539
2540         ndlp = pmb->ctx_ndlp;
2541         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2542                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2543                     (bf_get(lpfc_sli_intf_if_type,
2544                      &phba->sli4_hba.sli_intf) >=
2545                      LPFC_SLI_INTF_IF_TYPE_2)) {
2546                         if (ndlp) {
2547                                 lpfc_printf_vlog(
2548                                         vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2549                                          "0010 UNREG_LOGIN vpi:%x "
2550                                          "rpi:%x DID:%x defer x%x flg x%x "
2551                                          "map:%x %p\n",
2552                                          vport->vpi, ndlp->nlp_rpi,
2553                                          ndlp->nlp_DID, ndlp->nlp_defer_did,
2554                                          ndlp->nlp_flag,
2555                                          ndlp->nlp_usg_map, ndlp);
2556                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2557                                 lpfc_nlp_put(ndlp);
2558
2559                                 /* Check to see if there are any deferred
2560                                  * events to process
2561                                  */
2562                                 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2563                                     (ndlp->nlp_defer_did !=
2564                                     NLP_EVT_NOTHING_PENDING)) {
2565                                         lpfc_printf_vlog(
2566                                                 vport, KERN_INFO, LOG_DISCOVERY,
2567                                                 "4111 UNREG cmpl deferred "
2568                                                 "clr x%x on "
2569                                                 "NPort x%x Data: x%x %p\n",
2570                                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2571                                                 ndlp->nlp_defer_did, ndlp);
2572                                         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2573                                         ndlp->nlp_defer_did =
2574                                                 NLP_EVT_NOTHING_PENDING;
2575                                         lpfc_issue_els_plogi(
2576                                                 vport, ndlp->nlp_DID, 0);
2577                                 } else {
2578                                         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2579                                 }
2580                         }
2581                 }
2582         }
2583
2584         mempool_free(pmb, phba->mbox_mem_pool);
2585 }
2586
2587 /**
2588  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2589  * @phba: Pointer to HBA context object.
2590  *
2591  * This function is called with no lock held. This function processes all
2592  * the completed mailbox commands and gives it to upper layers. The interrupt
2593  * service routine processes mailbox completion interrupt and adds completed
2594  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2595  * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2596  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2597  * function returns the mailbox commands to the upper layer by calling the
2598  * completion handler function of each mailbox.
2599  **/
2600 int
2601 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2602 {
2603         MAILBOX_t *pmbox;
2604         LPFC_MBOXQ_t *pmb;
2605         int rc;
2606         LIST_HEAD(cmplq);
2607
2608         phba->sli.slistat.mbox_event++;
2609
2610         /* Get all completed mailbox buffers into the cmplq */
2611         spin_lock_irq(&phba->hbalock);
2612         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2613         spin_unlock_irq(&phba->hbalock);
2614
2615         /* Get a Mailbox buffer to setup mailbox commands for callback */
2616         do {
2617                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2618                 if (pmb == NULL)
2619                         break;
2620
2621                 pmbox = &pmb->u.mb;
2622
2623                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2624                         if (pmb->vport) {
2625                                 lpfc_debugfs_disc_trc(pmb->vport,
2626                                         LPFC_DISC_TRC_MBOX_VPORT,
2627                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2628                                         (uint32_t)pmbox->mbxCommand,
2629                                         pmbox->un.varWords[0],
2630                                         pmbox->un.varWords[1]);
2631                         }
2632                         else {
2633                                 lpfc_debugfs_disc_trc(phba->pport,
2634                                         LPFC_DISC_TRC_MBOX,
2635                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2636                                         (uint32_t)pmbox->mbxCommand,
2637                                         pmbox->un.varWords[0],
2638                                         pmbox->un.varWords[1]);
2639                         }
2640                 }
2641
2642                 /*
2643                  * It is a fatal error if an unknown mbox command completes.
2644                  */
2645                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2646                     MBX_SHUTDOWN) {
2647                         /* Unknown mailbox command compl */
2648                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2649                                         "(%d):0323 Unknown Mailbox command "
2650                                         "x%x (x%x/x%x) Cmpl\n",
2651                                         pmb->vport ? pmb->vport->vpi : 0,
2652                                         pmbox->mbxCommand,
2653                                         lpfc_sli_config_mbox_subsys_get(phba,
2654                                                                         pmb),
2655                                         lpfc_sli_config_mbox_opcode_get(phba,
2656                                                                         pmb));
2657                         phba->link_state = LPFC_HBA_ERROR;
2658                         phba->work_hs = HS_FFER3;
2659                         lpfc_handle_eratt(phba);
2660                         continue;
2661                 }
2662
2663                 if (pmbox->mbxStatus) {
2664                         phba->sli.slistat.mbox_stat_err++;
2665                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2666                                 /* Mbox cmd cmpl error - RETRYing */
2667                                 lpfc_printf_log(phba, KERN_INFO,
2668                                         LOG_MBOX | LOG_SLI,
2669                                         "(%d):0305 Mbox cmd cmpl "
2670                                         "error - RETRYing Data: x%x "
2671                                         "(x%x/x%x) x%x x%x x%x\n",
2672                                         pmb->vport ? pmb->vport->vpi : 0,
2673                                         pmbox->mbxCommand,
2674                                         lpfc_sli_config_mbox_subsys_get(phba,
2675                                                                         pmb),
2676                                         lpfc_sli_config_mbox_opcode_get(phba,
2677                                                                         pmb),
2678                                         pmbox->mbxStatus,
2679                                         pmbox->un.varWords[0],
2680                                         pmb->vport->port_state);
2681                                 pmbox->mbxStatus = 0;
2682                                 pmbox->mbxOwner = OWN_HOST;
2683                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2684                                 if (rc != MBX_NOT_FINISHED)
2685                                         continue;
2686                         }
2687                 }
2688
2689                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2690                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2691                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2692                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2693                                 "x%x x%x x%x\n",
2694                                 pmb->vport ? pmb->vport->vpi : 0,
2695                                 pmbox->mbxCommand,
2696                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2697                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2698                                 pmb->mbox_cmpl,
2699                                 *((uint32_t *) pmbox),
2700                                 pmbox->un.varWords[0],
2701                                 pmbox->un.varWords[1],
2702                                 pmbox->un.varWords[2],
2703                                 pmbox->un.varWords[3],
2704                                 pmbox->un.varWords[4],
2705                                 pmbox->un.varWords[5],
2706                                 pmbox->un.varWords[6],
2707                                 pmbox->un.varWords[7],
2708                                 pmbox->un.varWords[8],
2709                                 pmbox->un.varWords[9],
2710                                 pmbox->un.varWords[10]);
2711
2712                 if (pmb->mbox_cmpl)
2713                         pmb->mbox_cmpl(phba, pmb);
2714         } while (1);
2715         return 0;
2716 }
2717
2718 /**
2719  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2720  * @phba: Pointer to HBA context object.
2721  * @pring: Pointer to driver SLI ring object.
2722  * @tag: buffer tag.
2723  *
2724  * This function is called with no lock held. When the QUE_BUFTAG_BIT
2725  * is set in the tag, the buffer was posted for a particular exchange
2726  * and the function returns it without posting a replacement buffer.
2727  * If the buffer is for unsolicited ELS or CT traffic, this function
2728  * returns the buffer and also posts another buffer to the firmware.
2729  **/
2730 static struct lpfc_dmabuf *
2731 lpfc_sli_get_buff(struct lpfc_hba *phba,
2732                   struct lpfc_sli_ring *pring,
2733                   uint32_t tag)
2734 {
2735         struct hbq_dmabuf *hbq_entry;
2736
2737         if (tag & QUE_BUFTAG_BIT)
2738                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2739         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2740         if (!hbq_entry)
2741                 return NULL;
2742         return &hbq_entry->dbuf;
2743 }
2744
2745 /**
2746  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2747  * @phba: Pointer to HBA context object.
2748  * @pring: Pointer to driver SLI ring object.
2749  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2750  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2751  * @fch_type: the type for the first frame of the sequence.
2752  *
2753  * This function is called with no lock held. This function uses the r_ctl and
2754  * type of the received sequence to find the correct callback function to call
2755  * to process the sequence.
2756  **/
2757 static int
2758 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2759                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2760                          uint32_t fch_type)
2761 {
2762         int i;
2763
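             /* NVME LS sequences are handed straight to the NVME target
              * LS handler and bypass the ring's rctl/type mask table
              * below.
              */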
2764         switch (fch_type) {
2765         case FC_TYPE_NVME:
2766                 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2767                 return 1;
2768         default:
2769                 break;
2770         }
2771
2772         /* Unsolicited responses */
2773         if (pring->prt[0].profile) {
2774                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2775                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2776                                                                         saveq);
2777                 return 1;
2778         }
2779         /* We must search, based on rctl / type
2780            for the right routine */
2781         for (i = 0; i < pring->num_mask; i++) {
2782                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2783                     (pring->prt[i].type == fch_type)) {
2784                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2785                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2786                                                 (phba, pring, saveq);
2787                         return 1;
2788                 }
2789         }
2790         return 0;
2791 }
2792
2793 /**
2794  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2795  * @phba: Pointer to HBA context object.
2796  * @pring: Pointer to driver SLI ring object.
2797  * @saveq: Pointer to the unsolicited iocb.
2798  *
2799  * This function is called with no lock held by the ring event handler
2800  * when there is an unsolicited iocb posted to the response ring by the
2801  * firmware. This function gets the buffer associated with the iocbs
2802  * and calls the event handler for the ring. This function handles both
2803  * qring buffers and hbq buffers.
2804  * When the function returns 1, the caller can free the iocb object;
2805  * otherwise the upper layer functions will free it.
2806  **/
2807 static int
2808 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2809                             struct lpfc_iocbq *saveq)
2810 {
2811         IOCB_t           * irsp;
2812         WORD5            * w5p;
2813         uint32_t           Rctl, Type;
2814         struct lpfc_iocbq *iocbq;
2815         struct lpfc_dmabuf *dmzbuf;
2816
2817         irsp = &(saveq->iocb);
2818
2819         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2820                 if (pring->lpfc_sli_rcv_async_status)
2821                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2822                 else
2823                         lpfc_printf_log(phba,
2824                                         KERN_WARNING,
2825                                         LOG_SLI,
2826                                         "0316 Ring %d handler: unexpected "
2827                                         "ASYNC_STATUS iocb received evt_code "
2828                                         "0x%x\n",
2829                                         pring->ringno,
2830                                         irsp->un.asyncstat.evt_code);
2831                 return 1;
2832         }
2833
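             /* A RET_XRI completion just returns previously posted HBQ
              * buffers to the driver; free up to the three buffers
              * referenced by the iocb.
              */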
2834         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2835                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2836                 if (irsp->ulpBdeCount > 0) {
2837                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2838                                         irsp->un.ulpWord[3]);
2839                         lpfc_in_buf_free(phba, dmzbuf);
2840                 }
2841
2842                 if (irsp->ulpBdeCount > 1) {
2843                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2844                                         irsp->unsli3.sli3Words[3]);
2845                         lpfc_in_buf_free(phba, dmzbuf);
2846                 }
2847
2848                 if (irsp->ulpBdeCount > 2) {
2849                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2850                                 irsp->unsli3.sli3Words[7]);
2851                         lpfc_in_buf_free(phba, dmzbuf);
2852                 }
2853
2854                 return 1;
2855         }
2856
2857         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2858                 if (irsp->ulpBdeCount != 0) {
2859                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2860                                                 irsp->un.ulpWord[3]);
2861                         if (!saveq->context2)
2862                                 lpfc_printf_log(phba,
2863                                         KERN_ERR,
2864                                         LOG_SLI,
2865                                         "0341 Ring %d Cannot find buffer for "
2866                                         "an unsolicited iocb. tag 0x%x\n",
2867                                         pring->ringno,
2868                                         irsp->un.ulpWord[3]);
2869                 }
2870                 if (irsp->ulpBdeCount == 2) {
2871                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2872                                                 irsp->unsli3.sli3Words[7]);
2873                         if (!saveq->context3)
2874                                 lpfc_printf_log(phba,
2875                                         KERN_ERR,
2876                                         LOG_SLI,
2877                                         "0342 Ring %d Cannot find buffer for an"
2878                                         " unsolicited iocb. tag 0x%x\n",
2879                                         pring->ringno,
2880                                         irsp->unsli3.sli3Words[7]);
2881                 }
2882                 list_for_each_entry(iocbq, &saveq->list, list) {
2883                         irsp = &(iocbq->iocb);
2884                         if (irsp->ulpBdeCount != 0) {
2885                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2886                                                         irsp->un.ulpWord[3]);
2887                                 if (!iocbq->context2)
2888                                         lpfc_printf_log(phba,
2889                                                 KERN_ERR,
2890                                                 LOG_SLI,
2891                                                 "0343 Ring %d Cannot find "
2892                                                 "buffer for an unsolicited iocb"
2893                                                 ". tag 0x%x\n", pring->ringno,
2894                                                 irsp->un.ulpWord[3]);
2895                         }
2896                         if (irsp->ulpBdeCount == 2) {
2897                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2898                                                 irsp->unsli3.sli3Words[7]);
2899                                 if (!iocbq->context3)
2900                                         lpfc_printf_log(phba,
2901                                                 KERN_ERR,
2902                                                 LOG_SLI,
2903                                                 "0344 Ring %d Cannot find "
2904                                                 "buffer for an unsolicited "
2905                                                 "iocb. tag 0x%x\n",
2906                                                 pring->ringno,
2907                                                 irsp->unsli3.sli3Words[7]);
2908                         }
2909                 }
2910         }
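             /* Chain continuation frames of the same exchange (matched by
              * ox_id) on iocb_continue_saveq until the final response
              * (i.e. not IOSTAT_INTERMED_RSP) completes the sequence.
              */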
2911         if (irsp->ulpBdeCount != 0 &&
2912             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2913              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2914                 int found = 0;
2915
2916                 /* search continue save q for same XRI */
2917                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2918                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2919                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2920                                 list_add_tail(&saveq->list, &iocbq->list);
2921                                 found = 1;
2922                                 break;
2923                         }
2924                 }
2925                 if (!found)
2926                         list_add_tail(&saveq->clist,
2927                                       &pring->iocb_continue_saveq);
2928                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2929                         list_del_init(&iocbq->clist);
2930                         saveq = iocbq;
2931                         irsp = &(saveq->iocb);
2932                 } else
2933                         return 0;
2934         }
2935         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2936             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2937             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2938                 Rctl = FC_RCTL_ELS_REQ;
2939                 Type = FC_TYPE_ELS;
2940         } else {
2941                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2942                 Rctl = w5p->hcsw.Rctl;
2943                 Type = w5p->hcsw.Type;
2944
2945                 /* Firmware Workaround */
2946                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2947                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2948                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2949                         Rctl = FC_RCTL_ELS_REQ;
2950                         Type = FC_TYPE_ELS;
2951                         w5p->hcsw.Rctl = Rctl;
2952                         w5p->hcsw.Type = Type;
2953                 }
2954         }
2955
2956         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2957                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2958                                 "0313 Ring %d handler: unexpected Rctl x%x "
2959                                 "Type x%x received\n",
2960                                 pring->ringno, Rctl, Type);
2961
2962         return 1;
2963 }
2964
2965 /**
2966  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2967  * @phba: Pointer to HBA context object.
2968  * @pring: Pointer to driver SLI ring object.
2969  * @prspiocb: Pointer to response iocb object.
2970  *
2971  * This function looks up the iocb_lookup table to get the command iocb
2972  * corresponding to the given response iocb using the iotag of the
2973  * response iocb. This function is called with the hbalock held
2974  * for sli3 devices or the ring_lock for sli4 devices.
2975  * This function returns the command iocb object if it finds the command
2976  * iocb else returns NULL.
2977  **/
2978 static struct lpfc_iocbq *
2979 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2980                       struct lpfc_sli_ring *pring,
2981                       struct lpfc_iocbq *prspiocb)
2982 {
2983         struct lpfc_iocbq *cmd_iocb = NULL;
2984         uint16_t iotag;
2985         lockdep_assert_held(&phba->hbalock);
2986
2987         iotag = prspiocb->iocb.ulpIoTag;
2988
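             /* The response iotag indexes directly into the iocbq_lookup
              * array; a tag of zero or one beyond last_iotag cannot map
              * to a command iocb.
              */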
2989         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2990                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2991                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2992                         /* remove from txcmpl queue list */
2993                         list_del_init(&cmd_iocb->list);
2994                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2995                         pring->txcmplq_cnt--;
2996                         return cmd_iocb;
2997                 }
2998         }
2999
3000         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3001                         "0317 iotag x%x is out of "
3002                         "range: max iotag x%x wd0 x%x\n",
3003                         iotag, phba->sli.last_iotag,
3004                         *(((uint32_t *) &prspiocb->iocb) + 7));
3005         return NULL;
3006 }
3007
3008 /**
3009  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3010  * @phba: Pointer to HBA context object.
3011  * @pring: Pointer to driver SLI ring object.
3012  * @iotag: IOCB tag.
3013  *
3014  * This function looks up the iocb_lookup table to get the command iocb
3015  * corresponding to the given iotag. This function is called with the
3016  * hbalock held.
3017  * This function returns the command iocb object if it finds the command
3018  * iocb else returns NULL.
3019  **/
3020 static struct lpfc_iocbq *
3021 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3022                              struct lpfc_sli_ring *pring, uint16_t iotag)
3023 {
3024         struct lpfc_iocbq *cmd_iocb = NULL;
3025
3026         lockdep_assert_held(&phba->hbalock);
3027         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3028                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3029                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3030                         /* remove from txcmpl queue list */
3031                         list_del_init(&cmd_iocb->list);
3032                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3033                         pring->txcmplq_cnt--;
3034                         return cmd_iocb;
3035                 }
3036         }
3037
3038         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3039                         "0372 iotag x%x lookup error: max iotag (x%x) "
3040                         "iocb_flag x%x\n",
3041                         iotag, phba->sli.last_iotag,
3042                         cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3043         return NULL;
3044 }
3045
3046 /**
3047  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3048  * @phba: Pointer to HBA context object.
3049  * @pring: Pointer to driver SLI ring object.
3050  * @saveq: Pointer to the response iocb to be processed.
3051  *
3052  * This function is called by the ring event handler for non-fcp
3053  * rings when there is a new response iocb in the response ring.
3054  * The caller is not required to hold any locks. This function
3055  * gets the command iocb associated with the response iocb and
3056  * calls the completion handler for the command iocb. If there
3057  * is no completion handler, the function will free the resources
3058  * associated with command iocb. If the response iocb is for
3059  * an already aborted command iocb, the status of the completion
3060  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3061  * This function always returns 1.
3062  **/
3063 static int
3064 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3065                           struct lpfc_iocbq *saveq)
3066 {
3067         struct lpfc_iocbq *cmdiocbp;
3068         int rc = 1;
3069         unsigned long iflag;
3070
3071         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
3072         if (phba->sli_rev == LPFC_SLI_REV4)
3073                 spin_lock_irqsave(&pring->ring_lock, iflag);
3074         else
3075                 spin_lock_irqsave(&phba->hbalock, iflag);
3076         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3077         if (phba->sli_rev == LPFC_SLI_REV4)
3078                 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3079         else
3080                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3081
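             /* A successful lookup has already removed cmdiocbp from the
              * txcmplq while the appropriate lock was held above.
              */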
3082         if (cmdiocbp) {
3083                 if (cmdiocbp->iocb_cmpl) {
3084                         /*
3085                          * If an ELS command failed send an event to mgmt
3086                          * application.
3087                          */
3088                         if (saveq->iocb.ulpStatus &&
3089                              (pring->ringno == LPFC_ELS_RING) &&
3090                              (cmdiocbp->iocb.ulpCommand ==
3091                                 CMD_ELS_REQUEST64_CR))
3092                                 lpfc_send_els_failure_event(phba,
3093                                         cmdiocbp, saveq);
3094
3095                         /*
3096                          * Post all ELS completions to the worker thread.
3097                          * All other are passed to the completion callback.
3098                          */
3099                         if (pring->ringno == LPFC_ELS_RING) {
3100                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3101                                     (cmdiocbp->iocb_flag &
3102                                                         LPFC_DRIVER_ABORTED)) {
3103                                         spin_lock_irqsave(&phba->hbalock,
3104                                                           iflag);
3105                                         cmdiocbp->iocb_flag &=
3106                                                 ~LPFC_DRIVER_ABORTED;
3107                                         spin_unlock_irqrestore(&phba->hbalock,
3108                                                                iflag);
3109                                         saveq->iocb.ulpStatus =
3110                                                 IOSTAT_LOCAL_REJECT;
3111                                         saveq->iocb.un.ulpWord[4] =
3112                                                 IOERR_SLI_ABORTED;
3113
3114                                         /* Firmware could still be in progress
3115                                          * of DMAing payload, so don't free data
3116                                          * buffer till after a hbeat.
3117                                          */
3118                                         spin_lock_irqsave(&phba->hbalock,
3119                                                           iflag);
3120                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3121                                         spin_unlock_irqrestore(&phba->hbalock,
3122                                                                iflag);
3123                                 }
3124                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3125                                         if (saveq->iocb_flag &
3126                                             LPFC_EXCHANGE_BUSY) {
3127                                                 /* Set cmdiocb flag for the
3128                                                  * exchange busy so sgl (xri)
3129                                                  * will not be released until
3130                                                  * the abort xri is received
3131                                                  * from hba.
3132                                                  */
3133                                                 spin_lock_irqsave(
3134                                                         &phba->hbalock, iflag);
3135                                                 cmdiocbp->iocb_flag |=
3136                                                         LPFC_EXCHANGE_BUSY;
3137                                                 spin_unlock_irqrestore(
3138                                                         &phba->hbalock, iflag);
3139                                         }
3140                                         if (cmdiocbp->iocb_flag &
3141                                             LPFC_DRIVER_ABORTED) {
3142                                                 /*
3143                                                  * Clear LPFC_DRIVER_ABORTED
3144                                                  * bit in case it was driver
3145                                                  * initiated abort.
3146                                                  */
3147                                                 spin_lock_irqsave(
3148                                                         &phba->hbalock, iflag);
3149                                                 cmdiocbp->iocb_flag &=
3150                                                         ~LPFC_DRIVER_ABORTED;
3151                                                 spin_unlock_irqrestore(
3152                                                         &phba->hbalock, iflag);
3153                                                 cmdiocbp->iocb.ulpStatus =
3154                                                         IOSTAT_LOCAL_REJECT;
3155                                                 cmdiocbp->iocb.un.ulpWord[4] =
3156                                                         IOERR_ABORT_REQUESTED;
3157                                                 /*
3158                                                  * For SLI4, the response
3159                                                  * iocb carries NO_XRI in
3160                                                  * sli_xritag and does not
3161                                                  * affect sgl (xri) release.
3162                                                  */
3163                                                 saveq->iocb.ulpStatus =
3164                                                         IOSTAT_LOCAL_REJECT;
3165                                                 saveq->iocb.un.ulpWord[4] =
3166                                                         IOERR_SLI_ABORTED;
3167                                                 spin_lock_irqsave(
3168                                                         &phba->hbalock, iflag);
3169                                                 saveq->iocb_flag |=
3170                                                         LPFC_DELAY_MEM_FREE;
3171                                                 spin_unlock_irqrestore(
3172                                                         &phba->hbalock, iflag);
3173                                         }
3174                                 }
3175                         }
3176                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3177                 } else
3178                         lpfc_sli_release_iocbq(phba, cmdiocbp);
3179         } else {
3180                 /*
3181                  * Unknown initiating command based on the response iotag.
3182                  * This could be the case on the ELS ring because of
3183                  * lpfc_els_abort().
3184                  */
3185                 if (pring->ringno != LPFC_ELS_RING) {
3186                         /*
3187                          * Ring <ringno> handler: unexpected completion IoTag
3188                          * <IoTag>
3189                          */
3190                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3191                                          "0322 Ring %d handler: "
3192                                          "unexpected completion IoTag x%x "
3193                                          "Data: x%x x%x x%x x%x\n",
3194                                          pring->ringno,
3195                                          saveq->iocb.ulpIoTag,
3196                                          saveq->iocb.ulpStatus,
3197                                          saveq->iocb.un.ulpWord[4],
3198                                          saveq->iocb.ulpCommand,
3199                                          saveq->iocb.ulpContext);
3200                 }
3201         }
3202
3203         return rc;
3204 }
3205
3206 /**
3207  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3208  * @phba: Pointer to HBA context object.
3209  * @pring: Pointer to driver SLI ring object.
3210  *
3211  * This function is called from the iocb ring event handlers when the
3212  * put pointer is ahead of the get pointer for a ring. This function signals
3213  * an error attention condition to the worker thread and the worker
3214  * thread will transition the HBA to offline state.
3215  **/
3216 static void
3217 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3218 {
3219         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3220         /*
3221          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3222          * rsp ring <portRspMax>
3223          */
3224         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3225                         "0312 Ring %d handler: portRspPut %d "
3226                         "is bigger than rsp ring %d\n",
3227                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
3228                         pring->sli.sli3.numRiocb);
3229
3230         phba->link_state = LPFC_HBA_ERROR;
3231
3232         /*
3233          * All error attention handlers are posted to
3234          * worker thread
3235          */
3236         phba->work_ha |= HA_ERATT;
3237         phba->work_hs = HS_FFER3;
3238
3239         lpfc_worker_wake_up(phba);
3240
3241         return;
3242 }
3243
3244 /**
3245  * lpfc_poll_eratt - Error attention polling timer timeout handler
3246  * @t: Pointer to the eratt_poll timer_list within the HBA context object.
3247  *
3248  * This function is invoked by the Error Attention polling timer when the
3249  * timer times out. It will check the SLI Error Attention register for
3250  * possible attention events. If so, it will post an Error Attention event
3251  * and wake up worker thread to process it. Otherwise, it will set up the
3252  * Error Attention polling timer for the next poll.
3253  **/
3254 void lpfc_poll_eratt(struct timer_list *t)
3255 {
3256         struct lpfc_hba *phba;
3257         uint32_t eratt = 0;
3258         uint64_t sli_intr, cnt;
3259
3260         phba = from_timer(phba, t, eratt_poll);
3261
3262         /* Here we will also keep track of interrupts per sec of the hba */
3263         sli_intr = phba->sli.slistat.sli_intr;
3264
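             /* The interrupt counter may have wrapped since the last
              * poll; account for the rollover when computing the delta.
              */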
3265         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3266                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3267                         sli_intr);
3268         else
3269                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3270
3271         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3272         do_div(cnt, phba->eratt_poll_interval);
3273         phba->sli.slistat.sli_ips = cnt;
3274
3275         phba->sli.slistat.sli_prev_intr = sli_intr;
3276
3277         /* Check chip HA register for error event */
3278         eratt = lpfc_sli_check_eratt(phba);
3279
3280         if (eratt)
3281                 /* Tell the worker thread there is work to do */
3282                 lpfc_worker_wake_up(phba);
3283         else
3284                 /* Restart the timer for next eratt poll */
3285                 mod_timer(&phba->eratt_poll,
3286                           jiffies +
3287                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3288         return;
3289 }
3290
3291
3292 /**
3293  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3294  * @phba: Pointer to HBA context object.
3295  * @pring: Pointer to driver SLI ring object.
3296  * @mask: Host attention register mask for this ring.
3297  *
3298  * This function is called from the interrupt context when there is a ring
3299  * event for the fcp ring. The caller does not hold any lock.
3300  * The function processes each response iocb in the response ring until it
3301  * finds an iocb with the LE bit set, chaining all the iocbs up to that
3302  * one. The function will call the completion handler of the command iocb
3303  * if the response iocb indicates a completion for a command iocb or it is
3304  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3305  * function if this is an unsolicited iocb.
3306  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3307  * to check it explicitly.
3308  */
3309 int
3310 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3311                                 struct lpfc_sli_ring *pring, uint32_t mask)
3312 {
3313         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3314         IOCB_t *irsp = NULL;
3315         IOCB_t *entry = NULL;
3316         struct lpfc_iocbq *cmdiocbq = NULL;
3317         struct lpfc_iocbq rspiocbq;
3318         uint32_t status;
3319         uint32_t portRspPut, portRspMax;
3320         int rc = 1;
3321         lpfc_iocb_type type;
3322         unsigned long iflag;
3323         uint32_t rsp_cmpl = 0;
3324
3325         spin_lock_irqsave(&phba->hbalock, iflag);
3326         pring->stats.iocb_event++;
3327
3328         /*
3329          * The next available response entry should never exceed the maximum
3330          * entries.  If it does, treat it as an adapter hardware error.
3331          */
3332         portRspMax = pring->sli.sli3.numRiocb;
3333         portRspPut = le32_to_cpu(pgp->rspPutInx);
3334         if (unlikely(portRspPut >= portRspMax)) {
3335                 lpfc_sli_rsp_pointers_error(phba, pring);
3336                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3337                 return 1;
3338         }
3339         if (phba->fcp_ring_in_use) {
3340                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3341                 return 1;
3342         }
3343         phba->fcp_ring_in_use = 1;
3344
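             /* Order the rspPutInx read above before any reads of the
              * response ring entries below.
              */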
3345         rmb();
3346         while (pring->sli.sli3.rspidx != portRspPut) {
3347                 /*
3348                  * Fetch an entry off the ring and copy it into a local data
3349                  * structure.  The copy involves a byte-swap since the
3350                  * network byte order and pci byte orders are different.
3351                  */
3352                 entry = lpfc_resp_iocb(phba, pring);
3353                 phba->last_completion_time = jiffies;
3354
3355                 if (++pring->sli.sli3.rspidx >= portRspMax)
3356                         pring->sli.sli3.rspidx = 0;
3357
3358                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3359                                       (uint32_t *) &rspiocbq.iocb,
3360                                       phba->iocb_rsp_size);
3361                 INIT_LIST_HEAD(&(rspiocbq.list));
3362                 irsp = &rspiocbq.iocb;
3363
3364                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3365                 pring->stats.iocb_rsp++;
3366                 rsp_cmpl++;
3367
3368                 if (unlikely(irsp->ulpStatus)) {
3369                         /*
3370                          * If resource errors reported from HBA, reduce
3371                          * queuedepths of the SCSI device.
3372                          */
3373                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3374                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3375                              IOERR_NO_RESOURCES)) {
3376                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3377                                 phba->lpfc_rampdown_queue_depth(phba);
3378                                 spin_lock_irqsave(&phba->hbalock, iflag);
3379                         }
3380
3381                         /* Rsp ring <ringno> error: IOCB */
3382                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3383                                         "0336 Rsp Ring %d error: IOCB Data: "
3384                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3385                                         pring->ringno,
3386                                         irsp->un.ulpWord[0],
3387                                         irsp->un.ulpWord[1],
3388                                         irsp->un.ulpWord[2],
3389                                         irsp->un.ulpWord[3],
3390                                         irsp->un.ulpWord[4],
3391                                         irsp->un.ulpWord[5],
3392                                         *(uint32_t *)&irsp->un1,
3393                                         *((uint32_t *)&irsp->un1 + 1));
3394                 }
3395
3396                 switch (type) {
3397                 case LPFC_ABORT_IOCB:
3398                 case LPFC_SOL_IOCB:
3399                         /*
3400                          * Idle exchange closed via ABTS from port.  No iocb
3401                          * resources need to be recovered.
3402                          */
3403                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3404                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3405                                                 "0333 IOCB cmd 0x%x"
3406                                                 " processed. Skipping"
3407                                                 " completion\n",
3408                                                 irsp->ulpCommand);
3409                                 break;
3410                         }
3411
3412                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3413                                                          &rspiocbq);
3414                         if (unlikely(!cmdiocbq))
3415                                 break;
3416                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3417                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3418                         if (cmdiocbq->iocb_cmpl) {
3419                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3420                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3421                                                       &rspiocbq);
3422                                 spin_lock_irqsave(&phba->hbalock, iflag);
3423                         }
3424                         break;
3425                 case LPFC_UNSOL_IOCB:
3426                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3427                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3428                         spin_lock_irqsave(&phba->hbalock, iflag);
3429                         break;
3430                 default:
3431                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3432                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3433                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3434                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3435                                        MAX_MSG_DATA);
3436                                 dev_warn(&((phba->pcidev)->dev),
3437                                          "lpfc%d: %s\n",
3438                                          phba->brd_no, adaptermsg);
3439                         } else {
3440                                 /* Unknown IOCB command */
3441                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3442                                                 "0334 Unknown IOCB command "
3443                                                 "Data: x%x, x%x x%x x%x x%x\n",
3444                                                 type, irsp->ulpCommand,
3445                                                 irsp->ulpStatus,
3446                                                 irsp->ulpIoTag,
3447                                                 irsp->ulpContext);
3448                         }
3449                         break;
3450                 }
3451
3452                 /*
3453                  * The response IOCB has been processed.  Update the ring
3454                  * pointer in SLIM.  If the port response put pointer has not
3455                  * been updated, sync the pgp->rspPutInx and fetch the new port
3456                  * response put pointer.
3457                  */
3458                 writel(pring->sli.sli3.rspidx,
3459                         &phba->host_gp[pring->ringno].rspGetInx);
3460
3461                 if (pring->sli.sli3.rspidx == portRspPut)
3462                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3463         }
3464
3465         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3466                 pring->stats.iocb_rsp_full++;
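                     /* SET RxRE_RSP in the Chip Attention register; each
                      * ring owns a 4-bit field, hence the ringno * 4
                      * shift.
                      */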
3467                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3468                 writel(status, phba->CAregaddr);
3469                 readl(phba->CAregaddr);
3470         }
3471         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3472                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3473                 pring->stats.iocb_cmd_empty++;
3474
3475                 /* Force update of the local copy of cmdGetInx */
3476                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3477                 lpfc_sli_resume_iocb(phba, pring);
3478
3479                 if ((pring->lpfc_sli_cmd_available))
3480                         (pring->lpfc_sli_cmd_available) (phba, pring);
3481
3482         }
3483
3484         phba->fcp_ring_in_use = 0;
3485         spin_unlock_irqrestore(&phba->hbalock, iflag);
3486         return rc;
3487 }
3488
3489 /**
3490  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3491  * @phba: Pointer to HBA context object.
3492  * @pring: Pointer to driver SLI ring object.
3493  * @rspiocbp: Pointer to driver response IOCB object.
3494  *
3495  * This function is called from the worker thread when there is a slow-path
3496  * response IOCB to process. This function chains all the response iocbs until
3497  * seeing the iocb with the LE bit set. The function will call
3498  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3499  * completion of a command iocb. The function will call the
3500  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3501  * The function frees the resources or calls the completion handler if this
3502  * iocb is an abort completion. The function returns NULL when the response
3503  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3504  * this function shall chain the iocb on to the iocb_continueq and return the
3505  * response iocb passed in.
3506  **/
3507 static struct lpfc_iocbq *
3508 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3509                         struct lpfc_iocbq *rspiocbp)
3510 {
3511         struct lpfc_iocbq *saveq;
3512         struct lpfc_iocbq *cmdiocbp;
3513         struct lpfc_iocbq *next_iocb;
3514         IOCB_t *irsp = NULL;
3515         uint32_t free_saveq;
3516         uint8_t iocb_cmd_type;
3517         lpfc_iocb_type type;
3518         unsigned long iflag;
3519         int rc;
3520
3521         spin_lock_irqsave(&phba->hbalock, iflag);
3522         /* First add the response iocb to the continueq list */
3523         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3524         pring->iocb_continueq_cnt++;
3525
3526         /* Now, determine whether the list is completed for processing */
3527         irsp = &rspiocbp->iocb;
3528         if (irsp->ulpLe) {
3529                 /*
3530                  * By default, the driver expects to free all resources
3531                  * associated with this iocb completion.
3532                  */
3533                 free_saveq = 1;
3534                 saveq = list_get_first(&pring->iocb_continueq,
3535                                        struct lpfc_iocbq, list);
3536                 irsp = &(saveq->iocb);
3537                 list_del_init(&pring->iocb_continueq);
3538                 pring->iocb_continueq_cnt = 0;
3539
3540                 pring->stats.iocb_rsp++;
3541
3542                 /*
3543                  * If resource errors reported from HBA, reduce
3544                  * queuedepths of the SCSI device.
3545                  */
3546                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3547                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3548                      IOERR_NO_RESOURCES)) {
3549                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3550                         phba->lpfc_rampdown_queue_depth(phba);
3551                         spin_lock_irqsave(&phba->hbalock, iflag);
3552                 }
3553
3554                 if (irsp->ulpStatus) {
3555                         /* Rsp ring <ringno> error: IOCB */
3556                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3557                                         "0328 Rsp Ring %d error: "
3558                                         "IOCB Data: "
3559                                         "x%x x%x x%x x%x "
3560                                         "x%x x%x x%x x%x "
3561                                         "x%x x%x x%x x%x "
3562                                         "x%x x%x x%x x%x\n",
3563                                         pring->ringno,
3564                                         irsp->un.ulpWord[0],
3565                                         irsp->un.ulpWord[1],
3566                                         irsp->un.ulpWord[2],
3567                                         irsp->un.ulpWord[3],
3568                                         irsp->un.ulpWord[4],
3569                                         irsp->un.ulpWord[5],
3570                                         *(((uint32_t *) irsp) + 6),
3571                                         *(((uint32_t *) irsp) + 7),
3572                                         *(((uint32_t *) irsp) + 8),
3573                                         *(((uint32_t *) irsp) + 9),
3574                                         *(((uint32_t *) irsp) + 10),
3575                                         *(((uint32_t *) irsp) + 11),
3576                                         *(((uint32_t *) irsp) + 12),
3577                                         *(((uint32_t *) irsp) + 13),
3578                                         *(((uint32_t *) irsp) + 14),
3579                                         *(((uint32_t *) irsp) + 15));
3580                 }
3581
3582                 /*
3583                  * Fetch the IOCB command type and call the correct completion
3584                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3585                  * get freed back to the lpfc_iocb_list by the discovery
3586                  * kernel thread.
3587                  */
3588                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3589                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3590                 switch (type) {
3591                 case LPFC_SOL_IOCB:
3592                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3593                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3594                         spin_lock_irqsave(&phba->hbalock, iflag);
3595                         break;
3596
3597                 case LPFC_UNSOL_IOCB:
3598                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3599                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3600                         spin_lock_irqsave(&phba->hbalock, iflag);
3601                         if (!rc)
3602                                 free_saveq = 0;
3603                         break;
3604
3605                 case LPFC_ABORT_IOCB:
3606                         cmdiocbp = NULL;
3607                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3608                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3609                                                                  saveq);
3610                         if (cmdiocbp) {
3611                                 /* Call the specified completion routine */
3612                                 if (cmdiocbp->iocb_cmpl) {
3613                                         spin_unlock_irqrestore(&phba->hbalock,
3614                                                                iflag);
3615                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3616                                                               saveq);
3617                                         spin_lock_irqsave(&phba->hbalock,
3618                                                           iflag);
3619                                 } else
3620                                         __lpfc_sli_release_iocbq(phba,
3621                                                                  cmdiocbp);
3622                         }
3623                         break;
3624
3625                 case LPFC_UNKNOWN_IOCB:
3626                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3627                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3628                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3629                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3630                                        MAX_MSG_DATA);
3631                                 dev_warn(&((phba->pcidev)->dev),
3632                                          "lpfc%d: %s\n",
3633                                          phba->brd_no, adaptermsg);
3634                         } else {
3635                                 /* Unknown IOCB command */
3636                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3637                                                 "0335 Unknown IOCB "
3638                                                 "command Data: x%x "
3639                                                 "x%x x%x x%x\n",
3640                                                 irsp->ulpCommand,
3641                                                 irsp->ulpStatus,
3642                                                 irsp->ulpIoTag,
3643                                                 irsp->ulpContext);
3644                         }
3645                         break;
3646                 }
3647
3648                 if (free_saveq) {
3649                         list_for_each_entry_safe(rspiocbp, next_iocb,
3650                                                  &saveq->list, list) {
3651                                 list_del_init(&rspiocbp->list);
3652                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3653                         }
3654                         __lpfc_sli_release_iocbq(phba, saveq);
3655                 }
3656                 rspiocbp = NULL;
3657         }
3658         spin_unlock_irqrestore(&phba->hbalock, iflag);
3659         return rspiocbp;
3660 }
3661
3662 /**
3663  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3664  * @phba: Pointer to HBA context object.
3665  * @pring: Pointer to driver SLI ring object.
3666  * @mask: Host attention register mask for this ring.
3667  *
3668  * This routine wraps the actual slow_ring event process routine from the
3669  * API jump table function pointer from the lpfc_hba struct.
3670  **/
3671 void
3672 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3673                                 struct lpfc_sli_ring *pring, uint32_t mask)
3674 {
3675         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3676 }
3677
3678 /**
3679  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3680  * @phba: Pointer to HBA context object.
3681  * @pring: Pointer to driver SLI ring object.
3682  * @mask: Host attention register mask for this ring.
3683  *
3684  * This function is called from the worker thread when there is a ring event
3685  * for non-fcp rings. The caller does not hold any lock. The function
3686  * removes each response iocb in the response ring and calls the handle
3687  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3688  **/
3689 static void
3690 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3691                                    struct lpfc_sli_ring *pring, uint32_t mask)
3692 {
3693         struct lpfc_pgp *pgp;
3694         IOCB_t *entry;
3695         IOCB_t *irsp = NULL;
3696         struct lpfc_iocbq *rspiocbp = NULL;
3697         uint32_t portRspPut, portRspMax;
3698         unsigned long iflag;
3699         uint32_t status;
3700
3701         pgp = &phba->port_gp[pring->ringno];
3702         spin_lock_irqsave(&phba->hbalock, iflag);
3703         pring->stats.iocb_event++;
3704
3705         /*
3706          * The next available response entry should never exceed the maximum
3707          * entries.  If it does, treat it as an adapter hardware error.
3708          */
3709         portRspMax = pring->sli.sli3.numRiocb;
3710         portRspPut = le32_to_cpu(pgp->rspPutInx);
3711         if (portRspPut >= portRspMax) {
3712                 /*
3713                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3714                  * rsp ring <portRspMax>
3715                  */
3716                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3717                                 "0303 Ring %d handler: portRspPut %d "
3718                                 "is bigger than rsp ring %d\n",
3719                                 pring->ringno, portRspPut, portRspMax);
3720
3721                 phba->link_state = LPFC_HBA_ERROR;
3722                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3723
3724                 phba->work_hs = HS_FFER3;
3725                 lpfc_handle_eratt(phba);
3726
3727                 return;
3728         }
3729
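             /* Order the rspPutInx read above before reading the ring
              * entries below.
              */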
3730         rmb();
3731         while (pring->sli.sli3.rspidx != portRspPut) {
3732                 /*
3733                  * Build a completion list and call the appropriate handler.
3734                  * The process is to get the next available response iocb, get
3735                  * a free iocb from the list, copy the response data into the
3736                  * free iocb, insert to the continuation list, and update the
3737                  * next response index to slim.  This process makes response
3738                  * iocb's in the ring available to DMA as fast as possible but
3739                  * pays a penalty for a copy operation.  Since the iocb is
3740                  * only 32 bytes, this penalty is considered small relative to
3741                  * the PCI reads for register values and a slim write.  When
3742                  * the ulpLe field is set, the entire Command has been
3743                  * received.
3744                  */
3745                 entry = lpfc_resp_iocb(phba, pring);
3746
3747                 phba->last_completion_time = jiffies;
3748                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3749                 if (rspiocbp == NULL) {
3750                         printk(KERN_ERR "%s: out of buffers! Failing "
3751                                "completion.\n", __func__);
3752                         break;
3753                 }
3754
3755                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3756                                       phba->iocb_rsp_size);
3757                 irsp = &rspiocbp->iocb;
3758
3759                 if (++pring->sli.sli3.rspidx >= portRspMax)
3760                         pring->sli.sli3.rspidx = 0;
3761
3762                 if (pring->ringno == LPFC_ELS_RING) {
3763                         lpfc_debugfs_slow_ring_trc(phba,
3764                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3765                                 *(((uint32_t *) irsp) + 4),
3766                                 *(((uint32_t *) irsp) + 6),
3767                                 *(((uint32_t *) irsp) + 7));
3768                 }
3769
3770                 writel(pring->sli.sli3.rspidx,
3771                         &phba->host_gp[pring->ringno].rspGetInx);
3772
3773                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3774                 /* Handle the response IOCB */
3775                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3776                 spin_lock_irqsave(&phba->hbalock, iflag);
3777
3778                 /*
3779                  * If the port response put pointer has not been updated, sync
3780          * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3781                  * response put pointer.
3782                  */
3783                 if (pring->sli.sli3.rspidx == portRspPut) {
3784                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3785                 }
3786         } /* while (pring->sli.sli3.rspidx != portRspPut) */
3787
3788         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3789                 /* At least one response entry has been freed */
3790                 pring->stats.iocb_rsp_full++;
3791                 /* SET RxRE_RSP in Chip Att register */
3792                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3793                 writel(status, phba->CAregaddr);
3794                 readl(phba->CAregaddr); /* flush */
3795         }
3796         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3797                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3798                 pring->stats.iocb_cmd_empty++;
3799
3800                 /* Force update of the local copy of cmdGetInx */
3801                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3802                 lpfc_sli_resume_iocb(phba, pring);
3803
3804                 if ((pring->lpfc_sli_cmd_available))
3805                         (pring->lpfc_sli_cmd_available) (phba, pring);
3806
3807         }
3808
3809         spin_unlock_irqrestore(&phba->hbalock, iflag);
3810         return;
3811 }
3812
3813 /**
3814  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3815  * @phba: Pointer to HBA context object.
3816  * @pring: Pointer to driver SLI ring object.
3817  * @mask: Host attention register mask for this ring.
3818  *
3819  * This function is called from the worker thread when there is a pending
3820  * ELS response iocb on the driver internal slow-path response iocb worker
3821  * queue. The caller does not hold any lock. The function removes each
3822  * response iocb from the response worker queue and calls the handle
3823  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3824  **/
3825 static void
3826 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3827                                    struct lpfc_sli_ring *pring, uint32_t mask)
3828 {
3829         struct lpfc_iocbq *irspiocbq;
3830         struct hbq_dmabuf *dmabuf;
3831         struct lpfc_cq_event *cq_event;
3832         unsigned long iflag;
3833         int count = 0;
3834
3835         spin_lock_irqsave(&phba->hbalock, iflag);
3836         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3837         spin_unlock_irqrestore(&phba->hbalock, iflag);
3838         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3839                 /* Get the response iocb from the head of work queue */
3840                 spin_lock_irqsave(&phba->hbalock, iflag);
3841                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3842                                  cq_event, struct lpfc_cq_event, list);
3843                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3844
3845                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3846                 case CQE_CODE_COMPL_WQE:
3847                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3848                                                  cq_event);
3849                         /* Translate ELS WCQE to response IOCBQ */
3850                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3851                                                                    irspiocbq);
3852                         if (irspiocbq)
3853                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3854                                                            irspiocbq);
3855                         count++;
3856                         break;
3857                 case CQE_CODE_RECEIVE:
3858                 case CQE_CODE_RECEIVE_V1:
3859                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3860                                               cq_event);
3861                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3862                         count++;
3863                         break;
3864                 default:
3865                         break;
3866                 }
3867
3868                 /* Limit the number of events to 64 to avoid soft lockups */
3869                 if (count == 64)
3870                         break;
3871         }
3872 }
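
/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of the
 * lock-drop pattern used above: the event list is only touched while holding
 * the spinlock, each event is handled with the lock released, and the work
 * is bounded per invocation. All "example_" names are hypothetical.
 */
struct example_event {
	struct list_head list;
};

static void example_drain_queue(struct list_head *queue, spinlock_t *lock)
{
	struct example_event *evt;
	unsigned long iflag;
	int count = 0;

	for (;;) {
		spin_lock_irqsave(lock, iflag);
		if (list_empty(queue)) {
			spin_unlock_irqrestore(lock, iflag);
			break;
		}
		evt = list_first_entry(queue, struct example_event, list);
		list_del_init(&evt->list);
		spin_unlock_irqrestore(lock, iflag);

		/* Process evt here without holding the lock. */

		if (++count == 64)	/* bound the work per invocation */
			break;
	}
}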
3873
3874 /**
3875  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3876  * @phba: Pointer to HBA context object.
3877  * @pring: Pointer to driver SLI ring object.
3878  *
3879  * This function aborts all iocbs in the given ring and frees all the iocb
3880  * objects in txq. This function issues an abort iocb for all the iocb commands
3881  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3882  * the return of this function. The caller is not required to hold any locks.
3883  **/
3884 void
3885 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3886 {
3887         LIST_HEAD(completions);
3888         struct lpfc_iocbq *iocb, *next_iocb;
3889
3890         if (pring->ringno == LPFC_ELS_RING) {
3891                 lpfc_fabric_abort_hba(phba);
3892         }
3893
3894         /* Error everything on txq and txcmplq
3895          * First do the txq.
3896          */
3897         if (phba->sli_rev >= LPFC_SLI_REV4) {
3898                 spin_lock_irq(&pring->ring_lock);
3899                 list_splice_init(&pring->txq, &completions);
3900                 pring->txq_cnt = 0;
3901                 spin_unlock_irq(&pring->ring_lock);
3902
3903                 spin_lock_irq(&phba->hbalock);
3904                 /* Next issue ABTS for everything on the txcmplq */
3905                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3906                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3907                 spin_unlock_irq(&phba->hbalock);
3908         } else {
3909                 spin_lock_irq(&phba->hbalock);
3910                 list_splice_init(&pring->txq, &completions);
3911                 pring->txq_cnt = 0;
3912
3913                 /* Next issue ABTS for everything on the txcmplq */
3914                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3915                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3916                 spin_unlock_irq(&phba->hbalock);
3917         }
3918
3919         /* Cancel all the IOCBs from the completions list */
3920         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3921                               IOERR_SLI_ABORTED);
3922 }
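
/*
 * Editor's note: hedged usage sketch. A caller needing to abort all
 * outstanding ELS traffic could select the ELS ring for the active SLI
 * revision and hand it to the helper above. Illustrative only; the exact
 * ring lookup may differ from what the driver does internally.
 */
static void example_abort_els_ring(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;

	if (phba->sli_rev < LPFC_SLI_REV4)
		pring = &phba->sli.sli3_ring[LPFC_ELS_RING];
	else
		pring = phba->sli4_hba.els_wq->pring;

	lpfc_sli_abort_iocb_ring(phba, pring);
}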
3923
3924 /**
3925  * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3926  * @phba: Pointer to HBA context object.
3927  * @pring: Pointer to driver SLI ring object.
3928  *
3929  * This function issues an abort wqe for each outstanding IO command on the
3930  * given ring's txcmplq. The IOs in the txcmplq are not guaranteed to complete
3931  * before the return of this function. The caller is not required to hold any
3932  * locks.
3933  **/
3934 void
3935 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3936 {
3937         LIST_HEAD(completions);
3938         struct lpfc_iocbq *iocb, *next_iocb;
3939
3940         if (pring->ringno == LPFC_ELS_RING)
3941                 lpfc_fabric_abort_hba(phba);
3942
3943         spin_lock_irq(&phba->hbalock);
3944         /* Next issue ABTS for everything on the txcmplq */
3945         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3946                 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3947         spin_unlock_irq(&phba->hbalock);
3948 }
3949
3950
3951 /**
3952  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3953  * @phba: Pointer to HBA context object.
3955  *
3956  * This function aborts all iocbs in FCP rings and frees all the iocb
3957  * objects in txq. This function issues an abort iocb for all the iocb commands
3958  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3959  * the return of this function. The caller is not required to hold any locks.
3960  **/
3961 void
3962 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3963 {
3964         struct lpfc_sli *psli = &phba->sli;
3965         struct lpfc_sli_ring  *pring;
3966         uint32_t i;
3967
3968         /* Abort all IO on each FCP ring */
3969         if (phba->sli_rev >= LPFC_SLI_REV4) {
3970                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3971                         pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3972                         lpfc_sli_abort_iocb_ring(phba, pring);
3973                 }
3974         } else {
3975                 pring = &psli->sli3_ring[LPFC_FCP_RING];
3976                 lpfc_sli_abort_iocb_ring(phba, pring);
3977         }
3978 }
3979
3980 /**
3981  * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3982  * @phba: Pointer to HBA context object.
3983  *
3984  * This function aborts all wqes in NVME rings. This function issues an
3985  * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3986  * the txcmplq are not guaranteed to complete before the return of this
3987  * function. The caller is not required to hold any locks.
3988  **/
3989 void
3990 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3991 {
3992         struct lpfc_sli_ring  *pring;
3993         uint32_t i;
3994
3995         if ((phba->sli_rev < LPFC_SLI_REV4) ||
3996             !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3997                 return;
3998
3999         /* Abort all IO on each NVME ring. */
4000         for (i = 0; i < phba->cfg_hdw_queue; i++) {
4001                 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4002                 lpfc_sli_abort_wqe_ring(phba, pring);
4003         }
4004 }
4005
4006
4007 /**
4008  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
4009  * @phba: Pointer to HBA context object.
4010  *
4011  * This function flushes all iocbs in the fcp ring and frees all the iocb
4012  * objects in txq and txcmplq. This function will not issue abort iocbs
4013  * for all the iocb commands in txcmplq; they will just be returned with
4014  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4015  * slot has been permanently disabled.
4016  **/
4017 void
4018 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4019 {
4020         LIST_HEAD(txq);
4021         LIST_HEAD(txcmplq);
4022         struct lpfc_sli *psli = &phba->sli;
4023         struct lpfc_sli_ring  *pring;
4024         uint32_t i;
4025         struct lpfc_iocbq *piocb, *next_iocb;
4026
4027         spin_lock_irq(&phba->hbalock);
4028         /* Indicate the I/O queues are flushed */
4029         phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
4030         spin_unlock_irq(&phba->hbalock);
4031
4032         /* Flush all IO on each FCP ring */
4033         if (phba->sli_rev >= LPFC_SLI_REV4) {
4034                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4035                         pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
4036
4037                         spin_lock_irq(&pring->ring_lock);
4038                         /* Retrieve everything on txq */
4039                         list_splice_init(&pring->txq, &txq);
4040                         list_for_each_entry_safe(piocb, next_iocb,
4041                                                  &pring->txcmplq, list)
4042                                 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4043                         /* Retrieve everything on the txcmplq */
4044                         list_splice_init(&pring->txcmplq, &txcmplq);
4045                         pring->txq_cnt = 0;
4046                         pring->txcmplq_cnt = 0;
4047                         spin_unlock_irq(&pring->ring_lock);
4048
4049                         /* Flush the txq */
4050                         lpfc_sli_cancel_iocbs(phba, &txq,
4051                                               IOSTAT_LOCAL_REJECT,
4052                                               IOERR_SLI_DOWN);
4053                         /* Flush the txcmplq */
4054                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
4055                                               IOSTAT_LOCAL_REJECT,
4056                                               IOERR_SLI_DOWN);
4057                 }
4058         } else {
4059                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4060
4061                 spin_lock_irq(&phba->hbalock);
4062                 /* Retrieve everything on txq */
4063                 list_splice_init(&pring->txq, &txq);
4064                 list_for_each_entry_safe(piocb, next_iocb,
4065                                          &pring->txcmplq, list)
4066                         piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4067                 /* Retrieve everything on the txcmplq */
4068                 list_splice_init(&pring->txcmplq, &txcmplq);
4069                 pring->txq_cnt = 0;
4070                 pring->txcmplq_cnt = 0;
4071                 spin_unlock_irq(&phba->hbalock);
4072
4073                 /* Flush the txq */
4074                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4075                                       IOERR_SLI_DOWN);
4076                 /* Flush the txcmplq */
4077                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4078                                       IOERR_SLI_DOWN);
4079         }
4080 }
4081
4082 /**
4083  * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4084  * @phba: Pointer to HBA context object.
4085  *
4086  * This function flushes all wqes in the nvme rings and frees all resources
4087  * in the txcmplq. This function does not issue abort wqes for the IO
4088  * commands in txcmplq; they will just be returned with
4089  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4090  * slot has been permanently disabled.
4091  **/
4092 void
4093 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4094 {
4095         LIST_HEAD(txcmplq);
4096         struct lpfc_sli_ring  *pring;
4097         uint32_t i;
4098         struct lpfc_iocbq *piocb, *next_iocb;
4099
4100         if ((phba->sli_rev < LPFC_SLI_REV4) ||
4101             !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4102                 return;
4103
4104         /* Hint to other driver operations that a flush is in progress. */
4105         spin_lock_irq(&phba->hbalock);
4106         phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4107         spin_unlock_irq(&phba->hbalock);
4108
4109         /* Cycle through all NVME rings and complete each IO with
4110          * a local driver reason code.  This is a flush so no
4111          * a local driver reason code.  This is a flush, so no
4112          * abort exchange is sent to the FW.
4113         for (i = 0; i < phba->cfg_hdw_queue; i++) {
4114                 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4115
4116                 spin_lock_irq(&pring->ring_lock);
4117                 list_for_each_entry_safe(piocb, next_iocb,
4118                                          &pring->txcmplq, list)
4119                         piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4120                 /* Retrieve everything on the txcmplq */
4121                 list_splice_init(&pring->txcmplq, &txcmplq);
4122                 pring->txcmplq_cnt = 0;
4123                 spin_unlock_irq(&pring->ring_lock);
4124
4125                 /* Flush the txcmplq */
4126                 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4127                                       IOSTAT_LOCAL_REJECT,
4128                                       IOERR_SLI_DOWN);
4129         }
4130 }
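
/*
 * Editor's note: illustrative sketch of how a submission path might honor
 * the flush flag set above, failing new IO fast while a flush is underway.
 * Hypothetical helper, not part of the driver.
 */
static bool example_nvme_io_blocked(struct lpfc_hba *phba)
{
	/* Set under hbalock above; a racy read is fine for a fast-fail hint. */
	return (phba->hba_flag & HBA_NVME_IOQ_FLUSH) != 0;
}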
4131
4132 /**
4133  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4134  * @phba: Pointer to HBA context object.
4135  * @mask: Bit mask to be checked.
4136  *
4137  * This function reads the host status register and compares
4138  * with the provided bit mask to check if HBA completed
4139  * the restart. This function will wait in a loop for the
4140  * HBA to complete restart. If the HBA does not restart within
4141  * 15 iterations, the function will reset the HBA again. The
4142  * function returns 1 when the HBA fails to restart, otherwise returns
4143  * zero.
4144  **/
4145 static int
4146 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4147 {
4148         uint32_t status;
4149         int i = 0;
4150         int retval = 0;
4151
4152         /* Read the HBA Host Status Register */
4153         if (lpfc_readl(phba->HSregaddr, &status))
4154                 return 1;
4155
4156         /*
4157          * Check status register every 10ms for 5 retries, then every
4158          * 500ms for 5, then every 2.5 sec for 5, then reset board and
4159          * every 2.5 sec for 4.
4160          * Break out of the loop if errors occurred during init.
4161          */
4162         while (((status & mask) != mask) &&
4163                !(status & HS_FFERM) &&
4164                i++ < 20) {
4165
4166                 if (i <= 5)
4167                         msleep(10);
4168                 else if (i <= 10)
4169                         msleep(500);
4170                 else
4171                         msleep(2500);
4172
4173                 if (i == 15) {
4174                         /* Do post */
4175                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4176                         lpfc_sli_brdrestart(phba);
4177                 }
4178                 /* Read the HBA Host Status Register */
4179                 if (lpfc_readl(phba->HSregaddr, &status)) {
4180                         retval = 1;
4181                         break;
4182                 }
4183         }
4184
4185         /* Check to see if any errors occurred during init */
4186         if ((status & HS_FFERM) || (i >= 20)) {
4187                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4188                                 "2751 Adapter failed to restart, "
4189                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4190                                 status,
4191                                 readl(phba->MBslimaddr + 0xa8),
4192                                 readl(phba->MBslimaddr + 0xac));
4193                 phba->link_state = LPFC_HBA_ERROR;
4194                 retval = 1;
4195         }
4196
4197         return retval;
4198 }
4199
4200 /**
4201  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4202  * @phba: Pointer to HBA context object.
4203  * @mask: Bit mask to be checked.
4204  *
4205  * This function checks the host status register to check if HBA is
4206  * ready. This function will wait in a loop for the HBA to be ready.
4207  * If the HBA is not ready, the function will reset the HBA PCI
4208  * function again. The function returns 1 when the HBA fails to become
4209  * ready, otherwise returns zero.
4210  **/
4211 static int
4212 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4213 {
4214         uint32_t status;
4215         int retval = 0;
4216
4217         /* Read the HBA Host Status Register */
4218         status = lpfc_sli4_post_status_check(phba);
4219
4220         if (status) {
4221                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4222                 lpfc_sli_brdrestart(phba);
4223                 status = lpfc_sli4_post_status_check(phba);
4224         }
4225
4226         /* Check to see if any errors occurred during init */
4227         if (status) {
4228                 phba->link_state = LPFC_HBA_ERROR;
4229                 retval = 1;
4230         } else
4231                 phba->sli4_hba.intr_enable = 0;
4232
4233         return retval;
4234 }
4235
4236 /**
4237  * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
4238  * @phba: Pointer to HBA context object.
4239  * @mask: Bit mask to be checked.
4240  *
4241  * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine,
4242  * invoked through the API jump table function pointer in the lpfc_hba struct.
4243  **/
4244 int
4245 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4246 {
4247         return phba->lpfc_sli_brdready(phba, mask);
4248 }
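
/*
 * Editor's note: a minimal sketch of the API jump-table pattern behind the
 * wrapper above. The driver installs the SLI-rev specific routine in the
 * lpfc_hba struct once at attach time; the wiring below is hypothetical and
 * only illustrates the idea.
 */
static void example_setup_brdready(struct lpfc_hba *phba)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
	else
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
}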
4249
4250 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4251
4252 /**
4253  * lpfc_reset_barrier - Make HBA ready for HBA reset
4254  * @phba: Pointer to HBA context object.
4255  *
4256  * This function is called before resetting an HBA. This function is called
4257  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4258  **/
4259 void lpfc_reset_barrier(struct lpfc_hba *phba)
4260 {
4261         uint32_t __iomem *resp_buf;
4262         uint32_t __iomem *mbox_buf;
4263         volatile uint32_t mbox;
4264         uint32_t hc_copy, ha_copy, resp_data;
4265         int  i;
4266         uint8_t hdrtype;
4267
4268         lockdep_assert_held(&phba->hbalock);
4269
4270         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4271         if (hdrtype != 0x80 ||
4272             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4273              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4274                 return;
4275
4276         /*
4277          * Tell the other part of the chip to suspend temporarily all
4278          * its DMA activity.
4279          */
4280         resp_buf = phba->MBslimaddr;
4281
4282         /* Disable the error attention */
4283         if (lpfc_readl(phba->HCregaddr, &hc_copy))
4284                 return;
4285         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4286         readl(phba->HCregaddr); /* flush */
4287         phba->link_flag |= LS_IGNORE_ERATT;
4288
4289         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4290                 return;
4291         if (ha_copy & HA_ERATT) {
4292                 /* Clear Chip error bit */
4293                 writel(HA_ERATT, phba->HAregaddr);
4294                 phba->pport->stopped = 1;
4295         }
4296
4297         mbox = 0;
4298         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4299         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4300
4301         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4302         mbox_buf = phba->MBslimaddr;
4303         writel(mbox, mbox_buf);
4304
4305         for (i = 0; i < 50; i++) {
4306                 if (lpfc_readl((resp_buf + 1), &resp_data))
4307                         return;
4308                 if (resp_data != ~(BARRIER_TEST_PATTERN))
4309                         mdelay(1);
4310                 else
4311                         break;
4312         }
4313         resp_data = 0;
4314         if (lpfc_readl((resp_buf + 1), &resp_data))
4315                 return;
4316         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4317                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4318                     phba->pport->stopped)
4319                         goto restore_hc;
4320                 else
4321                         goto clear_errat;
4322         }
4323
4324         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4325         resp_data = 0;
4326         for (i = 0; i < 500; i++) {
4327                 if (lpfc_readl(resp_buf, &resp_data))
4328                         return;
4329                 if (resp_data != mbox)
4330                         mdelay(1);
4331                 else
4332                         break;
4333         }
4334
4335 clear_errat:
4336
4337         while (++i < 500) {
4338                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4339                         return;
4340                 if (!(ha_copy & HA_ERATT))
4341                         mdelay(1);
4342                 else
4343                         break;
4344         }
4345
4346         if (readl(phba->HAregaddr) & HA_ERATT) {
4347                 writel(HA_ERATT, phba->HAregaddr);
4348                 phba->pport->stopped = 1;
4349         }
4350
4351 restore_hc:
4352         phba->link_flag &= ~LS_IGNORE_ERATT;
4353         writel(hc_copy, phba->HCregaddr);
4354         readl(phba->HCregaddr); /* flush */
4355 }
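
/*
 * Editor's note: the barrier above leans on a common MMIO idiom: write a
 * test pattern, then poll a readback location with bounded 1ms delays until
 * the expected value appears. A stripped-down sketch of that idiom follows;
 * names and bounds are illustrative.
 */
static bool example_poll_mmio(uint32_t __iomem *reg, uint32_t want, int max_ms)
{
	uint32_t val;
	int i;

	for (i = 0; i < max_ms; i++) {
		if (lpfc_readl(reg, &val))
			return false;	/* register read failed */
		if (val == want)
			return true;
		mdelay(1);
	}
	return false;
}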
4356
4357 /**
4358  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4359  * @phba: Pointer to HBA context object.
4360  *
4361  * This function issues a kill_board mailbox command and waits for
4362  * the error attention interrupt. This function is called for stopping
4363  * the firmware processing. The caller is not required to hold any
4364  * locks. This function calls lpfc_hba_down_post function to free
4365  * any pending commands after the kill. The function will return 1 when it
4366  * fails to kill the board, else it will return 0.
4367  **/
4368 int
4369 lpfc_sli_brdkill(struct lpfc_hba *phba)
4370 {
4371         struct lpfc_sli *psli;
4372         LPFC_MBOXQ_t *pmb;
4373         uint32_t status;
4374         uint32_t ha_copy;
4375         int retval;
4376         int i = 0;
4377
4378         psli = &phba->sli;
4379
4380         /* Kill HBA */
4381         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4382                         "0329 Kill HBA Data: x%x x%x\n",
4383                         phba->pport->port_state, psli->sli_flag);
4384
4385         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4386         if (!pmb)
4387                 return 1;
4388
4389         /* Disable the error attention */
4390         spin_lock_irq(&phba->hbalock);
4391         if (lpfc_readl(phba->HCregaddr, &status)) {
4392                 spin_unlock_irq(&phba->hbalock);
4393                 mempool_free(pmb, phba->mbox_mem_pool);
4394                 return 1;
4395         }
4396         status &= ~HC_ERINT_ENA;
4397         writel(status, phba->HCregaddr);
4398         readl(phba->HCregaddr); /* flush */
4399         phba->link_flag |= LS_IGNORE_ERATT;
4400         spin_unlock_irq(&phba->hbalock);
4401
4402         lpfc_kill_board(phba, pmb);
4403         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4404         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4405
4406         if (retval != MBX_SUCCESS) {
4407                 if (retval != MBX_BUSY)
4408                         mempool_free(pmb, phba->mbox_mem_pool);
4409                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4410                                 "2752 KILL_BOARD command failed retval %d\n",
4411                                 retval);
4412                 spin_lock_irq(&phba->hbalock);
4413                 phba->link_flag &= ~LS_IGNORE_ERATT;
4414                 spin_unlock_irq(&phba->hbalock);
4415                 return 1;
4416         }
4417
4418         spin_lock_irq(&phba->hbalock);
4419         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4420         spin_unlock_irq(&phba->hbalock);
4421
4422         mempool_free(pmb, phba->mbox_mem_pool);
4423
4424         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4425          * attention every 100ms for 3 seconds. If we don't get ERATT after
4426          * 3 seconds we still set HBA_ERROR state because the status of the
4427          * board is now undefined.
4428          */
4429         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4430                 return 1;
4431         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4432                 mdelay(100);
4433                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4434                         return 1;
4435         }
4436
4437         del_timer_sync(&psli->mbox_tmo);
4438         if (ha_copy & HA_ERATT) {
4439                 writel(HA_ERATT, phba->HAregaddr);
4440                 phba->pport->stopped = 1;
4441         }
4442         spin_lock_irq(&phba->hbalock);
4443         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4444         psli->mbox_active = NULL;
4445         phba->link_flag &= ~LS_IGNORE_ERATT;
4446         spin_unlock_irq(&phba->hbalock);
4447
4448         lpfc_hba_down_post(phba);
4449         phba->link_state = LPFC_HBA_ERROR;
4450
4451         return ha_copy & HA_ERATT ? 0 : 1;
4452 }
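
/*
 * Editor's note: worked timing for the ERATT wait above: the loop polls at
 * most 30 times with a 100ms delay per pass, i.e. 30 * 100ms = 3000ms, which
 * is the 3 second budget called out in the comment before the loop.
 */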
4453
4454 /**
4455  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4456  * @phba: Pointer to HBA context object.
4457  *
4458  * This function resets the HBA by writing HC_INITFF to the control
4459  * register. After the HBA resets, this function resets all the iocb ring
4460  * indices. This function disables PCI layer parity checking during
4461  * the reset.
4462  * This function returns 0 on success, -EIO if the PCI config read fails.
4463  * The caller is not required to hold any locks.
4464  **/
4465 int
4466 lpfc_sli_brdreset(struct lpfc_hba *phba)
4467 {
4468         struct lpfc_sli *psli;
4469         struct lpfc_sli_ring *pring;
4470         uint16_t cfg_value;
4471         int i;
4472
4473         psli = &phba->sli;
4474
4475         /* Reset HBA */
4476         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4477                         "0325 Reset HBA Data: x%x x%x\n",
4478                         (phba->pport) ? phba->pport->port_state : 0,
4479                         psli->sli_flag);
4480
4481         /* perform board reset */
4482         phba->fc_eventTag = 0;
4483         phba->link_events = 0;
4484         if (phba->pport) {
4485                 phba->pport->fc_myDID = 0;
4486                 phba->pport->fc_prevDID = 0;
4487         }
4488
4489         /* Turn off parity checking and serr during the physical reset */
4490         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4491                 return -EIO;
4492
4493         pci_write_config_word(phba->pcidev, PCI_COMMAND,
4494                               (cfg_value &
4495                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4496
4497         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4498
4499         /* Now toggle INITFF bit in the Host Control Register */
4500         writel(HC_INITFF, phba->HCregaddr);
4501         mdelay(1);
4502         readl(phba->HCregaddr); /* flush */
4503         writel(0, phba->HCregaddr);
4504         readl(phba->HCregaddr); /* flush */
4505
4506         /* Restore PCI cmd register */
4507         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4508
4509         /* Initialize relevant SLI info */
4510         for (i = 0; i < psli->num_rings; i++) {
4511                 pring = &psli->sli3_ring[i];
4512                 pring->flag = 0;
4513                 pring->sli.sli3.rspidx = 0;
4514                 pring->sli.sli3.next_cmdidx  = 0;
4515                 pring->sli.sli3.local_getidx = 0;
4516                 pring->sli.sli3.cmdidx = 0;
4517                 pring->missbufcnt = 0;
4518         }
4519
4520         phba->link_state = LPFC_WARM_START;
4521         return 0;
4522 }
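
/*
 * Editor's note: minimal sketch of the PCI save/mask/restore idiom used
 * above for the reset window. Hypothetical helper; the driver performs the
 * same steps inline.
 */
static int example_pci_parity_off(struct pci_dev *pdev, uint16_t *saved)
{
	if (pci_read_config_word(pdev, PCI_COMMAND, saved))
		return -EIO;
	/* Mask parity/SERR reporting; caller restores *saved afterwards. */
	return pci_write_config_word(pdev, PCI_COMMAND,
				     *saved & ~(PCI_COMMAND_PARITY |
						PCI_COMMAND_SERR));
}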
4523
4524 /**
4525  * lpfc_sli4_brdreset - Reset a sli-4 HBA
4526  * @phba: Pointer to HBA context object.
4527  *
4528  * This function resets a SLI4 HBA. This function disables PCI layer parity
4529  * checking while it resets the device. The caller is not required to hold
4530  * any locks.
4531  *
4532  * This function returns 0 on success, negative error code on failure.
4533  **/
4534 int
4535 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4536 {
4537         struct lpfc_sli *psli = &phba->sli;
4538         uint16_t cfg_value;
4539         int rc = 0;
4540
4541         /* Reset HBA */
4542         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4543                         "0295 Reset HBA Data: x%x x%x x%x\n",
4544                         phba->pport->port_state, psli->sli_flag,
4545                         phba->hba_flag);
4546
4547         /* perform board reset */
4548         phba->fc_eventTag = 0;
4549         phba->link_events = 0;
4550         phba->pport->fc_myDID = 0;
4551         phba->pport->fc_prevDID = 0;
4552
4553         spin_lock_irq(&phba->hbalock);
4554         psli->sli_flag &= ~(LPFC_PROCESS_LA);
4555         phba->fcf.fcf_flag = 0;
4556         spin_unlock_irq(&phba->hbalock);
4557
4558         /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4559         if (phba->hba_flag & HBA_FW_DUMP_OP) {
4560                 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4561                 return rc;
4562         }
4563
4564         /* Now physically reset the device */
4565         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4566                         "0389 Performing PCI function reset!\n");
4567
4568         /* Turn off parity checking and serr during the physical reset */
4569         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4570                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4571                                 "3205 PCI read Config failed\n");
4572                 return -EIO;
4573         }
4574
4575         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4576                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4577
4578         /* Perform FCoE PCI function reset before freeing queue memory */
4579         rc = lpfc_pci_function_reset(phba);
4580
4581         /* Restore PCI cmd register */
4582         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4583
4584         return rc;
4585 }
4586
4587 /**
4588  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4589  * @phba: Pointer to HBA context object.
4590  *
4591  * This function is called in the SLI initialization code path to
4592  * restart the HBA. The caller is not required to hold any lock.
4593  * This function writes MBX_RESTART mailbox command to the SLIM and
4594  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4595  * function to free any pending commands. The function enables
4596  * POST only during the first initialization. The function returns zero.
4597  * The function does not guarantee completion of MBX_RESTART mailbox
4598  * command before the return of this function.
4599  **/
4600 static int
4601 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4602 {
4603         MAILBOX_t *mb;
4604         struct lpfc_sli *psli;
4605         volatile uint32_t word0;
4606         void __iomem *to_slim;
4607         uint32_t hba_aer_enabled;
4608
4609         spin_lock_irq(&phba->hbalock);
4610
4611         /* Take PCIe device Advanced Error Reporting (AER) state */
4612         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4613
4614         psli = &phba->sli;
4615
4616         /* Restart HBA */
4617         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4618                         "0337 Restart HBA Data: x%x x%x\n",
4619                         (phba->pport) ? phba->pport->port_state : 0,
4620                         psli->sli_flag);
4621
4622         word0 = 0;
4623         mb = (MAILBOX_t *) &word0;
4624         mb->mbxCommand = MBX_RESTART;
4625         mb->mbxHc = 1;
4626
4627         lpfc_reset_barrier(phba);
4628
4629         to_slim = phba->MBslimaddr;
4630         writel(*(uint32_t *) mb, to_slim);
4631         readl(to_slim); /* flush */
4632
4633         /* Only skip post after fc_ffinit is completed */
4634         if (phba->pport && phba->pport->port_state)
4635                 word0 = 1;      /* This is really setting up word1 */
4636         else
4637                 word0 = 0;      /* This is really setting up word1 */
4638         to_slim = phba->MBslimaddr + sizeof (uint32_t);
4639         writel(*(uint32_t *) mb, to_slim);
4640         readl(to_slim); /* flush */
4641
4642         lpfc_sli_brdreset(phba);
4643         if (phba->pport)
4644                 phba->pport->stopped = 0;
4645         phba->link_state = LPFC_INIT_START;
4646         phba->hba_flag = 0;
4647         spin_unlock_irq(&phba->hbalock);
4648
4649         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4650         psli->stats_start = ktime_get_seconds();
4651
4652         /* Give the INITFF and Post time to settle. */
4653         mdelay(100);
4654
4655         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4656         if (hba_aer_enabled)
4657                 pci_disable_pcie_error_reporting(phba->pcidev);
4658
4659         lpfc_hba_down_post(phba);
4660
4661         return 0;
4662 }
4663
4664 /**
4665  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4666  * @phba: Pointer to HBA context object.
4667  *
4668  * This function is called in the SLI initialization code path to restart
4669  * a SLI4 HBA. The caller is not required to hold any lock.
4670  * At the end of the function, it calls lpfc_hba_down_post function to
4671  * free any pending commands.
4672  **/
4673 static int
4674 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4675 {
4676         struct lpfc_sli *psli = &phba->sli;
4677         uint32_t hba_aer_enabled;
4678         int rc;
4679
4680         /* Restart HBA */
4681         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4682                         "0296 Restart HBA Data: x%x x%x\n",
4683                         phba->pport->port_state, psli->sli_flag);
4684
4685         /* Take PCIe device Advanced Error Reporting (AER) state */
4686         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4687
4688         rc = lpfc_sli4_brdreset(phba);
4689         if (rc)
4690                 return rc;
4691
4692         spin_lock_irq(&phba->hbalock);
4693         phba->pport->stopped = 0;
4694         phba->link_state = LPFC_INIT_START;
4695         phba->hba_flag = 0;
4696         spin_unlock_irq(&phba->hbalock);
4697
4698         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4699         psli->stats_start = ktime_get_seconds();
4700
4701         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4702         if (hba_aer_enabled)
4703                 pci_disable_pcie_error_reporting(phba->pcidev);
4704
4705         lpfc_hba_down_post(phba);
4706         lpfc_sli4_queue_destroy(phba);
4707
4708         return rc;
4709 }
4710
4711 /**
4712  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4713  * @phba: Pointer to HBA context object.
4714  *
4715  * This routine wraps the actual SLI3 or SLI4 HBA restart routine, invoked
4716  * through the API jump table function pointer in the lpfc_hba struct.
4717  **/
4718 int
4719 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4720 {
4721         return phba->lpfc_sli_brdrestart(phba);
4722 }
4723
4724 /**
4725  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4726  * @phba: Pointer to HBA context object.
4727  *
4728  * This function is called after a HBA restart to wait for successful
4729  * restart of the HBA. Successful restart of the HBA is indicated by
4730  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4731  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
4732  * iterations, the function will restart the HBA again. The function returns
4733  * zero if the HBA successfully restarts, else returns a negative error code.
4734 int
4735 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4736 {
4737         uint32_t status, i = 0;
4738
4739         /* Read the HBA Host Status Register */
4740         if (lpfc_readl(phba->HSregaddr, &status))
4741                 return -EIO;
4742
4743         /* Check status register to see what current state is */
4744         i = 0;
4745         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4746
4747                 /* Check every 10ms for 10 retries, then every 100ms for 90
4748                  * retries, then every 1 sec for 50 retries, for a total of
4749                  * ~60 seconds before resetting the board again and checking
4750                  * every 1 sec for 50 more retries. Up to 60 seconds before
4751                  * board ready is required for Falcon FIPS zeroization to
4752                  * complete; any board reset in between restarts the
4753                  * zeroization and further delays board ready.
4754                  */
4755                 if (i++ >= 200) {
4756                         /* Adapter failed to init, timeout, status reg
4757                            <status> */
4758                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4759                                         "0436 Adapter failed to init, "
4760                                         "timeout, status reg x%x, "
4761                                         "FW Data: A8 x%x AC x%x\n", status,
4762                                         readl(phba->MBslimaddr + 0xa8),
4763                                         readl(phba->MBslimaddr + 0xac));
4764                         phba->link_state = LPFC_HBA_ERROR;
4765                         return -ETIMEDOUT;
4766                 }
4767
4768                 /* Check to see if any errors occurred during init */
4769                 if (status & HS_FFERM) {
4770                         /* ERROR: During chipset initialization */
4771                         /* Adapter failed to init, chipset, status reg
4772                            <status> */
4773                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4774                                         "0437 Adapter failed to init, "
4775                                         "chipset, status reg x%x, "
4776                                         "FW Data: A8 x%x AC x%x\n", status,
4777                                         readl(phba->MBslimaddr + 0xa8),
4778                                         readl(phba->MBslimaddr + 0xac));
4779                         phba->link_state = LPFC_HBA_ERROR;
4780                         return -EIO;
4781                 }
4782
4783                 if (i <= 10)
4784                         msleep(10);
4785                 else if (i <= 100)
4786                         msleep(100);
4787                 else
4788                         msleep(1000);
4789
4790                 if (i == 150) {
4791                         /* Do post */
4792                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4793                         lpfc_sli_brdrestart(phba);
4794                 }
4795                 /* Read the HBA Host Status Register */
4796                 if (lpfc_readl(phba->HSregaddr, &status))
4797                         return -EIO;
4798         }
4799
4800         /* Check to see if any errors occurred during init */
4801         if (status & HS_FFERM) {
4802                 /* ERROR: During chipset initialization */
4803                 /* Adapter failed to init, chipset, status reg <status> */
4804                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4805                                 "0438 Adapter failed to init, chipset, "
4806                                 "status reg x%x, "
4807                                 "FW Data: A8 x%x AC x%x\n", status,
4808                                 readl(phba->MBslimaddr + 0xa8),
4809                                 readl(phba->MBslimaddr + 0xac));
4810                 phba->link_state = LPFC_HBA_ERROR;
4811                 return -EIO;
4812         }
4813
4814         /* Clear all interrupt enable conditions */
4815         writel(0, phba->HCregaddr);
4816         readl(phba->HCregaddr); /* flush */
4817
4818         /* setup host attn register */
4819         writel(0xffffffff, phba->HAregaddr);
4820         readl(phba->HAregaddr); /* flush */
4821         return 0;
4822 }
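
/*
 * Editor's note: worked timing for the poll schedule above:
 * 10 * 10ms + 90 * 100ms + 50 * 1000ms = 0.1s + 9s + 50s ~= 59s elapses
 * before the restart at i == 150, then up to ~50 more 1 second polls run
 * before the i >= 200 timeout is declared.
 */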
4823
4824 /**
4825  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4826  *
4827  * This function calculates and returns the number of HBQs required to be
4828  * configured.
4829  **/
4830 int
4831 lpfc_sli_hbq_count(void)
4832 {
4833         return ARRAY_SIZE(lpfc_hbq_defs);
4834 }
4835
4836 /**
4837  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4838  *
4839  * This function adds the number of hbq entries in every HBQ to get
4840  * the total number of hbq entries required for the HBA and returns
4841  * the total count.
4842  **/
4843 static int
4844 lpfc_sli_hbq_entry_count(void)
4845 {
4846         int  hbq_count = lpfc_sli_hbq_count();
4847         int  count = 0;
4848         int  i;
4849
4850         for (i = 0; i < hbq_count; ++i)
4851                 count += lpfc_hbq_defs[i]->entry_count;
4852         return count;
4853 }
4854
4855 /**
4856  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4857  *
4858  * This function calculates amount of memory required for all hbq entries
4859  * to be configured and returns the total memory required.
4860  **/
4861 int
4862 lpfc_sli_hbq_size(void)
4863 {
4864         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4865 }
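
/*
 * Editor's note: illustrative arithmetic only. If lpfc_hbq_defs described
 * two HBQs with hypothetical entry counts of 256 and 128, the size computed
 * above would be (256 + 128) * sizeof(struct lpfc_hbq_entry) bytes.
 */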
4866
4867 /**
4868  * lpfc_sli_hbq_setup - configure and initialize HBQs
4869  * @phba: Pointer to HBA context object.
4870  *
4871  * This function is called during the SLI initialization to configure
4872  * all the HBQs and post buffers to the HBQ. The caller is not
4873  * required to hold any locks. This function will return zero if successful
4874  * else it will return negative error code.
4875  **/
4876 static int
4877 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4878 {
4879         int  hbq_count = lpfc_sli_hbq_count();
4880         LPFC_MBOXQ_t *pmb;
4881         MAILBOX_t *pmbox;
4882         uint32_t hbqno;
4883         uint32_t hbq_entry_index;
4884
4885         /* Get a Mailbox buffer to setup mailbox
4886          * commands for HBA initialization
4887          */
4888         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4889
4890         if (!pmb)
4891                 return -ENOMEM;
4892
4893         pmbox = &pmb->u.mb;
4894
4895         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4896         phba->link_state = LPFC_INIT_MBX_CMDS;
4897         phba->hbq_in_use = 1;
4898
4899         hbq_entry_index = 0;
4900         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4901                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4902                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4903                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4904                 phba->hbqs[hbqno].entry_count =
4905                         lpfc_hbq_defs[hbqno]->entry_count;
4906                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4907                         hbq_entry_index, pmb);
4908                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4909
4910                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4911                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4912                            mbxStatus <status>, ring <num> */
4913
4914                         lpfc_printf_log(phba, KERN_ERR,
4915                                         LOG_SLI | LOG_VPORT,
4916                                         "1805 Adapter failed to init. "
4917                                         "Data: x%x x%x x%x\n",
4918                                         pmbox->mbxCommand,
4919                                         pmbox->mbxStatus, hbqno);
4920
4921                         phba->link_state = LPFC_HBA_ERROR;
4922                         mempool_free(pmb, phba->mbox_mem_pool);
4923                         return -ENXIO;
4924                 }
4925         }
4926         phba->hbq_count = hbq_count;
4927
4928         mempool_free(pmb, phba->mbox_mem_pool);
4929
4930         /* Initially populate or replenish the HBQs */
4931         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4932                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4933         return 0;
4934 }
4935
4936 /**
4937  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4938  * @phba: Pointer to HBA context object.
4939  *
4940  * This function is called during the SLI initialization to configure
4941  * the ELS HBQ and post receive buffers to it. The caller is not
4942  * required to hold any locks. This function will return zero if successful
4943  * else it will return negative error code.
4944  **/
4945 static int
4946 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4947 {
4948         phba->hbq_in_use = 1;
4949         phba->hbqs[LPFC_ELS_HBQ].entry_count =
4950                 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4951         phba->hbq_count = 1;
4952         lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4953         /* Initially populate or replenish the HBQs */
4954         return 0;
4955 }
4956
4957 /**
4958  * lpfc_sli_config_port - Issue config port mailbox command
4959  * @phba: Pointer to HBA context object.
4960  * @sli_mode: sli mode - 2/3
4961  *
4962  * This function is called by the sli initialization code path
4963  * to issue config_port mailbox command. This function restarts the
4964  * HBA firmware and issues a config_port mailbox command to configure
4965  * the SLI interface in the sli mode specified by sli_mode
4966  * variable. The caller is not required to hold any locks.
4967  * The function returns 0 if successful, else returns negative error
4968  * code.
4969  **/
4970 int
4971 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4972 {
4973         LPFC_MBOXQ_t *pmb;
4974         uint32_t resetcount = 0, rc = 0, done = 0;
4975
4976         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4977         if (!pmb) {
4978                 phba->link_state = LPFC_HBA_ERROR;
4979                 return -ENOMEM;
4980         }
4981
4982         phba->sli_rev = sli_mode;
4983         while (resetcount < 2 && !done) {
4984                 spin_lock_irq(&phba->hbalock);
4985                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4986                 spin_unlock_irq(&phba->hbalock);
4987                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4988                 lpfc_sli_brdrestart(phba);
4989                 rc = lpfc_sli_chipset_init(phba);
4990                 if (rc)
4991                         break;
4992
4993                 spin_lock_irq(&phba->hbalock);
4994                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4995                 spin_unlock_irq(&phba->hbalock);
4996                 resetcount++;
4997
4998                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4999                  * value of 0 means the call was successful.  Any other
5000                  * nonzero value is a failure, but if ERESTART is returned,
5001                  * the driver may reset the HBA and try again.
5002                  */
5003                 rc = lpfc_config_port_prep(phba);
5004                 if (rc == -ERESTART) {
5005                         phba->link_state = LPFC_LINK_UNKNOWN;
5006                         continue;
5007                 } else if (rc)
5008                         break;
5009
5010                 phba->link_state = LPFC_INIT_MBX_CMDS;
5011                 lpfc_config_port(phba, pmb);
5012                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5013                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5014                                         LPFC_SLI3_HBQ_ENABLED |
5015                                         LPFC_SLI3_CRP_ENABLED |
5016                                         LPFC_SLI3_DSS_ENABLED);
5017                 if (rc != MBX_SUCCESS) {
5018                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019                                 "0442 Adapter failed to init, mbxCmd x%x "
5020                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5021                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5022                         spin_lock_irq(&phba->hbalock);
5023                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5024                         spin_unlock_irq(&phba->hbalock);
5025                         rc = -ENXIO;
5026                 } else {
5027                         /* Allow asynchronous mailbox command to go through */
5028                         spin_lock_irq(&phba->hbalock);
5029                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5030                         spin_unlock_irq(&phba->hbalock);
5031                         done = 1;
5032
5033                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5034                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
5035                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5036                                         "3110 Port did not grant ASABT\n");
5037                 }
5038         }
5039         if (!done) {
5040                 rc = -EINVAL;
5041                 goto do_prep_failed;
5042         }
5043         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5044                 if (!pmb->u.mb.un.varCfgPort.cMA) {
5045                         rc = -ENXIO;
5046                         goto do_prep_failed;
5047                 }
5048                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5049                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5050                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5051                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5052                                 phba->max_vpi : phba->max_vports;
5053
5054                 } else
5055                         phba->max_vpi = 0;
5056                 phba->fips_level = 0;
5057                 phba->fips_spec_rev = 0;
5058                 if (pmb->u.mb.un.varCfgPort.gdss) {
5059                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5060                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5061                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5062                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5063                                         "2850 Security Crypto Active. FIPS x%d "
5064                                         "(Spec Rev: x%d)",
5065                                         phba->fips_level, phba->fips_spec_rev);
5066                 }
5067                 if (pmb->u.mb.un.varCfgPort.sec_err) {
5068                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5069                                         "2856 Config Port Security Crypto "
5070                                         "Error: x%x ",
5071                                         pmb->u.mb.un.varCfgPort.sec_err);
5072                 }
5073                 if (pmb->u.mb.un.varCfgPort.gerbm)
5074                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5075                 if (pmb->u.mb.un.varCfgPort.gcrp)
5076                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5077
5078                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5079                 phba->port_gp = phba->mbox->us.s3_pgp.port;
5080
5081                 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5082                         if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5083                                 phba->cfg_enable_bg = 0;
5084                                 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5085                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5086                                                 "0443 Adapter did not grant "
5087                                                 "BlockGuard\n");
5088                         }
5089                 }
5090         } else {
5091                 phba->hbq_get = NULL;
5092                 phba->port_gp = phba->mbox->us.s2.port;
5093                 phba->max_vpi = 0;
5094         }
5095 do_prep_failed:
5096         mempool_free(pmb, phba->mbox_mem_pool);
5097         return rc;
5098 }
5099
5100
5101 /**
5102  * lpfc_sli_hba_setup - SLI initialization function
5103  * @phba: Pointer to HBA context object.
5104  *
5105  * This function is the main SLI initialization function. This function
5106  * is called by the HBA initialization code, HBA reset code and HBA
5107  * error attention handler code. Caller is not required to hold any
5108  * locks. This function issues config_port mailbox command to configure
5109  * the SLI, setup iocb rings and HBQ rings. In the end the function
5110  * calls the config_port_post function to issue init_link mailbox
5111  * command and to start the discovery. The function will return zero
5112  * if successful, else it will return negative error code.
5113  **/
5114 int
5115 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5116 {
5117         uint32_t rc;
5118         int  mode = 3, i;
5119         int longs;
5120
5121         switch (phba->cfg_sli_mode) {
5122         case 2:
5123                 if (phba->cfg_enable_npiv) {
5124                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5125                                 "1824 NPIV enabled: Override sli_mode "
5126                                 "parameter (%d) to auto (0).\n",
5127                                 phba->cfg_sli_mode);
5128                         break;
5129                 }
5130                 mode = 2;
5131                 break;
5132         case 0:
5133         case 3:
5134                 break;
5135         default:
5136                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5137                                 "1819 Unrecognized sli_mode parameter: %d.\n",
5138                                 phba->cfg_sli_mode);
5139
5140                 break;
5141         }
5142         phba->fcp_embed_io = 0; /* SLI4 FC support only */
5143
5144         rc = lpfc_sli_config_port(phba, mode);
5145
5146         if (rc && phba->cfg_sli_mode == 3)
5147                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5148                                 "1820 Unable to select SLI-3.  "
5149                                 "Not supported by adapter.\n");
5150         if (rc && mode != 2)
5151                 rc = lpfc_sli_config_port(phba, 2);
5152         else if (rc && mode == 2)
5153                 rc = lpfc_sli_config_port(phba, 3);
5154         if (rc)
5155                 goto lpfc_sli_hba_setup_error;
5156
5157         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5158         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5159                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5160                 if (!rc) {
5161                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5162                                         "2709 This device supports "
5163                                         "Advanced Error Reporting (AER)\n");
5164                         spin_lock_irq(&phba->hbalock);
5165                         phba->hba_flag |= HBA_AER_ENABLED;
5166                         spin_unlock_irq(&phba->hbalock);
5167                 } else {
5168                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5169                                         "2708 This device does not support "
5170                                         "Advanced Error Reporting (AER): %d\n",
5171                                         rc);
5172                         phba->cfg_aer_support = 0;
5173                 }
5174         }
5175
5176         if (phba->sli_rev == 3) {
5177                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5178                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5179         } else {
5180                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5181                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5182                 phba->sli3_options = 0;
5183         }
5184
5185         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5186                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5187                         phba->sli_rev, phba->max_vpi);
5188         rc = lpfc_sli_ring_map(phba);
5189
5190         if (rc)
5191                 goto lpfc_sli_hba_setup_error;
5192
5193         /* Initialize VPIs. */
5194         if (phba->sli_rev == LPFC_SLI_REV3) {
5195                 /*
5196                  * The VPI bitmask and physical ID array are allocated
5197                  * and initialized once only - at driver load.  A port
5198                  * reset doesn't need to reinitialize this memory.
5199                  */
5200                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5201                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5202                         phba->vpi_bmask = kcalloc(longs,
5203                                                   sizeof(unsigned long),
5204                                                   GFP_KERNEL);
5205                         if (!phba->vpi_bmask) {
5206                                 rc = -ENOMEM;
5207                                 goto lpfc_sli_hba_setup_error;
5208                         }
5209
5210                         phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5211                                                 sizeof(uint16_t),
5212                                                 GFP_KERNEL);
5213                         if (!phba->vpi_ids) {
5214                                 kfree(phba->vpi_bmask);
5215                                 rc = -ENOMEM;
5216                                 goto lpfc_sli_hba_setup_error;
5217                         }
5218                         for (i = 0; i < phba->max_vpi; i++)
5219                                 phba->vpi_ids[i] = i;
5220                 }
5221         }
5222
5223         /* Init HBQs */
5224         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5225                 rc = lpfc_sli_hbq_setup(phba);
5226                 if (rc)
5227                         goto lpfc_sli_hba_setup_error;
5228         }
5229         spin_lock_irq(&phba->hbalock);
5230         phba->sli.sli_flag |= LPFC_PROCESS_LA;
5231         spin_unlock_irq(&phba->hbalock);
5232
5233         rc = lpfc_config_port_post(phba);
5234         if (rc)
5235                 goto lpfc_sli_hba_setup_error;
5236
5237         return rc;
5238
5239 lpfc_sli_hba_setup_error:
5240         phba->link_state = LPFC_HBA_ERROR;
5241         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5242                         "0445 Firmware initialization failed\n");
5243         return rc;
5244 }
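
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller of
 * lpfc_sli_hba_setup().  It only demonstrates the documented contract:
 * no locks held on entry, zero returned on success, and on failure the
 * function has already set phba->link_state to LPFC_HBA_ERROR.
 */
static int lpfc_example_hba_bringup(struct lpfc_hba *phba)
{
        int rc;

        rc = lpfc_sli_hba_setup(phba);  /* config_port, rings, HBQs */
        if (rc)
                return rc;      /* link_state is already LPFC_HBA_ERROR */
        return 0;
}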
5245
5246 /**
5247  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5248  * @phba: Pointer to HBA context object.
5249  *
5250  * This function issues a dump mailbox command to read config region
5251  * 23, parses the records in the region, and populates the driver
5252  * data structures.
5253  **/
5254 static int
5255 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5256 {
5257         LPFC_MBOXQ_t *mboxq;
5258         struct lpfc_dmabuf *mp;
5259         struct lpfc_mqe *mqe;
5260         uint32_t data_length;
5261         int rc;
5262
5263         /* Program the default value of vlan_id and fc_map */
5264         phba->valid_vlan = 0;
5265         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5266         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5267         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5268
5269         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5270         if (!mboxq)
5271                 return -ENOMEM;
5272
5273         mqe = &mboxq->u.mqe;
5274         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5275                 rc = -ENOMEM;
5276                 goto out_free_mboxq;
5277         }
5278
5279         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5280         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5281
5282         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5283                         "(%d):2571 Mailbox cmd x%x Status x%x "
5284                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5285                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5286                         "CQ: x%x x%x x%x x%x\n",
5287                         mboxq->vport ? mboxq->vport->vpi : 0,
5288                         bf_get(lpfc_mqe_command, mqe),
5289                         bf_get(lpfc_mqe_status, mqe),
5290                         mqe->un.mb_words[0], mqe->un.mb_words[1],
5291                         mqe->un.mb_words[2], mqe->un.mb_words[3],
5292                         mqe->un.mb_words[4], mqe->un.mb_words[5],
5293                         mqe->un.mb_words[6], mqe->un.mb_words[7],
5294                         mqe->un.mb_words[8], mqe->un.mb_words[9],
5295                         mqe->un.mb_words[10], mqe->un.mb_words[11],
5296                         mqe->un.mb_words[12], mqe->un.mb_words[13],
5297                         mqe->un.mb_words[14], mqe->un.mb_words[15],
5298                         mqe->un.mb_words[16], mqe->un.mb_words[50],
5299                         mboxq->mcqe.word0,
5300                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5301                         mboxq->mcqe.trailer);
5302
5303         if (rc) {
5304                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5305                 kfree(mp);
5306                 rc = -EIO;
5307                 goto out_free_mboxq;
5308         }
5309         data_length = mqe->un.mb_words[5];
5310         if (data_length > DMP_RGN23_SIZE) {
5311                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5312                 kfree(mp);
5313                 rc = -EIO;
5314                 goto out_free_mboxq;
5315         }
5316
5317         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5318         lpfc_mbuf_free(phba, mp->virt, mp->phys);
5319         kfree(mp);
5320         rc = 0;
5321
5322 out_free_mboxq:
5323         mempool_free(mboxq, phba->mbox_mem_pool);
5324         return rc;
5325 }
5326
5327 /**
5328  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5329  * @phba: pointer to lpfc hba data structure.
5330  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5331  * @vpd: pointer to the memory to hold resulting port vpd data.
5332  * @vpd_size: On input, the number of bytes allocated to @vpd.
5333  *            On output, the number of data bytes in @vpd.
5334  *
5335  * This routine executes a READ_REV SLI4 mailbox command.  In
5336  * addition, this routine gets the port vpd data.
5337  *
5338  * Return codes
5339  *      0 - successful
5340  *      -ENOMEM - could not allocate memory.
5341  **/
5342 static int
5343 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5344                     uint8_t *vpd, uint32_t *vpd_size)
5345 {
5346         int rc = 0;
5347         uint32_t dma_size;
5348         struct lpfc_dmabuf *dmabuf;
5349         struct lpfc_mqe *mqe;
5350
5351         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5352         if (!dmabuf)
5353                 return -ENOMEM;
5354
5355         /*
5356          * Get a DMA buffer for the vpd data resulting from the READ_REV
5357          * mailbox command.
5358          */
5359         dma_size = *vpd_size;
5360         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5361                                           &dmabuf->phys, GFP_KERNEL);
5362         if (!dmabuf->virt) {
5363                 kfree(dmabuf);
5364                 return -ENOMEM;
5365         }
5366
5367         /*
5368          * The SLI4 implementation of READ_REV conflicts at word1,
5369          * bits 31:16 and SLI4 adds vpd functionality not present
5370          * in SLI3.  This code corrects the conflicts.
5371          */
5372         lpfc_read_rev(phba, mboxq);
5373         mqe = &mboxq->u.mqe;
5374         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5375         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5376         mqe->un.read_rev.word1 &= 0x0000FFFF;
5377         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5378         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5379
5380         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5381         if (rc) {
5382                 dma_free_coherent(&phba->pcidev->dev, dma_size,
5383                                   dmabuf->virt, dmabuf->phys);
5384                 kfree(dmabuf);
5385                 return -EIO;
5386         }
5387
5388         /*
5389          * The available vpd length cannot be bigger than the
5390          * DMA buffer passed to the port.  Catch the less than
5391          * case and update the caller's size.
5392          */
5393         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5394                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5395
5396         memcpy(vpd, dmabuf->virt, *vpd_size);
5397
5398         dma_free_coherent(&phba->pcidev->dev, dma_size,
5399                           dmabuf->virt, dmabuf->phys);
5400         kfree(dmabuf);
5401         return 0;
5402 }
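
/*
 * Illustrative sketch, not part of the driver: using the in/out
 * @vpd_size contract of lpfc_sli4_read_rev().  The buffer and its
 * size are assumptions chosen only for the example.
 */
static int lpfc_example_fetch_vpd(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        uint8_t vpd[1024];                      /* hypothetical buffer */
        uint32_t vpd_size = sizeof(vpd);        /* in: bytes available */
        int rc;

        rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
        if (rc)
                return rc;
        /* out: vpd_size now holds the number of valid bytes in vpd[] */
        return 0;
}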
5403
5404 /**
5405  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5406  * @phba: pointer to lpfc hba data structure.
5407  *
5408  * This routine retrieves the controller attributes of the SLI4 device
5409  * this PCI function is attached to.
5410  *
5411  * Return codes
5412  *      0 - successful
5413  *      otherwise - failed to retrieve controller attributes
5414  **/
5415 static int
5416 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5417 {
5418         LPFC_MBOXQ_t *mboxq;
5419         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5420         struct lpfc_controller_attribute *cntl_attr;
5421         void *virtaddr = NULL;
5422         uint32_t alloclen, reqlen;
5423         uint32_t shdr_status, shdr_add_status;
5424         union lpfc_sli4_cfg_shdr *shdr;
5425         int rc;
5426
5427         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5428         if (!mboxq)
5429                 return -ENOMEM;
5430
5431         /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5432         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5433         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5434                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5435                         LPFC_SLI4_MBX_NEMBED);
5436
5437         if (alloclen < reqlen) {
5438                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5439                                 "3084 Allocated DMA memory size (%d) is "
5440                                 "less than the requested DMA memory size "
5441                                 "(%d)\n", alloclen, reqlen);
5442                 rc = -ENOMEM;
5443                 goto out_free_mboxq;
5444         }
5445         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5446         virtaddr = mboxq->sge_array->addr[0];
5447         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5448         shdr = &mbx_cntl_attr->cfg_shdr;
5449         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5450         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5451         if (shdr_status || shdr_add_status || rc) {
5452                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5453                                 "3085 Mailbox x%x (x%x/x%x) failed, "
5454                                 "rc:x%x, status:x%x, add_status:x%x\n",
5455                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5456                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5457                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5458                                 rc, shdr_status, shdr_add_status);
5459                 rc = -ENXIO;
5460                 goto out_free_mboxq;
5461         }
5462
5463         cntl_attr = &mbx_cntl_attr->cntl_attr;
5464         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5465         phba->sli4_hba.lnk_info.lnk_tp =
5466                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5467         phba->sli4_hba.lnk_info.lnk_no =
5468                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5469
5470         memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5471         strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5472                 sizeof(phba->BIOSVersion));
5473
5474         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5475                         "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5476                         phba->sli4_hba.lnk_info.lnk_tp,
5477                         phba->sli4_hba.lnk_info.lnk_no,
5478                         phba->BIOSVersion);
5479 out_free_mboxq:
5480         if (rc != MBX_TIMEOUT) {
5481                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5482                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
5483                 else
5484                         mempool_free(mboxq, phba->mbox_mem_pool);
5485         }
5486         return rc;
5487 }
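
/*
 * The function above follows a pattern that recurs throughout this file
 * for SLI4 config mailboxes: issue the command, then fold the generic
 * header status, the additional status and the issue return code into a
 * single failure check.  A condensed sketch of that pattern (the helper
 * name is hypothetical):
 */
static inline bool lpfc_example_cfg_mbox_failed(union lpfc_sli4_cfg_shdr *shdr,
                                                int rc)
{
        return bf_get(lpfc_mbox_hdr_status, &shdr->response) ||
               bf_get(lpfc_mbox_hdr_add_status, &shdr->response) ||
               rc;
}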
5488
5489 /**
5490  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5491  * @phba: pointer to lpfc hba data structure.
5492  *
5493  * This routine retrieves the SLI4 device physical port name of the port
5494  * this PCI function is attached to.
5495  *
5496  * Return codes
5497  *      0 - successful
5498  *      otherwise - failed to retrieve physical port name
5499  **/
5500 static int
5501 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5502 {
5503         LPFC_MBOXQ_t *mboxq;
5504         struct lpfc_mbx_get_port_name *get_port_name;
5505         uint32_t shdr_status, shdr_add_status;
5506         union lpfc_sli4_cfg_shdr *shdr;
5507         char cport_name = 0;
5508         int rc;
5509
5510         /* We assume nothing at this point */
5511         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5512         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5513
5514         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5515         if (!mboxq)
5516                 return -ENOMEM;
5517         /* obtain link type and link number via READ_CONFIG */
5518         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5519         lpfc_sli4_read_config(phba);
5520         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5521                 goto retrieve_ppname;
5522
5523         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5524         rc = lpfc_sli4_get_ctl_attr(phba);
5525         if (rc)
5526                 goto out_free_mboxq;
5527
5528 retrieve_ppname:
5529         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5530                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5531                 sizeof(struct lpfc_mbx_get_port_name) -
5532                 sizeof(struct lpfc_sli4_cfg_mhdr),
5533                 LPFC_SLI4_MBX_EMBED);
5534         get_port_name = &mboxq->u.mqe.un.get_port_name;
5535         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5536         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5537         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5538                 phba->sli4_hba.lnk_info.lnk_tp);
5539         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5540         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5541         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5542         if (shdr_status || shdr_add_status || rc) {
5543                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5544                                 "3087 Mailbox x%x (x%x/x%x) failed: "
5545                                 "rc:x%x, status:x%x, add_status:x%x\n",
5546                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5547                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5548                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5549                                 rc, shdr_status, shdr_add_status);
5550                 rc = -ENXIO;
5551                 goto out_free_mboxq;
5552         }
5553         switch (phba->sli4_hba.lnk_info.lnk_no) {
5554         case LPFC_LINK_NUMBER_0:
5555                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5556                                 &get_port_name->u.response);
5557                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5558                 break;
5559         case LPFC_LINK_NUMBER_1:
5560                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5561                                 &get_port_name->u.response);
5562                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5563                 break;
5564         case LPFC_LINK_NUMBER_2:
5565                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5566                                 &get_port_name->u.response);
5567                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5568                 break;
5569         case LPFC_LINK_NUMBER_3:
5570                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5571                                 &get_port_name->u.response);
5572                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5573                 break;
5574         default:
5575                 break;
5576         }
5577
5578         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5579                 phba->Port[0] = cport_name;
5580                 phba->Port[1] = '\0';
5581                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5582                                 "3091 SLI get port name: %s\n", phba->Port);
5583         }
5584
5585 out_free_mboxq:
5586         if (rc != MBX_TIMEOUT) {
5587                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5588                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
5589                 else
5590                         mempool_free(mboxq, phba->mbox_mem_pool);
5591         }
5592         return rc;
5593 }
5594
5595 /**
5596  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5597  * @phba: pointer to lpfc hba data structure.
5598  *
5599  * This routine is called to explicitly arm the SLI4 device's completion and
5600  * event queues.
5601  **/
5602 static void
5603 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5604 {
5605         int qidx;
5606         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5607         struct lpfc_sli4_hdw_queue *qp;
5608
5609         sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5610         sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5611         if (sli4_hba->nvmels_cq)
5612                 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5613                                            LPFC_QUEUE_REARM);
5614
5615         qp = sli4_hba->hdwq;
5616         if (sli4_hba->hdwq) {
5617                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5618                         sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0,
5619                                                    LPFC_QUEUE_REARM);
5620                         sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0,
5621                                                    LPFC_QUEUE_REARM);
5622                 }
5623
5624                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++)
5625                         sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq,
5626                                                 0, LPFC_QUEUE_REARM);
5627         }
5628
5629         if (phba->nvmet_support) {
5630                 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5631                         sli4_hba->sli4_write_cq_db(phba,
5632                                 sli4_hba->nvmet_cqset[qidx], 0,
5633                                 LPFC_QUEUE_REARM);
5634                 }
5635         }
5636 }
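
/*
 * Rearming is routed through the sli4_write_cq_db/sli4_write_eq_db
 * function pointers so the same loop serves both doorbell register
 * layouts.  A minimal sketch of rearming one CQ (the helper is
 * hypothetical; the queue type follows lpfc_sli4.h):
 */
static inline void lpfc_example_rearm_cq(struct lpfc_hba *phba,
                                         struct lpfc_queue *cq)
{
        /* pop 0 entries; only set the rearm bit on the doorbell */
        phba->sli4_hba.sli4_write_cq_db(phba, cq, 0, LPFC_QUEUE_REARM);
}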
5637
5638 /**
5639  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5640  * @phba: Pointer to HBA context object.
5641  * @type: The resource extent type.
5642  * @extnt_count: buffer to hold port available extent count.
5643  * @extnt_size: buffer to hold element count per extent.
5644  *
5645  * This function calls the port and retrieves the number of available
5646  * extents and their size for a particular extent type.
5647  *
5648  * Returns: 0 if successful.  Nonzero otherwise.
5649  **/
5650 int
5651 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5652                                uint16_t *extnt_count, uint16_t *extnt_size)
5653 {
5654         int rc = 0;
5655         uint32_t length;
5656         uint32_t mbox_tmo;
5657         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5658         LPFC_MBOXQ_t *mbox;
5659
5660         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5661         if (!mbox)
5662                 return -ENOMEM;
5663
5664         /* Find out how many extents are available for this resource type */
5665         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5666                   sizeof(struct lpfc_sli4_cfg_mhdr));
5667         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5668                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5669                          length, LPFC_SLI4_MBX_EMBED);
5670
5671         /* Send an extents count of 0 - the GET doesn't use it. */
5672         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5673                                         LPFC_SLI4_MBX_EMBED);
5674         if (unlikely(rc)) {
5675                 rc = -EIO;
5676                 goto err_exit;
5677         }
5678
5679         if (!phba->sli4_hba.intr_enable)
5680                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5681         else {
5682                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5683                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5684         }
5685         if (unlikely(rc)) {
5686                 rc = -EIO;
5687                 goto err_exit;
5688         }
5689
5690         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5691         if (bf_get(lpfc_mbox_hdr_status,
5692                    &rsrc_info->header.cfg_shdr.response)) {
5693                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5694                                 "2930 Failed to get resource extents "
5695                                 "Status 0x%x Add'l Status 0x%x\n",
5696                                 bf_get(lpfc_mbox_hdr_status,
5697                                        &rsrc_info->header.cfg_shdr.response),
5698                                 bf_get(lpfc_mbox_hdr_add_status,
5699                                        &rsrc_info->header.cfg_shdr.response));
5700                 rc = -EIO;
5701                 goto err_exit;
5702         }
5703
5704         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5705                               &rsrc_info->u.rsp);
5706         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5707                              &rsrc_info->u.rsp);
5708
5709         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5710                         "3162 Retrieved extents type-%d from port: count:%d, "
5711                         "size:%d\n", type, *extnt_count, *extnt_size);
5712
5713 err_exit:
5714         mempool_free(mbox, phba->mbox_mem_pool);
5715         return rc;
5716 }
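
/*
 * Illustrative sketch, not part of the driver: querying the port for
 * available XRI extents with lpfc_sli4_get_avail_extnt_rsrc().
 */
static int lpfc_example_query_xri_extents(struct lpfc_hba *phba)
{
        uint16_t cnt, size;
        int rc;

        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
                                            &cnt, &size);
        if (rc)
                return rc;
        /* cnt extents are available, each holding size elements */
        return 0;
}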
5717
5718 /**
5719  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5720  * @phba: Pointer to HBA context object.
5721  * @type: The extent type to check.
5722  *
5723  * This function reads the current available extents from the port and checks
5724  * if the extent count or extent size has changed since the last access.
5725  * Callers use this routine after a port reset to determine whether there
5726  * is an extent reprovisioning requirement.
5727  *
5728  * Returns:
5729  *   -Error: a negative errno value describes the problem.
5730  *   1: Extent count or size has changed.
5731  *   0: No changes.
5732  **/
5733 static int
5734 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5735 {
5736         uint16_t curr_ext_cnt, rsrc_ext_cnt;
5737         uint16_t size_diff, rsrc_ext_size;
5738         int rc = 0;
5739         struct lpfc_rsrc_blks *rsrc_entry;
5740         struct list_head *rsrc_blk_list = NULL;
5741
5742         size_diff = 0;
5743         curr_ext_cnt = 0;
5744         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5745                                             &rsrc_ext_cnt,
5746                                             &rsrc_ext_size);
5747         if (unlikely(rc))
5748                 return -EIO;
5749
5750         switch (type) {
5751         case LPFC_RSC_TYPE_FCOE_RPI:
5752                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5753                 break;
5754         case LPFC_RSC_TYPE_FCOE_VPI:
5755                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5756                 break;
5757         case LPFC_RSC_TYPE_FCOE_XRI:
5758                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5759                 break;
5760         case LPFC_RSC_TYPE_FCOE_VFI:
5761                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5762                 break;
5763         default:
5764                 break;
5765         }
5766
5767         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5768                 curr_ext_cnt++;
5769                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5770                         size_diff++;
5771         }
5772
5773         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5774                 rc = 1;
5775
5776         return rc;
5777 }
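
/*
 * Illustrative sketch, not part of the driver: consuming the tri-state
 * return of lpfc_sli4_chk_avail_extnt_rsrc() after a port reset.
 */
static int lpfc_example_post_reset_check(struct lpfc_hba *phba)
{
        int rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);

        if (rc < 0)
                return rc;      /* -EIO: the port could not be read */
        if (rc == 1) {
                /* count or size changed: the caller must reprovision */
        }
        return 0;               /* rc == 0: nothing changed */
}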
5778
5779 /**
5780  * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents.
5781  * @phba: Pointer to HBA context object.
5782  * @extnt_cnt: number of available extents.
5783  * @type: the extent type (rpi, xri, vfi, vpi).
5784  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5785  * @mbox: pointer to the caller's allocated mailbox structure.
5786  *
5787  * This function executes the extents allocation request.  It also
5788  * takes care of the amount of memory needed to allocate or get the
5789  * allocated extents. It is the caller's responsibility to evaluate
5790  * the response.
5791  *
5792  * Returns:
5793  *   -Error:  Error value describes the condition found.
5794  *   0: if successful
5795  **/
5796 static int
5797 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5798                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5799 {
5800         int rc = 0;
5801         uint32_t req_len;
5802         uint32_t emb_len;
5803         uint32_t alloc_len, mbox_tmo;
5804
5805         /* Calculate the total requested length of the dma memory */
5806         req_len = extnt_cnt * sizeof(uint16_t);
5807
5808         /*
5809          * Calculate the size of an embedded mailbox.  The uint32_t
5810          * accounts for extents-specific word.
5811          */
5812         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5813                 sizeof(uint32_t);
5814
5815         /*
5816          * Presume the allocation and response will fit into an embedded
5817          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5818          */
5819         *emb = LPFC_SLI4_MBX_EMBED;
5820         if (req_len > emb_len) {
5821                 req_len = extnt_cnt * sizeof(uint16_t) +
5822                         sizeof(union lpfc_sli4_cfg_shdr) +
5823                         sizeof(uint32_t);
5824                 *emb = LPFC_SLI4_MBX_NEMBED;
5825         }
5826
5827         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5828                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5829                                      req_len, *emb);
5830         if (alloc_len < req_len) {
5831                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5832                         "2982 Allocated DMA memory size (x%x) is "
5833                         "less than the requested DMA memory "
5834                         "size (x%x)\n", alloc_len, req_len);
5835                 return -ENOMEM;
5836         }
5837         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5838         if (unlikely(rc))
5839                 return -EIO;
5840
5841         if (!phba->sli4_hba.intr_enable)
5842                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5843         else {
5844                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5845                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5846         }
5847
5848         if (unlikely(rc))
5849                 rc = -EIO;
5850         return rc;
5851 }
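
/*
 * Whether the request fits in the embedded mailbox is a pure size
 * calculation.  A worked example with illustrative sizes (not taken
 * from the headers): if sizeof(MAILBOX_t) were 256 and
 * sizeof(struct mbox_header) were 8, then
 *
 *      emb_len = 256 - 8 - sizeof(uint32_t) = 244 bytes
 *
 * so up to 122 uint16_t extent ids (244 / 2) fit embedded; a larger
 * extnt_cnt forces the LPFC_SLI4_MBX_NEMBED path, whose request length
 * additionally covers the cfg_shdr and the extents-specific word.
 */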
5852
5853 /**
5854  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5855  * @phba: Pointer to HBA context object.
5856  * @type:  The resource extent type to allocate.
5857  *
5858  * This function allocates the number of elements for the specified
5859  * resource type.
5860  **/
5861 static int
5862 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5863 {
5864         bool emb = false;
5865         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5866         uint16_t rsrc_id, rsrc_start, j, k;
5867         uint16_t *ids;
5868         int i, rc;
5869         unsigned long longs;
5870         unsigned long *bmask;
5871         struct lpfc_rsrc_blks *rsrc_blks;
5872         LPFC_MBOXQ_t *mbox;
5873         uint32_t length;
5874         struct lpfc_id_range *id_array = NULL;
5875         void *virtaddr = NULL;
5876         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5877         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5878         struct list_head *ext_blk_list;
5879
5880         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5881                                             &rsrc_cnt,
5882                                             &rsrc_size);
5883         if (unlikely(rc))
5884                 return -EIO;
5885
5886         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5887                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5888                         "3009 No available Resource Extents "
5889                         "for resource type 0x%x: Count: 0x%x, "
5890                         "Size 0x%x\n", type, rsrc_cnt,
5891                         rsrc_size);
5892                 return -ENOMEM;
5893         }
5894
5895         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5896                         "2903 Post resource extents type-0x%x: "
5897                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5898
5899         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5900         if (!mbox)
5901                 return -ENOMEM;
5902
5903         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5904         if (unlikely(rc)) {
5905                 rc = -EIO;
5906                 goto err_exit;
5907         }
5908
5909         /*
5910          * Figure out where the response is located.  Then get local pointers
5911          * to the response data.  The port does not guarantee to respond to
5912          * the full requested extent count, so update the local variable with
5913          * the count actually allocated by the port.
5914          */
5915         if (emb == LPFC_SLI4_MBX_EMBED) {
5916                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5917                 id_array = &rsrc_ext->u.rsp.id[0];
5918                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5919         } else {
5920                 virtaddr = mbox->sge_array->addr[0];
5921                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5922                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5923                 id_array = &n_rsrc->id;
5924         }
5925
5926         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5927         rsrc_id_cnt = rsrc_cnt * rsrc_size;
5928
5929         /*
5930          * Based on the resource size and count, correct the base and max
5931          * resource values.
5932          */
5933         length = sizeof(struct lpfc_rsrc_blks);
5934         switch (type) {
5935         case LPFC_RSC_TYPE_FCOE_RPI:
5936                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5937                                                    sizeof(unsigned long),
5938                                                    GFP_KERNEL);
5939                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5940                         rc = -ENOMEM;
5941                         goto err_exit;
5942                 }
5943                 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5944                                                  sizeof(uint16_t),
5945                                                  GFP_KERNEL);
5946                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5947                         kfree(phba->sli4_hba.rpi_bmask);
5948                         rc = -ENOMEM;
5949                         goto err_exit;
5950                 }
5951
5952                 /*
5953                  * The next_rpi was initialized with the maximum available
5954                  * count but the port may allocate a smaller number.  Catch
5955                  * that case and update the next_rpi.
5956                  */
5957                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5958
5959                 /* Initialize local ptrs for common extent processing later. */
5960                 bmask = phba->sli4_hba.rpi_bmask;
5961                 ids = phba->sli4_hba.rpi_ids;
5962                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5963                 break;
5964         case LPFC_RSC_TYPE_FCOE_VPI:
5965                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5966                                           GFP_KERNEL);
5967                 if (unlikely(!phba->vpi_bmask)) {
5968                         rc = -ENOMEM;
5969                         goto err_exit;
5970                 }
5971                 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5972                                          GFP_KERNEL);
5973                 if (unlikely(!phba->vpi_ids)) {
5974                         kfree(phba->vpi_bmask);
5975                         rc = -ENOMEM;
5976                         goto err_exit;
5977                 }
5978
5979                 /* Initialize local ptrs for common extent processing later. */
5980                 bmask = phba->vpi_bmask;
5981                 ids = phba->vpi_ids;
5982                 ext_blk_list = &phba->lpfc_vpi_blk_list;
5983                 break;
5984         case LPFC_RSC_TYPE_FCOE_XRI:
5985                 phba->sli4_hba.xri_bmask = kcalloc(longs,
5986                                                    sizeof(unsigned long),
5987                                                    GFP_KERNEL);
5988                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5989                         rc = -ENOMEM;
5990                         goto err_exit;
5991                 }
5992                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5993                 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5994                                                  sizeof(uint16_t),
5995                                                  GFP_KERNEL);
5996                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5997                         kfree(phba->sli4_hba.xri_bmask);
5998                         rc = -ENOMEM;
5999                         goto err_exit;
6000                 }
6001
6002                 /* Initialize local ptrs for common extent processing later. */
6003                 bmask = phba->sli4_hba.xri_bmask;
6004                 ids = phba->sli4_hba.xri_ids;
6005                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6006                 break;
6007         case LPFC_RSC_TYPE_FCOE_VFI:
6008                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6009                                                    sizeof(unsigned long),
6010                                                    GFP_KERNEL);
6011                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6012                         rc = -ENOMEM;
6013                         goto err_exit;
6014                 }
6015                 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6016                                                  sizeof(uint16_t),
6017                                                  GFP_KERNEL);
6018                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6019                         kfree(phba->sli4_hba.vfi_bmask);
6020                         rc = -ENOMEM;
6021                         goto err_exit;
6022                 }
6023
6024                 /* Initialize local ptrs for common extent processing later. */
6025                 bmask = phba->sli4_hba.vfi_bmask;
6026                 ids = phba->sli4_hba.vfi_ids;
6027                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6028                 break;
6029         default:
6030                 /* Unsupported Opcode.  Fail call. */
6031                 id_array = NULL;
6032                 bmask = NULL;
6033                 ids = NULL;
6034                 ext_blk_list = NULL;
6035                 goto err_exit;
6036         }
6037
6038         /*
6039          * Complete initializing the extent configuration with the
6040          * allocated ids assigned to this function.  The bitmask serves
6041          * as an index into the array and manages the available ids.  The
6042          * array just stores the ids communicated to the port via the wqes.
6043          */
6044         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6045                 if ((i % 2) == 0)
6046                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6047                                          &id_array[k]);
6048                 else
6049                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6050                                          &id_array[k]);
6051
6052                 rsrc_blks = kzalloc(length, GFP_KERNEL);
6053                 if (unlikely(!rsrc_blks)) {
6054                         rc = -ENOMEM;
6055                         kfree(bmask);
6056                         kfree(ids);
6057                         goto err_exit;
6058                 }
6059                 rsrc_blks->rsrc_start = rsrc_id;
6060                 rsrc_blks->rsrc_size = rsrc_size;
6061                 list_add_tail(&rsrc_blks->list, ext_blk_list);
6062                 rsrc_start = rsrc_id;
6063                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6064                         phba->sli4_hba.io_xri_start = rsrc_start +
6065                                 lpfc_sli4_get_iocb_cnt(phba);
6066                 }
6067
6068                 while (rsrc_id < (rsrc_start + rsrc_size)) {
6069                         ids[j] = rsrc_id;
6070                         rsrc_id++;
6071                         j++;
6072                 }
6073                 /* Entire word processed.  Get next word. */
6074                 if ((i % 2) == 1)
6075                         k++;
6076         }
6077  err_exit:
6078         lpfc_sli4_mbox_cmd_free(phba, mbox);
6079         return rc;
6080 }
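
/*
 * The port packs two 16-bit extent base ids into each 32-bit word of
 * id_array, which is why the loop above alternates word4_0/word4_1 on
 * (i % 2) and advances k only after odd iterations.  A condensed sketch
 * of that decode (the helper is hypothetical):
 */
static inline uint16_t lpfc_example_extent_base(struct lpfc_id_range *id_array,
                                                uint16_t i)
{
        uint16_t k = i / 2;     /* one word holds extents i and i + 1 */

        return (i % 2) ? bf_get(lpfc_mbx_rsrc_id_word4_1, &id_array[k]) :
                         bf_get(lpfc_mbx_rsrc_id_word4_0, &id_array[k]);
}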
6081
6082
6083
6084 /**
6085  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6086  * @phba: Pointer to HBA context object.
6087  * @type: the extent's type.
6088  *
6089  * This function deallocates all extents of a particular resource type.
6090  * SLI4 does not allow for deallocating a particular extent range.  It
6091  * is the caller's responsibility to release all kernel memory resources.
6092  **/
6093 static int
6094 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6095 {
6096         int rc;
6097         uint32_t length, mbox_tmo = 0;
6098         LPFC_MBOXQ_t *mbox;
6099         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6100         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6101
6102         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6103         if (!mbox)
6104                 return -ENOMEM;
6105
6106         /*
6107          * This function sends an embedded mailbox because it only sends the
6108          * resource type.  All extents of this type are released by the
6109          * port.
6110          */
6111         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6112                   sizeof(struct lpfc_sli4_cfg_mhdr));
6113         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6114                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6115                          length, LPFC_SLI4_MBX_EMBED);
6116
6117         /* Send an extents count of 0 - the dealloc doesn't use it. */
6118         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6119                                         LPFC_SLI4_MBX_EMBED);
6120         if (unlikely(rc)) {
6121                 rc = -EIO;
6122                 goto out_free_mbox;
6123         }
6124         if (!phba->sli4_hba.intr_enable)
6125                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6126         else {
6127                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6128                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6129         }
6130         if (unlikely(rc)) {
6131                 rc = -EIO;
6132                 goto out_free_mbox;
6133         }
6134
6135         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6136         if (bf_get(lpfc_mbox_hdr_status,
6137                    &dealloc_rsrc->header.cfg_shdr.response)) {
6138                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6139                                 "2919 Failed to release resource extents "
6140                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
6141                                 "Resource memory not released.\n",
6142                                 type,
6143                                 bf_get(lpfc_mbox_hdr_status,
6144                                     &dealloc_rsrc->header.cfg_shdr.response),
6145                                 bf_get(lpfc_mbox_hdr_add_status,
6146                                     &dealloc_rsrc->header.cfg_shdr.response));
6147                 rc = -EIO;
6148                 goto out_free_mbox;
6149         }
6150
6151         /* Release kernel memory resources for the specific type. */
6152         switch (type) {
6153         case LPFC_RSC_TYPE_FCOE_VPI:
6154                 kfree(phba->vpi_bmask);
6155                 kfree(phba->vpi_ids);
6156                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6157                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6158                                     &phba->lpfc_vpi_blk_list, list) {
6159                         list_del_init(&rsrc_blk->list);
6160                         kfree(rsrc_blk);
6161                 }
6162                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6163                 break;
6164         case LPFC_RSC_TYPE_FCOE_XRI:
6165                 kfree(phba->sli4_hba.xri_bmask);
6166                 kfree(phba->sli4_hba.xri_ids);
6167                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6168                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
6169                         list_del_init(&rsrc_blk->list);
6170                         kfree(rsrc_blk);
6171                 }
6172                 break;
6173         case LPFC_RSC_TYPE_FCOE_VFI:
6174                 kfree(phba->sli4_hba.vfi_bmask);
6175                 kfree(phba->sli4_hba.vfi_ids);
6176                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6177                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6178                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6179                         list_del_init(&rsrc_blk->list);
6180                         kfree(rsrc_blk);
6181                 }
6182                 break;
6183         case LPFC_RSC_TYPE_FCOE_RPI:
6184                 /* RPI bitmask and physical id array are cleaned up earlier. */
6185                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6186                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6187                         list_del_init(&rsrc_blk->list);
6188                         kfree(rsrc_blk);
6189                 }
6190                 break;
6191         default:
6192                 break;
6193         }
6194
6195         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6196
6197  out_free_mbox:
6198         mempool_free(mbox, phba->mbox_mem_pool);
6199         return rc;
6200 }
6201
6202 static void
6203 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6204                   uint32_t feature)
6205 {
6206         uint32_t len;
6207
6208         len = sizeof(struct lpfc_mbx_set_feature) -
6209                 sizeof(struct lpfc_sli4_cfg_mhdr);
6210         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6211                          LPFC_MBOX_OPCODE_SET_FEATURES, len,
6212                          LPFC_SLI4_MBX_EMBED);
6213
6214         switch (feature) {
6215         case LPFC_SET_UE_RECOVERY:
6216                 bf_set(lpfc_mbx_set_feature_UER,
6217                        &mbox->u.mqe.un.set_feature, 1);
6218                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6219                 mbox->u.mqe.un.set_feature.param_len = 8;
6220                 break;
6221         case LPFC_SET_MDS_DIAGS:
6222                 bf_set(lpfc_mbx_set_feature_mds,
6223                        &mbox->u.mqe.un.set_feature, 1);
6224                 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6225                        &mbox->u.mqe.un.set_feature, 1);
6226                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6227                 mbox->u.mqe.un.set_feature.param_len = 8;
6228                 break;
6229         }
6230
6231         return;
6232 }
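
/*
 * Illustrative sketch, not part of the driver: building and issuing a
 * SET_FEATURES mailbox with the helper above, here for MDS diagnostics.
 * Error handling is abbreviated.
 */
static int lpfc_example_enable_mds_diags(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
        lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        mempool_free(mbox, phba->mbox_mem_pool);
        return rc ? -EIO : 0;
}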
6233
6234 /**
6235  * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6236  * @phba: Pointer to HBA context object.
6237  *
6238  * Disable FW logging into host memory on the adapter. This must
6239  * be done before reading the logs from host memory.
6240  **/
6241 void
6242 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6243 {
6244         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6245
6246         ras_fwlog->ras_active = false;
6247
6248         /* Disable FW logging to host memory */
6249         writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6250                phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6251 }
6252
6253 /**
6254  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6255  * @phba: Pointer to HBA context object.
6256  *
6257  * This function is called to free memory allocated for RAS FW logging
6258  * support in the driver.
6259  **/
6260 void
6261 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6262 {
6263         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6264         struct lpfc_dmabuf *dmabuf, *next;
6265
6266         if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6267                 list_for_each_entry_safe(dmabuf, next,
6268                                     &ras_fwlog->fwlog_buff_list,
6269                                     list) {
6270                         list_del(&dmabuf->list);
6271                         dma_free_coherent(&phba->pcidev->dev,
6272                                           LPFC_RAS_MAX_ENTRY_SIZE,
6273                                           dmabuf->virt, dmabuf->phys);
6274                         kfree(dmabuf);
6275                 }
6276         }
6277
6278         if (ras_fwlog->lwpd.virt) {
6279                 dma_free_coherent(&phba->pcidev->dev,
6280                                   sizeof(uint32_t) * 2,
6281                                   ras_fwlog->lwpd.virt,
6282                                   ras_fwlog->lwpd.phys);
6283                 ras_fwlog->lwpd.virt = NULL;
6284         }
6285
6286         ras_fwlog->ras_active = false;
6287 }
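
/*
 * Illustrative sketch, not part of the driver: teardown ordering for
 * RAS FW logging.  Logging is stopped on the adapter first so the
 * hardware no longer writes into host memory, then the DMA buffers
 * are released.
 */
static void lpfc_example_ras_teardown(struct lpfc_hba *phba)
{
        lpfc_ras_stop_fwlog(phba);      /* quiesce adapter-side writes */
        lpfc_sli4_ras_dma_free(phba);   /* then free LWPD and buffers */
}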
6288
6289 /**
6290  * lpfc_sli4_ras_dma_alloc - Allocate memory for FW logging support
6291  * @phba: Pointer to HBA context object.
6292  * @fwlog_buff_count: Count of buffers to be created.
6293  *
6294  * This routine allocates DMA memory for the Log Write Position Data
6295  * [LWPD] and for the buffers used to post FW log entries to the
6296  * adapter.  The buffer count is derived from the module parameter
6297  * ras_fwlog_buffsize; each buffer posted to the FW is 64K.
6298  **/
6299
6300 static int
6301 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6302                         uint32_t fwlog_buff_count)
6303 {
6304         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6305         struct lpfc_dmabuf *dmabuf;
6306         int rc = 0, i = 0;
6307
6308         /* Initialize List */
6309         INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6310
6311         /* Allocate memory for the LWPD */
6312         ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6313                                             sizeof(uint32_t) * 2,
6314                                             &ras_fwlog->lwpd.phys,
6315                                             GFP_KERNEL);
6316         if (!ras_fwlog->lwpd.virt) {
6317                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6318                                 "6185 LWPD Memory Alloc Failed\n");
6319
6320                 return -ENOMEM;
6321         }
6322
6323         ras_fwlog->fw_buffcount = fwlog_buff_count;
6324         for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6325                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6326                                  GFP_KERNEL);
6327                 if (!dmabuf) {
6328                         rc = -ENOMEM;
6329                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6330                                         "6186 Memory Alloc failed FW logging\n");
6331                         goto free_mem;
6332                 }
6333
6334                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6335                                                   LPFC_RAS_MAX_ENTRY_SIZE,
6336                                                   &dmabuf->phys, GFP_KERNEL);
6337                 if (!dmabuf->virt) {
6338                         kfree(dmabuf);
6339                         rc = -ENOMEM;
6340                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6341                                         "6187 DMA Alloc Failed FW logging\n");
6342                         goto free_mem;
6343                 }
6344                 dmabuf->buffer_tag = i;
6345                 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6346         }
6347
6348 free_mem:
6349         if (rc)
6350                 lpfc_sli4_ras_dma_free(phba);
6351
6352         return rc;
6353 }
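
/*
 * A worked example of the buffer-count math done by the caller,
 * lpfc_sli4_ras_fwlog_init().  The constant values are assumptions for
 * illustration only: if LPFC_RAS_MIN_BUFF_POST_SIZE were 256K,
 * LPFC_RAS_MAX_ENTRY_SIZE 64K and cfg_ras_fwlog_buffsize 4, then
 *
 *      fwlog_buffsize    = 256K * 4 = 1M
 *      fwlog_entry_count = 1M / 64K = 16
 *
 * i.e. sixteen 64K DMA buffers would be posted to the adapter.
 */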
6354
6355 /**
6356  * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
6357  * @phba: pointer to lpfc hba data structure.
6358  * @pmb: pointer to the driver internal queue element for mailbox command.
6359  *
6360  * Completion handler for driver's RAS MBX command to the device.
6361  **/
6362 static void
6363 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6364 {
6365         MAILBOX_t *mb;
6366         union lpfc_sli4_cfg_shdr *shdr;
6367         uint32_t shdr_status, shdr_add_status;
6368         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6369
6370         mb = &pmb->u.mb;
6371
6372         shdr = (union lpfc_sli4_cfg_shdr *)
6373                 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6374         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6375         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6376
6377         if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6378                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6379                                 "6188 FW LOG mailbox "
6380                                 "completed with status x%x add_status x%x,"
6381                                 " mbx status x%x\n",
6382                                 shdr_status, shdr_add_status, mb->mbxStatus);
6383
6384                 ras_fwlog->ras_hwsupport = false;
6385                 goto disable_ras;
6386         }
6387
6388         ras_fwlog->ras_active = true;
6389         mempool_free(pmb, phba->mbox_mem_pool);
6390
6391         return;
6392
6393 disable_ras:
6394         /* Free RAS DMA memory */
6395         lpfc_sli4_ras_dma_free(phba);
6396         mempool_free(pmb, phba->mbox_mem_pool);
6397 }
6398
6399 /**
6400  * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6401  * @phba: pointer to lpfc hba data structure.
6402  * @fwlog_level: Logging verbosity level.
6403  * @fwlog_enable: Enable/Disable logging.
6404  *
6405  * Initialize memory and post mailbox command to enable FW logging in host
6406  * memory.
6407  **/
6408 int
6409 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6410                          uint32_t fwlog_level,
6411                          uint32_t fwlog_enable)
6412 {
6413         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6414         struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6415         struct lpfc_dmabuf *dmabuf;
6416         LPFC_MBOXQ_t *mbox;
6417         uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6418         int rc = 0;
6419
6420         fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6421                           phba->cfg_ras_fwlog_buffsize);
6422         fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6423
6424         /*
6425          * If re-enabling FW logging support, use the earlier allocated
6426          * DMA buffers while posting the MBX command.
6427          */
6428         if (!ras_fwlog->lwpd.virt) {
6429                 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6430                 if (rc) {
6431                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6432                                         "6189 FW Log Memory Allocation Failed\n");
6433                         return rc;
6434                 }
6435         }
6436
6437         /* Setup Mailbox command */
6438         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6439         if (!mbox) {
6440                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6441                                 "6190 RAS MBX Alloc Failed\n");
6442                 rc = -ENOMEM;
6443                 goto mem_free;
6444         }
6445
6446         ras_fwlog->fw_loglevel = fwlog_level;
6447         len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6448                 sizeof(struct lpfc_sli4_cfg_mhdr));
6449
6450         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6451                          LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6452                          len, LPFC_SLI4_MBX_EMBED);
6453
6454         mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6455         bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6456                fwlog_enable);
6457         bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6458                ras_fwlog->fw_loglevel);
6459         bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6460                ras_fwlog->fw_buffcount);
6461         bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6462                LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6463
6464         /* Update DMA buffer address */
6465         list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6466                 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6467
6468                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6469                         putPaddrLow(dmabuf->phys);
6470
6471                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6472                         putPaddrHigh(dmabuf->phys);
6473         }
6474
6475         /* Update LWPD address */
6476         mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6477         mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6478
6479         mbox->vport = phba->pport;
6480         mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6481
6482         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6483
6484         if (rc == MBX_NOT_FINISHED) {
6485                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6191 FW-Log Mailbox failed. "
                                "status %d mbxStatus: x%x\n", rc,
                                bf_get(lpfc_mqe_status, &mbox->u.mqe));
6489                 mempool_free(mbox, phba->mbox_mem_pool);
6490                 rc = -EIO;
6491                 goto mem_free;
        } else {
                rc = 0;
        }
6494 mem_free:
6495         if (rc)
6496                 lpfc_sli4_ras_dma_free(phba);
6497
6498         return rc;
6499 }
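
/*
 * Illustrative usage sketch (not part of the driver): re-enabling FW
 * logging at the currently configured level after checking adapter
 * support, mirroring the call made by lpfc_sli4_ras_setup() below.
 *
 *  if (!lpfc_check_fwlog_support(phba))
 *          lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
 *                                   LPFC_RAS_ENABLE_LOGGING);
 */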
6500
6501 /**
6502  * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6503  * @phba: Pointer to HBA context object.
6504  *
6505  * Check if RAS is supported on the adapter and initialize it.
6506  **/
6507 void
6508 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6509 {
        /* Check whether the RAS FW log needs to be enabled */
6511         if (lpfc_check_fwlog_support(phba))
6512                 return;
6513
6514         lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6515                                  LPFC_RAS_ENABLE_LOGGING);
6516 }
6517
6518 /**
6519  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6520  * @phba: Pointer to HBA context object.
6521  *
6522  * This function allocates all SLI4 resource identifiers.
6523  **/
6524 int
6525 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6526 {
6527         int i, rc, error = 0;
6528         uint16_t count, base;
6529         unsigned long longs;
6530
6531         if (!phba->sli4_hba.rpi_hdrs_in_use)
6532                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6533         if (phba->sli4_hba.extents_in_use) {
6534                 /*
6535                  * The port supports resource extents. The XRI, VPI, VFI, RPI
6536                  * resource extent count must be read and allocated before
6537                  * provisioning the resource id arrays.
6538                  */
6539                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6540                     LPFC_IDX_RSRC_RDY) {
6541                         /*
6542                          * Extent-based resources are set - the driver could
6543                          * be in a port reset. Figure out if any corrective
6544                          * actions need to be taken.
6545                          */
6546                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6547                                                  LPFC_RSC_TYPE_FCOE_VFI);
6548                         if (rc != 0)
6549                                 error++;
6550                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6551                                                  LPFC_RSC_TYPE_FCOE_VPI);
6552                         if (rc != 0)
6553                                 error++;
6554                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6555                                                  LPFC_RSC_TYPE_FCOE_XRI);
6556                         if (rc != 0)
6557                                 error++;
6558                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6559                                                  LPFC_RSC_TYPE_FCOE_RPI);
6560                         if (rc != 0)
6561                                 error++;
6562
6563                         /*
6564                          * It's possible that the number of resources
6565                          * provided to this port instance changed between
6566                          * resets.  Detect this condition and reallocate
6567                          * resources.  Otherwise, there is no action.
6568                          */
6569                         if (error) {
6570                                 lpfc_printf_log(phba, KERN_INFO,
6571                                                 LOG_MBOX | LOG_INIT,
6572                                                 "2931 Detected extent resource "
6573                                                 "change.  Reallocating all "
6574                                                 "extents.\n");
6575                                 rc = lpfc_sli4_dealloc_extent(phba,
6576                                                  LPFC_RSC_TYPE_FCOE_VFI);
6577                                 rc = lpfc_sli4_dealloc_extent(phba,
6578                                                  LPFC_RSC_TYPE_FCOE_VPI);
6579                                 rc = lpfc_sli4_dealloc_extent(phba,
6580                                                  LPFC_RSC_TYPE_FCOE_XRI);
6581                                 rc = lpfc_sli4_dealloc_extent(phba,
6582                                                  LPFC_RSC_TYPE_FCOE_RPI);
6583                         } else
6584                                 return 0;
6585                 }
6586
6587                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6588                 if (unlikely(rc))
6589                         goto err_exit;
6590
6591                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6592                 if (unlikely(rc))
6593                         goto err_exit;
6594
6595                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6596                 if (unlikely(rc))
6597                         goto err_exit;
6598
6599                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6600                 if (unlikely(rc))
6601                         goto err_exit;
6602                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6603                        LPFC_IDX_RSRC_RDY);
6604                 return rc;
6605         } else {
6606                 /*
6607                  * The port does not support resource extents.  The XRI, VPI,
6608                  * VFI, RPI resource ids were determined from READ_CONFIG.
6609                  * Just allocate the bitmasks and provision the resource id
                 * arrays.  If a port reset occurred, release any earlier
                 * provisioning first and then re-provision.
6612                  */
6613                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6614                     LPFC_IDX_RSRC_RDY) {
6615                         lpfc_sli4_dealloc_resource_identifiers(phba);
6616                         lpfc_sli4_remove_rpis(phba);
6617                 }
6618                 /* RPIs. */
6619                 count = phba->sli4_hba.max_cfg_param.max_rpi;
                if (count == 0) {
6621                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6622                                         "3279 Invalid provisioning of "
6623                                         "rpi:%d\n", count);
6624                         rc = -EINVAL;
6625                         goto err_exit;
6626                 }
6627                 base = phba->sli4_hba.max_cfg_param.rpi_base;
6628                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6629                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6630                                                    sizeof(unsigned long),
6631                                                    GFP_KERNEL);
6632                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6633                         rc = -ENOMEM;
6634                         goto err_exit;
6635                 }
6636                 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6637                                                  GFP_KERNEL);
6638                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6639                         rc = -ENOMEM;
6640                         goto free_rpi_bmask;
6641                 }
6642
6643                 for (i = 0; i < count; i++)
6644                         phba->sli4_hba.rpi_ids[i] = base + i;
6645
6646                 /* VPIs. */
6647                 count = phba->sli4_hba.max_cfg_param.max_vpi;
                if (count == 0) {
6649                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6650                                         "3280 Invalid provisioning of "
6651                                         "vpi:%d\n", count);
6652                         rc = -EINVAL;
6653                         goto free_rpi_ids;
6654                 }
6655                 base = phba->sli4_hba.max_cfg_param.vpi_base;
6656                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6657                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6658                                           GFP_KERNEL);
6659                 if (unlikely(!phba->vpi_bmask)) {
6660                         rc = -ENOMEM;
6661                         goto free_rpi_ids;
6662                 }
6663                 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6664                                         GFP_KERNEL);
6665                 if (unlikely(!phba->vpi_ids)) {
6666                         rc = -ENOMEM;
6667                         goto free_vpi_bmask;
6668                 }
6669
6670                 for (i = 0; i < count; i++)
6671                         phba->vpi_ids[i] = base + i;
6672
6673                 /* XRIs. */
6674                 count = phba->sli4_hba.max_cfg_param.max_xri;
                if (count == 0) {
6676                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6677                                         "3281 Invalid provisioning of "
6678                                         "xri:%d\n", count);
6679                         rc = -EINVAL;
6680                         goto free_vpi_ids;
6681                 }
6682                 base = phba->sli4_hba.max_cfg_param.xri_base;
6683                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6684                 phba->sli4_hba.xri_bmask = kcalloc(longs,
6685                                                    sizeof(unsigned long),
6686                                                    GFP_KERNEL);
6687                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6688                         rc = -ENOMEM;
6689                         goto free_vpi_ids;
6690                 }
6691                 phba->sli4_hba.max_cfg_param.xri_used = 0;
6692                 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6693                                                  GFP_KERNEL);
6694                 if (unlikely(!phba->sli4_hba.xri_ids)) {
6695                         rc = -ENOMEM;
6696                         goto free_xri_bmask;
6697                 }
6698
6699                 for (i = 0; i < count; i++)
6700                         phba->sli4_hba.xri_ids[i] = base + i;
6701
6702                 /* VFIs. */
6703                 count = phba->sli4_hba.max_cfg_param.max_vfi;
                if (count == 0) {
6705                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6706                                         "3282 Invalid provisioning of "
6707                                         "vfi:%d\n", count);
6708                         rc = -EINVAL;
6709                         goto free_xri_ids;
6710                 }
6711                 base = phba->sli4_hba.max_cfg_param.vfi_base;
6712                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6713                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6714                                                    sizeof(unsigned long),
6715                                                    GFP_KERNEL);
6716                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6717                         rc = -ENOMEM;
6718                         goto free_xri_ids;
6719                 }
6720                 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6721                                                  GFP_KERNEL);
6722                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6723                         rc = -ENOMEM;
6724                         goto free_vfi_bmask;
6725                 }
6726
6727                 for (i = 0; i < count; i++)
6728                         phba->sli4_hba.vfi_ids[i] = base + i;
6729
6730                 /*
6731                  * Mark all resources ready.  An HBA reset doesn't need
6732                  * to reset the initialization.
6733                  */
6734                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6735                        LPFC_IDX_RSRC_RDY);
6736                 return 0;
6737         }
6738
6739  free_vfi_bmask:
6740         kfree(phba->sli4_hba.vfi_bmask);
6741         phba->sli4_hba.vfi_bmask = NULL;
6742  free_xri_ids:
6743         kfree(phba->sli4_hba.xri_ids);
6744         phba->sli4_hba.xri_ids = NULL;
6745  free_xri_bmask:
6746         kfree(phba->sli4_hba.xri_bmask);
6747         phba->sli4_hba.xri_bmask = NULL;
6748  free_vpi_ids:
6749         kfree(phba->vpi_ids);
6750         phba->vpi_ids = NULL;
6751  free_vpi_bmask:
6752         kfree(phba->vpi_bmask);
6753         phba->vpi_bmask = NULL;
6754  free_rpi_ids:
6755         kfree(phba->sli4_hba.rpi_ids);
6756         phba->sli4_hba.rpi_ids = NULL;
6757  free_rpi_bmask:
6758         kfree(phba->sli4_hba.rpi_bmask);
6759         phba->sli4_hba.rpi_bmask = NULL;
6760  err_exit:
6761         return rc;
6762 }
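
/*
 * Illustrative sketch (not part of the driver): how a provisioned
 * bitmask/id-array pair from above is typically consumed.  The helper
 * name is hypothetical and 0xffff stands in for an rpi-allocation error
 * value; the pattern - find a free index in the bitmask, then map it
 * through the _ids array to the port-assigned identifier - is how the
 * driver's real allocators use these fields.
 *
 *  static uint16_t example_alloc_rpi(struct lpfc_hba *phba)
 *  {
 *          unsigned long max = phba->sli4_hba.max_cfg_param.max_rpi;
 *          unsigned long idx;
 *
 *          idx = find_first_zero_bit(phba->sli4_hba.rpi_bmask, max);
 *          if (idx >= max)
 *                  return 0xffff;
 *          set_bit(idx, phba->sli4_hba.rpi_bmask);
 *          return phba->sli4_hba.rpi_ids[idx];
 *  }
 */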
6763
6764 /**
6765  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6766  * @phba: Pointer to HBA context object.
6767  *
 * This function deallocates all previously allocated SLI4 resource
 * identifiers: extents when in use, otherwise the bitmasks and id arrays.
6770  **/
6771 int
6772 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6773 {
6774         if (phba->sli4_hba.extents_in_use) {
6775                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6776                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6777                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6778                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6779         } else {
6780                 kfree(phba->vpi_bmask);
6781                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6782                 kfree(phba->vpi_ids);
6783                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6784                 kfree(phba->sli4_hba.xri_bmask);
6785                 kfree(phba->sli4_hba.xri_ids);
6786                 kfree(phba->sli4_hba.vfi_bmask);
6787                 kfree(phba->sli4_hba.vfi_ids);
6788                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6789                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6790         }
6791
6792         return 0;
6793 }
6794
6795 /**
6796  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6797  * @phba: Pointer to HBA context object.
6798  * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
6800  * @extnt_size: buffer to hold port extent size response.
6801  *
6802  * This function calls the port to read the host allocated extents
6803  * for a particular type.
6804  **/
6805 int
6806 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6807                                uint16_t *extnt_cnt, uint16_t *extnt_size)
6808 {
6809         bool emb;
6810         int rc = 0;
6811         uint16_t curr_blks = 0;
6812         uint32_t req_len, emb_len;
6813         uint32_t alloc_len, mbox_tmo;
6814         struct list_head *blk_list_head;
6815         struct lpfc_rsrc_blks *rsrc_blk;
6816         LPFC_MBOXQ_t *mbox;
6817         void *virtaddr = NULL;
6818         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6819         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6820         union  lpfc_sli4_cfg_shdr *shdr;
6821
6822         switch (type) {
6823         case LPFC_RSC_TYPE_FCOE_VPI:
6824                 blk_list_head = &phba->lpfc_vpi_blk_list;
6825                 break;
6826         case LPFC_RSC_TYPE_FCOE_XRI:
6827                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6828                 break;
6829         case LPFC_RSC_TYPE_FCOE_VFI:
6830                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6831                 break;
6832         case LPFC_RSC_TYPE_FCOE_RPI:
6833                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6834                 break;
6835         default:
6836                 return -EIO;
6837         }
6838
        /* Count the number of extents currently allocated for this type. */
6840         list_for_each_entry(rsrc_blk, blk_list_head, list) {
6841                 if (curr_blks == 0) {
6842                         /*
6843                          * The GET_ALLOCATED mailbox does not return the size,
6844                          * just the count.  The size should be just the size
6845                          * stored in the current allocated block and all sizes
6846                          * for an extent type are the same so set the return
6847                          * value now.
6848                          */
6849                         *extnt_size = rsrc_blk->rsrc_size;
6850                 }
6851                 curr_blks++;
6852         }
6853
6854         /*
6855          * Calculate the size of an embedded mailbox.  The uint32_t
         * accounts for the extents-specific word.
6857          */
6858         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6859                 sizeof(uint32_t);
6860
6861         /*
6862          * Presume the allocation and response will fit into an embedded
6863          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6864          */
6865         emb = LPFC_SLI4_MBX_EMBED;
        req_len = curr_blks * sizeof(uint16_t);
6867         if (req_len > emb_len) {
6868                 req_len = curr_blks * sizeof(uint16_t) +
6869                         sizeof(union lpfc_sli4_cfg_shdr) +
6870                         sizeof(uint32_t);
6871                 emb = LPFC_SLI4_MBX_NEMBED;
6872         }
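
        /*
         * Worked example (illustrative): each allocated extent contributes
         * one uint16_t id to the response, so 16 extents need only 32
         * bytes and fit in the embedded payload; only when
         * curr_blks * sizeof(uint16_t) exceeds emb_len does the request
         * switch to the non-embedded, SGE-based mailbox form.
         */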
6873
6874         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6875         if (!mbox)
6876                 return -ENOMEM;
6877         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6878
6879         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6880                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6881                                      req_len, emb);
6882         if (alloc_len < req_len) {
6883                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6884                         "2983 Allocated DMA memory size (x%x) is "
6885                         "less than the requested DMA memory "
6886                         "size (x%x)\n", alloc_len, req_len);
6887                 rc = -ENOMEM;
6888                 goto err_exit;
6889         }
6890         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6891         if (unlikely(rc)) {
6892                 rc = -EIO;
6893                 goto err_exit;
6894         }
6895
6896         if (!phba->sli4_hba.intr_enable)
6897                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6898         else {
6899                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6900                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6901         }
6902
6903         if (unlikely(rc)) {
6904                 rc = -EIO;
6905                 goto err_exit;
6906         }
6907
        /*
         * Figure out where the response is located.  Then get local pointers
         * to the response data.  The port does not guarantee a response for
         * every extent count request, so update the local variable with the
         * allocated count returned by the port.
         */
6914         if (emb == LPFC_SLI4_MBX_EMBED) {
6915                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6916                 shdr = &rsrc_ext->header.cfg_shdr;
6917                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6918         } else {
6919                 virtaddr = mbox->sge_array->addr[0];
6920                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6921                 shdr = &n_rsrc->cfg_shdr;
6922                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6923         }
6924
6925         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6926                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6927                         "2984 Failed to read allocated resources "
6928                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6929                         type,
6930                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
6931                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6932                 rc = -EIO;
6933                 goto err_exit;
6934         }
6935  err_exit:
6936         lpfc_sli4_mbox_cmd_free(phba, mbox);
6937         return rc;
6938 }
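
/*
 * Illustrative caller sketch (not part of the driver): reading back how
 * many XRI extents the port has allocated and the size of each.
 *
 *  uint16_t ext_cnt, ext_size;
 *  int rc;
 *
 *  rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                      &ext_cnt, &ext_size);
 *
 * On success, ext_cnt holds the extent count reported by the port and
 * ext_size the number of resource ids carried by each extent.
 */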
6939
6940 /**
6941  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
6942  * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For a single
 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
 * post mailbox command for posting.
 *
 * Returns: the number of XRIs actually posted on success, -EIO on failure.
6956  **/
6957 static int
6958 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6959                           struct list_head *sgl_list, int cnt)
6960 {
6961         struct lpfc_sglq *sglq_entry = NULL;
6962         struct lpfc_sglq *sglq_entry_next = NULL;
6963         struct lpfc_sglq *sglq_entry_first = NULL;
6964         int status, total_cnt;
6965         int post_cnt = 0, num_posted = 0, block_cnt = 0;
6966         int last_xritag = NO_XRI;
6967         LIST_HEAD(prep_sgl_list);
6968         LIST_HEAD(blck_sgl_list);
6969         LIST_HEAD(allc_sgl_list);
6970         LIST_HEAD(post_sgl_list);
6971         LIST_HEAD(free_sgl_list);
6972
6973         spin_lock_irq(&phba->hbalock);
6974         spin_lock(&phba->sli4_hba.sgl_list_lock);
6975         list_splice_init(sgl_list, &allc_sgl_list);
6976         spin_unlock(&phba->sli4_hba.sgl_list_lock);
6977         spin_unlock_irq(&phba->hbalock);
6978
6979         total_cnt = cnt;
6980         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6981                                  &allc_sgl_list, list) {
6982                 list_del_init(&sglq_entry->list);
6983                 block_cnt++;
6984                 if ((last_xritag != NO_XRI) &&
6985                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
                        /* a hole in the xri block; form an sgl posting block */
6987                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
6988                         post_cnt = block_cnt - 1;
6989                         /* prepare list for next posting block */
6990                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6991                         block_cnt = 1;
6992                 } else {
6993                         /* prepare list for next posting block */
6994                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6995                         /* enough sgls for non-embed sgl mbox command */
6996                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6997                                 list_splice_init(&prep_sgl_list,
6998                                                  &blck_sgl_list);
6999                                 post_cnt = block_cnt;
7000                                 block_cnt = 0;
7001                         }
7002                 }
7003                 num_posted++;
7004
7005                 /* keep track of last sgl's xritag */
7006                 last_xritag = sglq_entry->sli4_xritag;
7007
7008                 /* end of repost sgl list condition for buffers */
7009                 if (num_posted == total_cnt) {
7010                         if (post_cnt == 0) {
7011                                 list_splice_init(&prep_sgl_list,
7012                                                  &blck_sgl_list);
7013                                 post_cnt = block_cnt;
7014                         } else if (block_cnt == 1) {
7015                                 status = lpfc_sli4_post_sgl(phba,
7016                                                 sglq_entry->phys, 0,
7017                                                 sglq_entry->sli4_xritag);
7018                                 if (!status) {
7019                                         /* successful, put sgl to posted list */
7020                                         list_add_tail(&sglq_entry->list,
7021                                                       &post_sgl_list);
7022                                 } else {
7023                                         /* Failure, put sgl to free list */
7024                                         lpfc_printf_log(phba, KERN_WARNING,
7025                                                 LOG_SLI,
7026                                                 "3159 Failed to post "
7027                                                 "sgl, xritag:x%x\n",
7028                                                 sglq_entry->sli4_xritag);
7029                                         list_add_tail(&sglq_entry->list,
7030                                                       &free_sgl_list);
7031                                         total_cnt--;
7032                                 }
7033                         }
7034                 }
7035
7036                 /* continue until a nembed page worth of sgls */
7037                 if (post_cnt == 0)
7038                         continue;
7039
7040                 /* post the buffer list sgls as a block */
7041                 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7042                                                  post_cnt);
7043
7044                 if (!status) {
7045                         /* success, put sgl list to posted sgl list */
7046                         list_splice_init(&blck_sgl_list, &post_sgl_list);
7047                 } else {
7048                         /* Failure, put sgl list to free sgl list */
7049                         sglq_entry_first = list_first_entry(&blck_sgl_list,
7050                                                             struct lpfc_sglq,
7051                                                             list);
7052                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7053                                         "3160 Failed to post sgl-list, "
7054                                         "xritag:x%x-x%x\n",
7055                                         sglq_entry_first->sli4_xritag,
7056                                         (sglq_entry_first->sli4_xritag +
7057                                          post_cnt - 1));
7058                         list_splice_init(&blck_sgl_list, &free_sgl_list);
7059                         total_cnt -= post_cnt;
7060                 }
7061
                /* don't reset xritag due to hole in xri block */
7063                 if (block_cnt == 0)
7064                         last_xritag = NO_XRI;
7065
7066                 /* reset sgl post count for next round of posting */
7067                 post_cnt = 0;
7068         }
7069
7070         /* free the sgls failed to post */
7071         lpfc_free_sgl_list(phba, &free_sgl_list);
7072
7073         /* push sgls posted to the available list */
7074         if (!list_empty(&post_sgl_list)) {
7075                 spin_lock_irq(&phba->hbalock);
7076                 spin_lock(&phba->sli4_hba.sgl_list_lock);
7077                 list_splice_init(&post_sgl_list, sgl_list);
7078                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7079                 spin_unlock_irq(&phba->hbalock);
7080         } else {
7081                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7082                                 "3161 Failure to post sgl to port.\n");
7083                 return -EIO;
7084         }
7085
7086         /* return the number of XRIs actually posted */
7087         return total_cnt;
7088 }
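
/*
 * Worked example (illustrative): reposting six sgls with xritags
 * 0x10, 0x11, 0x12, 0x20, 0x21, 0x22.  The hole between 0x12 and 0x20
 * closes the first block, so one non-embedded block post is issued for
 * {0x10, 0x11, 0x12}; the end-of-list condition then flushes a second
 * block post for {0x20, 0x21, 0x22}.  A final sgl left alone by a hole
 * would instead be posted individually via lpfc_sli4_post_sgl().
 */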
7089
7090 /**
7091  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7092  * @phba: pointer to lpfc hba data structure.
7093  *
7094  * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
7096  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7097  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7098  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7099  *
7100  * Returns: 0 = success, non-zero failure.
7101  **/
7102 int
7103 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7104 {
7105         LIST_HEAD(post_nblist);
7106         int num_posted, rc = 0;
7107
        /* gather all NVME buffers that need reposting onto a local list */
7109         lpfc_io_buf_flush(phba, &post_nblist);
7110
7111         /* post the list of nvme buffer sgls to port if available */
7112         if (!list_empty(&post_nblist)) {
7113                 num_posted = lpfc_sli4_post_io_sgl_list(
7114                         phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7115                 /* failed to post any nvme buffer, return error */
7116                 if (num_posted == 0)
7117                         rc = -EIO;
7118         }
7119         return rc;
7120 }
7121
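/**
 * lpfc_set_host_data - Populate a SET_HOST_DATA mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the mailbox command to populate.
 *
 * Build an embedded SET_HOST_DATA mailbox command that reports the host OS
 * and driver version string to the firmware.  The caller is responsible
 * for issuing the mailbox command.
 **/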
7122 void
7123 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7124 {
7125         uint32_t len;
7126
7127         len = sizeof(struct lpfc_mbx_set_host_data) -
7128                 sizeof(struct lpfc_sli4_cfg_mhdr);
7129         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7130                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7131                          LPFC_SLI4_MBX_EMBED);
7132
7133         mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7134         mbox->u.mqe.un.set_host_data.param_len =
7135                                         LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7136         snprintf(mbox->u.mqe.un.set_host_data.data,
7137                  LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7138                  "Linux %s v"LPFC_DRIVER_VERSION,
7139                  (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7140 }
7141
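/**
 * lpfc_post_rq_buffer - Post buffers to a header/data receive queue pair
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 * @drq: Pointer to the data receive queue.
 * @count: Number of buffers to allocate and post.
 * @idx: Receive queue index.
 *
 * Allocate up to @count receive buffers and post them to the header/data
 * RQ pair as RQE pairs.  Buffers that post successfully are tracked on the
 * RQ's buffer list; the rest are freed.  Always returns 1.
 **/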
7142 int
7143 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7144                     struct lpfc_queue *drq, int count, int idx)
7145 {
7146         int rc, i;
7147         struct lpfc_rqe hrqe;
7148         struct lpfc_rqe drqe;
7149         struct lpfc_rqb *rqbp;
7150         unsigned long flags;
7151         struct rqb_dmabuf *rqb_buffer;
7152         LIST_HEAD(rqb_buf_list);
7153
7154         spin_lock_irqsave(&phba->hbalock, flags);
7155         rqbp = hrq->rqbp;
7156         for (i = 0; i < count; i++) {
                /* If the RQ is already full, don't bother */
7158                 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7159                         break;
7160                 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7161                 if (!rqb_buffer)
7162                         break;
7163                 rqb_buffer->hrq = hrq;
7164                 rqb_buffer->drq = drq;
7165                 rqb_buffer->idx = idx;
7166                 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7167         }
7168         while (!list_empty(&rqb_buf_list)) {
7169                 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7170                                  hbuf.list);
7171
7172                 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7173                 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7174                 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7175                 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7176                 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7177                 if (rc < 0) {
7178                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7179                                         "6421 Cannot post to HRQ %d: %x %x %x "
7180                                         "DRQ %x %x\n",
7181                                         hrq->queue_id,
7182                                         hrq->host_index,
7183                                         hrq->hba_index,
7184                                         hrq->entry_count,
7185                                         drq->host_index,
7186                                         drq->hba_index);
7187                         rqbp->rqb_free_buffer(phba, rqb_buffer);
7188                 } else {
7189                         list_add_tail(&rqb_buffer->hbuf.list,
7190                                       &rqbp->rqb_buffer_list);
7191                         rqbp->buffer_count++;
7192                 }
7193         }
7194         spin_unlock_irqrestore(&phba->hbalock, flags);
7195         return 1;
7196 }
7197
7198 /**
7199  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7200  * @phba: Pointer to HBA context object.
7201  *
7202  * This function is the main SLI4 device initialization PCI function. This
7203  * function is called by the HBA initialization code, HBA reset code and
7204  * HBA error attention handler code. Caller is not required to hold any
7205  * locks.
7206  **/
7207 int
7208 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7209 {
7210         int rc, i, cnt, len;
7211         LPFC_MBOXQ_t *mboxq;
7212         struct lpfc_mqe *mqe;
7213         uint8_t *vpd;
7214         uint32_t vpd_size;
7215         uint32_t ftr_rsp = 0;
7216         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7217         struct lpfc_vport *vport = phba->pport;
7218         struct lpfc_dmabuf *mp;
7219         struct lpfc_rqb *rqbp;
7220
7221         /* Perform a PCI function reset to start from clean */
7222         rc = lpfc_pci_function_reset(phba);
7223         if (unlikely(rc))
7224                 return -ENODEV;
7225
        /* Check the HBA Host Status Register for readiness */
7227         rc = lpfc_sli4_post_status_check(phba);
        if (unlikely(rc))
                return -ENODEV;

        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
7235
7236         /*
7237          * Allocate a single mailbox container for initializing the
7238          * port.
7239          */
7240         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7241         if (!mboxq)
7242                 return -ENOMEM;
7243
7244         /* Issue READ_REV to collect vpd and FW information. */
7245         vpd_size = SLI4_PAGE_SIZE;
7246         vpd = kzalloc(vpd_size, GFP_KERNEL);
7247         if (!vpd) {
7248                 rc = -ENOMEM;
7249                 goto out_free_mbox;
7250         }
7251
7252         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7253         if (unlikely(rc)) {
7254                 kfree(vpd);
7255                 goto out_free_mbox;
7256         }
7257
7258         mqe = &mboxq->u.mqe;
7259         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7260         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7261                 phba->hba_flag |= HBA_FCOE_MODE;
7262                 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7263         } else {
7264                 phba->hba_flag &= ~HBA_FCOE_MODE;
7265         }
7266
7267         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7268                 LPFC_DCBX_CEE_MODE)
7269                 phba->hba_flag |= HBA_FIP_SUPPORT;
7270         else
7271                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7272
7273         phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7274
7275         if (phba->sli_rev != LPFC_SLI_REV4) {
7276                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7277                         "0376 READ_REV Error. SLI Level %d "
7278                         "FCoE enabled %d\n",
7279                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7280                 rc = -EIO;
7281                 kfree(vpd);
7282                 goto out_free_mbox;
7283         }
7284
7285         /*
         * Continue initialization with default values even if the driver
         * failed to read the FCoE param config regions; only read the
         * parameters if the board is FCoE.
7289          */
7290         if (phba->hba_flag & HBA_FCOE_MODE &&
7291             lpfc_sli4_read_fcoe_params(phba))
7292                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7293                         "2570 Failed to read FCoE parameters\n");
7294
7295         /*
         * Retrieve the sli4 device physical port name; failure to do so
         * is considered non-fatal.
7298          */
7299         rc = lpfc_sli4_retrieve_pport_name(phba);
7300         if (!rc)
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "3080 Successfully retrieved SLI4 device "
                                "physical port name: %s.\n", phba->Port);
7304
7305         rc = lpfc_sli4_get_ctl_attr(phba);
7306         if (!rc)
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "8351 Successfully retrieved SLI4 device "
                                "CTL ATTR\n");
7310
7311         /*
7312          * Evaluate the read rev and vpd data. Populate the driver
7313          * state with the results. If this routine fails, the failure
7314          * is not fatal as the driver will use generic values.
7315          */
7316         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7317         if (unlikely(!rc)) {
7318                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7319                                 "0377 Error %d parsing vpd. "
7320                                 "Using defaults.\n", rc);
7321                 rc = 0;
7322         }
7323         kfree(vpd);
7324
7325         /* Save information as VPD data */
7326         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7327         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7328
7329         /*
         * This is because the first G7 ASIC doesn't support the standard
7331          * 0x5a NVME cmd descriptor type/subtype
7332          */
7333         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7334                         LPFC_SLI_INTF_IF_TYPE_6) &&
7335             (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7336             (phba->vpd.rev.smRev == 0) &&
7337             (phba->cfg_nvme_embed_cmd == 1))
7338                 phba->cfg_nvme_embed_cmd = 0;
7339
7340         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7341         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7342                                          &mqe->un.read_rev);
7343         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7344                                        &mqe->un.read_rev);
7345         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7346                                             &mqe->un.read_rev);
7347         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7348                                            &mqe->un.read_rev);
7349         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7350         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7351         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7352         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7353         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7354         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7355         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7356                         "(%d):0380 READ_REV Status x%x "
7357                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7358                         mboxq->vport ? mboxq->vport->vpi : 0,
7359                         bf_get(lpfc_mqe_status, mqe),
7360                         phba->vpd.rev.opFwName,
7361                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7362                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7363
7364         /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
7365         rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7366         if (phba->pport->cfg_lun_queue_depth > rc) {
7367                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7368                                 "3362 LUN queue depth changed from %d to %d\n",
7369                                 phba->pport->cfg_lun_queue_depth, rc);
7370                 phba->pport->cfg_lun_queue_depth = rc;
7371         }
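        /*
         * Worked example: with max_xri = 2048 the cap computed above is
         * 2048 >> 3 = 256, so a configured LUN queue depth of 512 would
         * have been reduced to 256.
         */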
7372
7373         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7374             LPFC_SLI_INTF_IF_TYPE_0) {
7375                 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7376                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7377                 if (rc == MBX_SUCCESS) {
7378                         phba->hba_flag |= HBA_RECOVERABLE_UE;
7379                         /* Set 1Sec interval to detect UE */
7380                         phba->eratt_poll_interval = 1;
7381                         phba->sli4_hba.ue_to_sr = bf_get(
7382                                         lpfc_mbx_set_feature_UESR,
7383                                         &mboxq->u.mqe.un.set_feature);
7384                         phba->sli4_hba.ue_to_rp = bf_get(
7385                                         lpfc_mbx_set_feature_UERP,
7386                                         &mboxq->u.mqe.un.set_feature);
7387                 }
7388         }
7389
7390         if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7391                 /* Enable MDS Diagnostics only if the SLI Port supports it */
7392                 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7393                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7394                 if (rc != MBX_SUCCESS)
7395                         phba->mds_diags_support = 0;
7396         }
7397
7398         /*
7399          * Discover the port's supported feature set and match it against the
         * host's requests.
7401          */
7402         lpfc_request_features(phba, mboxq);
7403         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7404         if (unlikely(rc)) {
7405                 rc = -EIO;
7406                 goto out_free_mbox;
7407         }
7408
7409         /*
7410          * The port must support FCP initiator mode as this is the
7411          * only mode running in the host.
7412          */
7413         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7414                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7415                                 "0378 No support for fcpi mode.\n");
7416                 ftr_rsp++;
7417         }
7418
7419         /* Performance Hints are ONLY for FCoE */
7420         if (phba->hba_flag & HBA_FCOE_MODE) {
7421                 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7422                         phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7423                 else
7424                         phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7425         }
7426
7427         /*
7428          * If the port cannot support the host's requested features
7429          * then turn off the global config parameters to disable the
7430          * feature in the driver.  This is not a fatal error.
7431          */
7432         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7433                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7434                         phba->cfg_enable_bg = 0;
7435                         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7436                         ftr_rsp++;
7437                 }
7438         }
7439
7440         if (phba->max_vpi && phba->cfg_enable_npiv &&
7441             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7442                 ftr_rsp++;
7443
7444         if (ftr_rsp) {
7445                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7446                                 "0379 Feature Mismatch Data: x%08x %08x "
7447                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7448                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7449                                 phba->cfg_enable_npiv, phba->max_vpi);
7450                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7451                         phba->cfg_enable_bg = 0;
7452                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7453                         phba->cfg_enable_npiv = 0;
7454         }
7455
7456         /* These SLI3 features are assumed in SLI4 */
7457         spin_lock_irq(&phba->hbalock);
7458         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7459         spin_unlock_irq(&phba->hbalock);
7460
7461         /*
         * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
         * calls depend on these resources to complete port setup.
7464          */
7465         rc = lpfc_sli4_alloc_resource_identifiers(phba);
7466         if (rc) {
7467                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7468                                 "2920 Failed to alloc Resource IDs "
7469                                 "rc = x%x\n", rc);
7470                 goto out_free_mbox;
7471         }
7472
7473         lpfc_set_host_data(phba, mboxq);
7474
7475         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7476         if (rc) {
7477                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
                                "2134 Failed to set host os driver version %x\n",
7479                                 rc);
7480         }
7481
7482         /* Read the port's service parameters. */
7483         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7484         if (rc) {
7485                 phba->link_state = LPFC_HBA_ERROR;
7486                 rc = -ENOMEM;
7487                 goto out_free_mbox;
7488         }
7489
7490         mboxq->vport = vport;
7491         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7492         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7493         if (rc == MBX_SUCCESS) {
7494                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7495                 rc = 0;
7496         }
7497
7498         /*
7499          * This memory was allocated by the lpfc_read_sparam routine. Release
7500          * it to the mbuf pool.
7501          */
7502         lpfc_mbuf_free(phba, mp->virt, mp->phys);
7503         kfree(mp);
7504         mboxq->ctx_buf = NULL;
7505         if (unlikely(rc)) {
7506                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7507                                 "0382 READ_SPARAM command failed "
7508                                 "status %d, mbxStatus x%x\n",
7509                                 rc, bf_get(lpfc_mqe_status, mqe));
7510                 phba->link_state = LPFC_HBA_ERROR;
7511                 rc = -EIO;
7512                 goto out_free_mbox;
7513         }
7514
7515         lpfc_update_vport_wwn(vport);
7516
7517         /* Update the fc_host data structures with new wwn. */
7518         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7519         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7520
7521         /* Create all the SLI4 queues */
7522         rc = lpfc_sli4_queue_create(phba);
7523         if (rc) {
7524                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7525                                 "3089 Failed to allocate queues\n");
7526                 rc = -ENODEV;
7527                 goto out_free_mbox;
7528         }
7529         /* Set up all the queues to the device */
7530         rc = lpfc_sli4_queue_setup(phba);
7531         if (unlikely(rc)) {
7532                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7533                                 "0381 Error %d during queue setup.\n ", rc);
7534                 goto out_stop_timers;
7535         }
7536         /* Initialize the driver internal SLI layer lists. */
7537         lpfc_sli4_setup(phba);
7538         lpfc_sli4_queue_init(phba);
7539
7540         /* update host els xri-sgl sizes and mappings */
7541         rc = lpfc_sli4_els_sgl_update(phba);
7542         if (unlikely(rc)) {
7543                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7544                                 "1400 Failed to update xri-sgl size and "
7545                                 "mapping: %d\n", rc);
7546                 goto out_destroy_queue;
7547         }
7548
7549         /* register the els sgl pool to the port */
7550         rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7551                                        phba->sli4_hba.els_xri_cnt);
7552         if (unlikely(rc < 0)) {
7553                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7554                                 "0582 Error %d during els sgl post "
7555                                 "operation\n", rc);
7556                 rc = -ENODEV;
7557                 goto out_destroy_queue;
7558         }
7559         phba->sli4_hba.els_xri_cnt = rc;
7560
7561         if (phba->nvmet_support) {
7562                 /* update host nvmet xri-sgl sizes and mappings */
7563                 rc = lpfc_sli4_nvmet_sgl_update(phba);
7564                 if (unlikely(rc)) {
7565                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7566                                         "6308 Failed to update nvmet-sgl size "
7567                                         "and mapping: %d\n", rc);
7568                         goto out_destroy_queue;
7569                 }
7570
7571                 /* register the nvmet sgl pool to the port */
7572                 rc = lpfc_sli4_repost_sgl_list(
7573                         phba,
7574                         &phba->sli4_hba.lpfc_nvmet_sgl_list,
7575                         phba->sli4_hba.nvmet_xri_cnt);
7576                 if (unlikely(rc < 0)) {
7577                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7578                                         "3117 Error %d during nvmet "
7579                                         "sgl post\n", rc);
7580                         rc = -ENODEV;
7581                         goto out_destroy_queue;
7582                 }
7583                 phba->sli4_hba.nvmet_xri_cnt = rc;
7584
7585                 cnt = phba->cfg_iocb_cnt * 1024;
7586                 /* We need 1 iocbq for every SGL, for IO processing */
7587                 cnt += phba->sli4_hba.nvmet_xri_cnt;
7588         } else {
7589                 /* update host common xri-sgl sizes and mappings */
7590                 rc = lpfc_sli4_io_sgl_update(phba);
7591                 if (unlikely(rc)) {
7592                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7593                                         "6082 Failed to update nvme-sgl size "
7594                                         "and mapping: %d\n", rc);
7595                         goto out_destroy_queue;
7596                 }
7597
7598                 /* register the allocated common sgl pool to the port */
7599                 rc = lpfc_sli4_repost_io_sgl_list(phba);
7600                 if (unlikely(rc)) {
7601                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7602                                         "6116 Error %d during nvme sgl post "
7603                                         "operation\n", rc);
7604                         /* Some NVME buffers were moved to abort nvme list */
7605                         /* A pci function reset will repost them */
7606                         rc = -ENODEV;
7607                         goto out_destroy_queue;
7608                 }
7609                 cnt = phba->cfg_iocb_cnt * 1024;
7610         }
7611
7612         if (!phba->sli.iocbq_lookup) {
7613                 /* Initialize and populate the iocb list per host */
7614                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7615                                 "2821 initialize iocb list %d total %d\n",
7616                                 phba->cfg_iocb_cnt, cnt);
7617                 rc = lpfc_init_iocb_list(phba, cnt);
7618                 if (rc) {
7619                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7620                                         "1413 Failed to init iocb list.\n");
7621                         goto out_destroy_queue;
7622                 }
7623         }
7624
7625         if (phba->nvmet_support)
7626                 lpfc_nvmet_create_targetport(phba);
7627
7628         if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7629                 /* Post initial buffers to all RQs created */
7630                 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7631                         rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7632                         INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7633                         rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7634                         rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7635                         rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7636                         rqbp->buffer_count = 0;
7637
7638                         lpfc_post_rq_buffer(
7639                                 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7640                                 phba->sli4_hba.nvmet_mrq_data[i],
7641                                 phba->cfg_nvmet_mrq_post, i);
7642                 }
7643         }
7644
7645         /* Post the rpi header region to the device. */
7646         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7647         if (unlikely(rc)) {
7648                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7649                                 "0393 Error %d during rpi post operation\n",
7650                                 rc);
7651                 rc = -ENODEV;
7652                 goto out_destroy_queue;
7653         }
7654         lpfc_sli4_node_prep(phba);
7655
7656         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7657                 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7658                         /*
7659                          * The FC Port needs to register FCFI (index 0)
7660                          */
7661                         lpfc_reg_fcfi(phba, mboxq);
7662                         mboxq->vport = phba->pport;
7663                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7664                         if (rc != MBX_SUCCESS)
7665                                 goto out_unset_queue;
7666                         rc = 0;
7667                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7668                                                 &mboxq->u.mqe.un.reg_fcfi);
7669                 } else {
                        /* We are in NVME target mode with MRQ > 1 */
7671
7672                         /* First register the FCFI */
7673                         lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7674                         mboxq->vport = phba->pport;
7675                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7676                         if (rc != MBX_SUCCESS)
7677                                 goto out_unset_queue;
7678                         rc = 0;
7679                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7680                                                 &mboxq->u.mqe.un.reg_fcfi_mrq);
7681
7682                         /* Next register the MRQs */
7683                         lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7684                         mboxq->vport = phba->pport;
7685                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7686                         if (rc != MBX_SUCCESS)
7687                                 goto out_unset_queue;
7688                         rc = 0;
7689                 }
7690                 /* Check if the port is configured to be disabled */
7691                 lpfc_sli_read_link_ste(phba);
7692         }
7693
7694         /* Don't post more new buffers if the repost has already
7695          * recovered the nvme sgls.
7696          */
7697         if (phba->nvmet_support == 0) {
7698                 if (phba->sli4_hba.io_xri_cnt == 0) {
7699                         len = lpfc_new_io_buf(
7700                                               phba, phba->sli4_hba.io_xri_max);
7701                         if (len == 0) {
7702                                 rc = -ENOMEM;
7703                                 goto out_unset_queue;
7704                         }
7705
7706                         if (phba->cfg_xri_rebalancing)
7707                                 lpfc_create_multixri_pools(phba);
7708                 }
7709         } else {
7710                 phba->cfg_xri_rebalancing = 0;
7711         }
7712
7713         /* Allow asynchronous mailbox command to go through */
7714         spin_lock_irq(&phba->hbalock);
7715         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7716         spin_unlock_irq(&phba->hbalock);
7717
7718         /* Post receive buffers to the device */
7719         lpfc_sli4_rb_setup(phba);
7720
7721         /* Reset HBA FCF states after HBA reset */
7722         phba->fcf.fcf_flag = 0;
7723         phba->fcf.current_rec.flag = 0;
7724
7725         /* Start the ELS watchdog timer */
7726         mod_timer(&vport->els_tmofunc,
7727                   jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7728
7729         /* Start heart beat timer */
7730         mod_timer(&phba->hb_tmofunc,
7731                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7732         phba->hb_outstanding = 0;
7733         phba->last_completion_time = jiffies;
7734
7735         /* start eq_delay heartbeat */
7736         if (phba->cfg_auto_imax)
7737                 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7738                                    msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7739
7740         /* Start error attention (ERATT) polling timer */
7741         mod_timer(&phba->eratt_poll,
7742                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7743
7744         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7745         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7746                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7747                 if (!rc) {
7748                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7749                                         "2829 This device supports "
7750                                         "Advanced Error Reporting (AER)\n");
7751                         spin_lock_irq(&phba->hbalock);
7752                         phba->hba_flag |= HBA_AER_ENABLED;
7753                         spin_unlock_irq(&phba->hbalock);
7754                 } else {
7755                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7756                                         "2830 This device does not support "
7757                                         "Advanced Error Reporting (AER)\n");
7758                         phba->cfg_aer_support = 0;
7759                 }
7760                 rc = 0;
7761         }
7762
7763         /*
7764          * The port is ready, set the host's link state to LINK_DOWN
7765          * in preparation for link interrupts.
7766          */
7767         spin_lock_irq(&phba->hbalock);
7768         phba->link_state = LPFC_LINK_DOWN;
7769
7770         /* Check if physical ports are trunked */
7771         if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7772                 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7773         if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7774                 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7775         if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7776                 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7777         if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7778                 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7779         spin_unlock_irq(&phba->hbalock);
7780
7781         /* Arm the CQs and then EQs on device */
7782         lpfc_sli4_arm_cqeq_intr(phba);
7783
7784         /* Indicate device interrupt mode */
7785         phba->sli4_hba.intr_enable = 1;
7786
7787         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7788             (phba->hba_flag & LINK_DISABLED)) {
7789                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7790                                 "3103 Adapter Link is disabled.\n");
7791                 lpfc_down_link(phba, mboxq);
7792                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7793                 if (rc != MBX_SUCCESS) {
7794                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7795                                         "3104 Adapter failed to issue "
7796                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7797                         goto out_io_buff_free;
7798                 }
7799         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7800                 /* don't perform init_link on SLI4 FC port loopback test */
7801                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7802                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7803                         if (rc)
7804                                 goto out_io_buff_free;
7805                 }
7806         }
7807         mempool_free(mboxq, phba->mbox_mem_pool);
7808         return rc;
7809 out_io_buff_free:
7810         /* Free allocated IO Buffers */
7811         lpfc_io_free(phba);
7812 out_unset_queue:
7813         /* Unset all the queues set up in this routine when error out */
7814         lpfc_sli4_queue_unset(phba);
7815 out_destroy_queue:
7816         lpfc_free_iocb_list(phba);
7817         lpfc_sli4_queue_destroy(phba);
7818 out_stop_timers:
7819         lpfc_stop_hba_timers(phba);
7820 out_free_mbox:
7821         mempool_free(mboxq, phba->mbox_mem_pool);
7822         return rc;
7823 }
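
/*
 * Summary sketch of the error-unwind ladder above (illustrative, not
 * executed code): each label tears down what was set up after the
 * previous one, in reverse order of setup:
 *
 *	out_io_buff_free   - lpfc_io_free():  undo lpfc_new_io_buf()
 *	out_unset_queue    - lpfc_sli4_queue_unset()
 *	out_destroy_queue  - lpfc_free_iocb_list() and lpfc_sli4_queue_destroy()
 *	out_stop_timers    - lpfc_stop_hba_timers()
 *	out_free_mbox      - mempool_free(mboxq, phba->mbox_mem_pool)
 */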
7824
7825 /**
7826  * lpfc_mbox_timeout - Timeout call back function for mbox timer
7827  * @t: pointer to the timer_list embedded in the hba structure.
7828  *
7829  * This is the callback function for the mailbox timer. The mailbox
7830  * timer is armed when a new mailbox command is issued and the timer
7831  * is deleted when the mailbox completes. The function is called by
7832  * the kernel timer code when a mailbox does not complete within the
7833  * expected time. This function wakes up the worker thread to
7834  * process the mailbox timeout and returns. All the processing is
7835  * done by the worker thread function lpfc_mbox_timeout_handler.
7836  **/
7837 void
7838 lpfc_mbox_timeout(struct timer_list *t)
7839 {
7840         struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
7841         unsigned long iflag;
7842         uint32_t tmo_posted;
7843
7844         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7845         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7846         if (!tmo_posted)
7847                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7848         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7849
7850         if (!tmo_posted)
7851                 lpfc_worker_wake_up(phba);
7853 }
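
/*
 * Illustrative sketch: the timer that fires lpfc_mbox_timeout() is armed
 * per command with a command-specific timeout, as done in
 * lpfc_sli_issue_mbox_s3() below:
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 */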
7854
7855 /**
7856  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7857  *                                    are pending
7858  * @phba: Pointer to HBA context object.
7859  *
7860  * This function checks if any mailbox completions are present on the mailbox
7861  * completion queue.
7862  **/
7863 static bool
7864 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7865 {
7867         uint32_t idx;
7868         struct lpfc_queue *mcq;
7869         struct lpfc_mcqe *mcqe;
7870         bool pending_completions = false;
7871         uint8_t qe_valid;
7872
7873         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7874                 return false;
7875
7876         /* Check for completions on mailbox completion queue */
7877
7878         mcq = phba->sli4_hba.mbx_cq;
7879         idx = mcq->hba_index;
7880         qe_valid = mcq->qe_valid;
7881         while (bf_get_le32(lpfc_cqe_valid,
7882                (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7883                 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7884                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7885                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7886                         pending_completions = true;
7887                         break;
7888                 }
7889                 idx = (idx + 1) % mcq->entry_count;
7890                 if (mcq->hba_index == idx)
7891                         break;
7892
7893                 /* if the index wrapped around, toggle the valid bit */
7894                 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7895                         qe_valid = (qe_valid) ? 0 : 1;
7896         }
7897         return pending_completions;
7899 }
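
/*
 * Usage sketch: lpfc_sli4_process_missed_mbox_completions() below runs
 * this check after silencing the EQ that serves the mailbox CQ:
 *
 *	sli4_hba->sli4_eq_clr_intr(fpeq);
 *	if (lpfc_sli4_mbox_completions_pending(phba))
 *		lpfc_sli4_process_eq(phba, fpeq);
 */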
7900
7901 /**
7902  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7903  *                                            that were missed.
7904  * @phba: Pointer to HBA context object.
7905  *
7906  * For sli4, it is possible to miss an interrupt. As such, mbox completions
7907  * may be missed, causing erroneous mailbox timeouts to occur. This function
7908  * checks to see if mbox completions are on the mailbox completion queue
7909  * and will process all the completions associated with the eq for the
7910  * mailbox completion queue.
7911  **/
7912 bool
7913 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7914 {
7915         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7916         uint32_t eqidx;
7917         struct lpfc_queue *fpeq = NULL;
7918         bool mbox_pending;
7919
7920         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7921                 return false;
7922
7923         /* Find the eq associated with the mcq */
7924
7925         if (sli4_hba->hdwq)
7926                 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++)
7927                         if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
7928                             sli4_hba->mbx_cq->assoc_qid) {
7929                                 fpeq = sli4_hba->hdwq[eqidx].hba_eq;
7930                                 break;
7931                         }
7932         if (!fpeq)
7933                 return false;
7934
7935         /* Turn off interrupts from this EQ */
7936
7937         sli4_hba->sli4_eq_clr_intr(fpeq);
7938
7939         /* Check to see if a mbox completion is pending */
7940
7941         mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7942
7943         /*
7944          * If a mbox completion is pending, process all the events on the EQ
7945          * associated with the mbox completion queue (this could include
7946          * mailbox commands, async events, els commands, receive queue data
7947          * and fcp commands)
7948          */
7949
7950         if (mbox_pending)
7951                 /* process and rearm the EQ */
7952                 lpfc_sli4_process_eq(phba, fpeq);
7953         else
7954                 /* Always clear and re-arm the EQ */
7955                 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7956
7957         return mbox_pending;
7959 }
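
/*
 * Usage sketch: the mailbox timeout handler tries this first, so a
 * completion whose interrupt was lost is processed instead of being
 * treated as a real timeout (see lpfc_mbox_timeout_handler() below):
 *
 *	if (lpfc_sli4_process_missed_mbox_completions(phba))
 *		return;		// completion was only missed, not lost
 */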
7960
7961 /**
7962  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7963  * @phba: Pointer to HBA context object.
7964  *
7965  * This function is called from worker thread when a mailbox command times out.
7966  * The caller is not required to hold any locks. This function will reset the
7967  * HBA and recover all the pending commands.
7968  **/
7969 void
7970 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7971 {
7972         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7973         MAILBOX_t *mb = NULL;
7974
7975         struct lpfc_sli *psli = &phba->sli;
7976
7977         /* If the mailbox completed, process the completion and return */
7978         if (lpfc_sli4_process_missed_mbox_completions(phba))
7979                 return;
7980
7981         if (pmbox != NULL)
7982                 mb = &pmbox->u.mb;
7983         /* Check the pmbox pointer first.  There is a race condition
7984          * between the mbox timeout handler getting executed in the
7985          * worklist and the mailbox actually completing. When this
7986          * race condition occurs, the mbox_active will be NULL.
7987          */
7988         spin_lock_irq(&phba->hbalock);
7989         if (pmbox == NULL) {
7990                 lpfc_printf_log(phba, KERN_WARNING,
7991                                 LOG_MBOX | LOG_SLI,
7992                                 "0353 Active Mailbox cleared - mailbox timeout "
7993                                 "exiting\n");
7994                 spin_unlock_irq(&phba->hbalock);
7995                 return;
7996         }
7997
7998         /* Mbox cmd <mbxCommand> timeout */
7999         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8000                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
8001                         mb->mbxCommand,
8002                         phba->pport->port_state,
8003                         phba->sli.sli_flag,
8004                         phba->sli.mbox_active);
8005         spin_unlock_irq(&phba->hbalock);
8006
8007         /* Setting state unknown so lpfc_sli_abort_iocb_ring
8008          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8009          * it to fail all outstanding SCSI IO.
8010          */
8011         spin_lock_irq(&phba->pport->work_port_lock);
8012         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8013         spin_unlock_irq(&phba->pport->work_port_lock);
8014         spin_lock_irq(&phba->hbalock);
8015         phba->link_state = LPFC_LINK_UNKNOWN;
8016         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8017         spin_unlock_irq(&phba->hbalock);
8018
8019         lpfc_sli_abort_fcp_rings(phba);
8020
8021         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8022                         "0345 Resetting board due to mailbox timeout\n");
8023
8024         /* Reset the HBA device */
8025         lpfc_reset_hba(phba);
8026 }
8027
8028 /**
8029  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8030  * @phba: Pointer to HBA context object.
8031  * @pmbox: Pointer to mailbox object.
8032  * @flag: Flag indicating how the mailbox need to be processed.
8033  *
8034  * This function is called by discovery code and HBA management code
8035  * to submit a mailbox command to firmware with SLI-3 interface spec. This
8036  * function gets the hbalock to protect the data structures.
8037  * The mailbox command can be submitted in polling mode, in which case
8038  * this function will wait in a polling loop for the completion of the
8039  * mailbox.
8040  * If the mailbox is submitted in no_wait mode (not polling), the
8041  * function will submit the command and return immediately without waiting
8042  * for the mailbox completion. The no_wait mode is supported only when the
8043  * HBA is in SLI2/SLI3 mode with interrupts enabled.
8044  * The SLI interface allows only one mailbox pending at a time. If the
8045  * mailbox is issued in polling mode and there is already a mailbox
8046  * pending, then the function will return an error. If the mailbox is issued
8047  * in NO_WAIT mode and there is a mailbox pending already, the function
8048  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8049  * The sli layer owns the mailbox object until the completion of the
8050  * mailbox command if this function returns MBX_BUSY or MBX_SUCCESS. For
8051  * all other return codes the caller owns the mailbox command after the
8052  * function returns.
8053  **/
8054 static int
8055 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8056                        uint32_t flag)
8057 {
8058         MAILBOX_t *mbx;
8059         struct lpfc_sli *psli = &phba->sli;
8060         uint32_t status, evtctr;
8061         uint32_t ha_copy, hc_copy;
8062         int i;
8063         unsigned long timeout;
8064         unsigned long drvr_flag = 0;
8065         uint32_t word0, ldata;
8066         void __iomem *to_slim;
8067         int processing_queue = 0;
8068
8069         spin_lock_irqsave(&phba->hbalock, drvr_flag);
8070         if (!pmbox) {
8071                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8072                 /* processing mbox queue from intr_handler */
8073                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8074                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8075                         return MBX_SUCCESS;
8076                 }
8077                 processing_queue = 1;
8078                 pmbox = lpfc_mbox_get(phba);
8079                 if (!pmbox) {
8080                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8081                         return MBX_SUCCESS;
8082                 }
8083         }
8084
8085         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8086                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8087                 if (!pmbox->vport) {
8088                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8089                         lpfc_printf_log(phba, KERN_ERR,
8090                                         LOG_MBOX | LOG_VPORT,
8091                                         "1806 Mbox x%x failed. No vport\n",
8092                                         pmbox->u.mb.mbxCommand);
8093                         dump_stack();
8094                         goto out_not_finished;
8095                 }
8096         }
8097
8098         /* If the PCI channel is in offline state, do not post mbox. */
8099         if (unlikely(pci_channel_offline(phba->pcidev))) {
8100                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8101                 goto out_not_finished;
8102         }
8103
8104         /* If HBA has a deferred error attention, fail the command. */
8105         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8106                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8107                 goto out_not_finished;
8108         }
8109
8110         psli = &phba->sli;
8111
8112         mbx = &pmbox->u.mb;
8113         status = MBX_SUCCESS;
8114
8115         if (phba->link_state == LPFC_HBA_ERROR) {
8116                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8117
8118                 /* Mbox command <mbxCommand> cannot issue */
8119                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8120                                 "(%d):0311 Mailbox command x%x cannot "
8121                                 "issue Data: x%x x%x\n",
8122                                 pmbox->vport ? pmbox->vport->vpi : 0,
8123                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8124                 goto out_not_finished;
8125         }
8126
8127         if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8128                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8129                         !(hc_copy & HC_MBINT_ENA)) {
8130                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8131                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8132                                 "(%d):2528 Mailbox command x%x cannot "
8133                                 "issue Data: x%x x%x\n",
8134                                 pmbox->vport ? pmbox->vport->vpi : 0,
8135                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8136                         goto out_not_finished;
8137                 }
8138         }
8139
8140         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8141                 /* Polling for a mbox command when another one is already active
8142                  * is not allowed in SLI. Also, the driver must have established
8143                  * SLI2 mode to queue and process multiple mbox commands.
8144                  */
8145
8146                 if (flag & MBX_POLL) {
8147                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8148
8149                         /* Mbox command <mbxCommand> cannot issue */
8150                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8151                                         "(%d):2529 Mailbox command x%x "
8152                                         "cannot issue Data: x%x x%x\n",
8153                                         pmbox->vport ? pmbox->vport->vpi : 0,
8154                                         pmbox->u.mb.mbxCommand,
8155                                         psli->sli_flag, flag);
8156                         goto out_not_finished;
8157                 }
8158
8159                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8160                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8161                         /* Mbox command <mbxCommand> cannot issue */
8162                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8163                                         "(%d):2530 Mailbox command x%x "
8164                                         "cannot issue Data: x%x x%x\n",
8165                                         pmbox->vport ? pmbox->vport->vpi : 0,
8166                                         pmbox->u.mb.mbxCommand,
8167                                         psli->sli_flag, flag);
8168                         goto out_not_finished;
8169                 }
8170
8171                 /* Another mailbox command is still being processed, queue this
8172                  * command to be processed later.
8173                  */
8174                 lpfc_mbox_put(phba, pmbox);
8175
8176                 /* Mbox cmd issue - BUSY */
8177                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8178                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
8179                                 "x%x x%x x%x x%x\n",
8180                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8181                                 mbx->mbxCommand,
8182                                 phba->pport ? phba->pport->port_state : 0xff,
8183                                 psli->sli_flag, flag);
8184
8185                 psli->slistat.mbox_busy++;
8186                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8187
8188                 if (pmbox->vport) {
8189                         lpfc_debugfs_disc_trc(pmbox->vport,
8190                                 LPFC_DISC_TRC_MBOX_VPORT,
8191                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
8192                                 (uint32_t)mbx->mbxCommand,
8193                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8194                 } else {
8196                         lpfc_debugfs_disc_trc(phba->pport,
8197                                 LPFC_DISC_TRC_MBOX,
8198                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
8199                                 (uint32_t)mbx->mbxCommand,
8200                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8201                 }
8202
8203                 return MBX_BUSY;
8204         }
8205
8206         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8207
8208         /* If we are not polling, we MUST be in SLI2 mode */
8209         if (flag != MBX_POLL) {
8210                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8211                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
8212                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8213                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8214                         /* Mbox command <mbxCommand> cannot issue */
8215                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8216                                         "(%d):2531 Mailbox command x%x "
8217                                         "cannot issue Data: x%x x%x\n",
8218                                         pmbox->vport ? pmbox->vport->vpi : 0,
8219                                         pmbox->u.mb.mbxCommand,
8220                                         psli->sli_flag, flag);
8221                         goto out_not_finished;
8222                 }
8223                 /* timeout active mbox command */
8224                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8225                                            1000);
8226                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8227         }
8228
8229         /* Mailbox cmd <cmd> issue */
8230         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8231                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8232                         "x%x\n",
8233                         pmbox->vport ? pmbox->vport->vpi : 0,
8234                         mbx->mbxCommand,
8235                         phba->pport ? phba->pport->port_state : 0xff,
8236                         psli->sli_flag, flag);
8237
8238         if (mbx->mbxCommand != MBX_HEARTBEAT) {
8239                 if (pmbox->vport) {
8240                         lpfc_debugfs_disc_trc(pmbox->vport,
8241                                 LPFC_DISC_TRC_MBOX_VPORT,
8242                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8243                                 (uint32_t)mbx->mbxCommand,
8244                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8245                 } else {
8247                         lpfc_debugfs_disc_trc(phba->pport,
8248                                 LPFC_DISC_TRC_MBOX,
8249                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
8250                                 (uint32_t)mbx->mbxCommand,
8251                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8252                 }
8253         }
8254
8255         psli->slistat.mbox_cmd++;
8256         evtctr = psli->slistat.mbox_event;
8257
8258         /* next set own bit for the adapter and copy over command word */
8259         mbx->mbxOwner = OWN_CHIP;
8260
8261         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8262                 /* Populate mbox extension offset word. */
8263                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8264                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8265                                 = (uint8_t *)phba->mbox_ext
8266                                   - (uint8_t *)phba->mbox;
8267                 }
8268
8269                 /* Copy the mailbox extension data */
8270                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8271                         lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8272                                               (uint8_t *)phba->mbox_ext,
8273                                               pmbox->in_ext_byte_len);
8274                 }
8275                 /* Copy command data to host SLIM area */
8276                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8277         } else {
8278                 /* Populate mbox extension offset word. */
8279                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8280                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8281                                 = MAILBOX_HBA_EXT_OFFSET;
8282
8283                 /* Copy the mailbox extension data */
8284                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8285                         lpfc_memcpy_to_slim(phba->MBslimaddr +
8286                                 MAILBOX_HBA_EXT_OFFSET,
8287                                 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8288
8289                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8290                         /* copy command data into host mbox for cmpl */
8291                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8292                                               MAILBOX_CMD_SIZE);
8293
8294                 /* First copy mbox command data to HBA SLIM, skip past
8295                  * the first word */
8296                 to_slim = phba->MBslimaddr + sizeof(uint32_t);
8297                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8298                             MAILBOX_CMD_SIZE - sizeof(uint32_t));
8299
8300                 /* Next copy over first word, with mbxOwner set */
8301                 ldata = *((uint32_t *)mbx);
8302                 to_slim = phba->MBslimaddr;
8303                 writel(ldata, to_slim);
8304                 readl(to_slim); /* flush */
8305
8306                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8307                         /* switch over to host mailbox */
8308                         psli->sli_flag |= LPFC_SLI_ACTIVE;
8309         }
8310
8311         wmb();
8312
8313         switch (flag) {
8314         case MBX_NOWAIT:
8315                 /* Set up reference to mailbox command */
8316                 psli->mbox_active = pmbox;
8317                 /* Interrupt board to do it */
8318                 writel(CA_MBATT, phba->CAregaddr);
8319                 readl(phba->CAregaddr); /* flush */
8320                 /* Don't wait for it to finish, just return */
8321                 break;
8322
8323         case MBX_POLL:
8324                 /* Set up null reference to mailbox command */
8325                 psli->mbox_active = NULL;
8326                 /* Interrupt board to do it */
8327                 writel(CA_MBATT, phba->CAregaddr);
8328                 readl(phba->CAregaddr); /* flush */
8329
8330                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8331                         /* First read mbox status word */
8332                         word0 = *((uint32_t *)phba->mbox);
8333                         word0 = le32_to_cpu(word0);
8334                 } else {
8335                         /* First read mbox status word */
8336                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
8337                                 spin_unlock_irqrestore(&phba->hbalock,
8338                                                        drvr_flag);
8339                                 goto out_not_finished;
8340                         }
8341                 }
8342
8343                 /* Read the HBA Host Attention Register */
8344                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8345                         spin_unlock_irqrestore(&phba->hbalock,
8346                                                        drvr_flag);
8347                         goto out_not_finished;
8348                 }
8349                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8350                                                         1000) + jiffies;
8351                 i = 0;
8352                 /* Wait for command to complete */
8353                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8354                        (!(ha_copy & HA_MBATT) &&
8355                         (phba->link_state > LPFC_WARM_START))) {
8356                         if (time_after(jiffies, timeout)) {
8357                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8358                                 spin_unlock_irqrestore(&phba->hbalock,
8359                                                        drvr_flag);
8360                                 goto out_not_finished;
8361                         }
8362
8363                         /* Check if we took a mbox interrupt while we
8364                          * were polling */
8365                         if (((word0 & OWN_CHIP) != OWN_CHIP)
8366                             && (evtctr != psli->slistat.mbox_event))
8367                                 break;
8368
8369                         if (i++ > 10) {
8370                                 spin_unlock_irqrestore(&phba->hbalock,
8371                                                        drvr_flag);
8372                                 msleep(1);
8373                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8374                         }
8375
8376                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8377                                 /* First copy command data */
8378                                 word0 = *((uint32_t *)phba->mbox);
8379                                 word0 = le32_to_cpu(word0);
8380                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8381                                         MAILBOX_t *slimmb;
8382                                         uint32_t slimword0;
8383                                         /* Check real SLIM for any errors */
8384                                         slimword0 = readl(phba->MBslimaddr);
8385                                         slimmb = (MAILBOX_t *)&slimword0;
8386                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8387                                             && slimmb->mbxStatus) {
8388                                                 psli->sli_flag &=
8389                                                     ~LPFC_SLI_ACTIVE;
8390                                                 word0 = slimword0;
8391                                         }
8392                                 }
8393                         } else {
8394                                 /* First copy command data */
8395                                 word0 = readl(phba->MBslimaddr);
8396                         }
8397                         /* Read the HBA Host Attention Register */
8398                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8399                                 spin_unlock_irqrestore(&phba->hbalock,
8400                                                        drvr_flag);
8401                                 goto out_not_finished;
8402                         }
8403                 }
8404
8405                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8406                         /* copy results back to user */
8407                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8408                                                 MAILBOX_CMD_SIZE);
8409                         /* Copy the mailbox extension data */
8410                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8411                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8412                                                       pmbox->ctx_buf,
8413                                                       pmbox->out_ext_byte_len);
8414                         }
8415                 } else {
8416                         /* First copy command data */
8417                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8418                                                 MAILBOX_CMD_SIZE);
8419                         /* Copy the mailbox extension data */
8420                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8421                                 lpfc_memcpy_from_slim(
8422                                         pmbox->ctx_buf,
8423                                         phba->MBslimaddr +
8424                                         MAILBOX_HBA_EXT_OFFSET,
8425                                         pmbox->out_ext_byte_len);
8426                         }
8427                 }
8428
8429                 writel(HA_MBATT, phba->HAregaddr);
8430                 readl(phba->HAregaddr); /* flush */
8431
8432                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8433                 status = mbx->mbxStatus;
8434         }
8435
8436         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8437         return status;
8438
8439 out_not_finished:
8440         if (processing_queue) {
8441                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8442                 lpfc_mbox_cmpl_put(phba, pmbox);
8443         }
8444         return MBX_NOT_FINISHED;
8445 }
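
/*
 * Caller-side sketch (illustrative): ownership of the mailbox object
 * follows the return code documented above, so a NOWAIT caller frees the
 * mailbox only for the codes it still owns:
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(pmbox, phba->mbox_mem_pool);
 */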
8446
8447 /**
8448  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8449  * @phba: Pointer to HBA context object.
8450  *
8451  * The function blocks the posting of SLI4 asynchronous mailbox commands from
8452  * the driver internal pending mailbox queue. It will then try to wait out the
8453  * possible outstanding mailbox command before returning.
8454  *
8455  * Returns:
8456  *      0 - no mailbox command was outstanding, or it completed in time.
8457  *      1 - the wait for the outstanding mailbox command timed out.
8458  **/
8459 static int
8460 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8461 {
8462         struct lpfc_sli *psli = &phba->sli;
8463         int rc = 0;
8464         unsigned long timeout = 0;
8465
8466         /* Mark the asynchronous mailbox command posting as blocked */
8467         spin_lock_irq(&phba->hbalock);
8468         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8469         /* Determine how long we might wait for the active mailbox
8470          * command to be gracefully completed by firmware.
8471          */
8472         if (phba->sli.mbox_active)
8473                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8474                                                 phba->sli.mbox_active) *
8475                                                 1000) + jiffies;
8476         spin_unlock_irq(&phba->hbalock);
8477
8478         /* Make sure the mailbox is really active */
8479         if (timeout)
8480                 lpfc_sli4_process_missed_mbox_completions(phba);
8481
8482         /* Wait for the outstanding mailbox command to complete */
8483         while (phba->sli.mbox_active) {
8484                 /* Check active mailbox complete status every 2ms */
8485                 msleep(2);
8486                 if (time_after(jiffies, timeout)) {
8487                         /* Timeout, mark the outstanding cmd as not complete */
8488                         rc = 1;
8489                         break;
8490                 }
8491         }
8492
8493         /* Could not cleanly block async mailbox commands, so undo the block */
8494         if (rc) {
8495                 spin_lock_irq(&phba->hbalock);
8496                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8497                 spin_unlock_irq(&phba->hbalock);
8498         }
8499         return rc;
8500 }
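
/*
 * Pairing sketch: lpfc_sli_issue_mbox_s4() below brackets a synchronous
 * bootstrap post with this block/unblock pair:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */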
8501
8502 /**
8503  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
8504  * @phba: Pointer to HBA context object.
8505  *
8506  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8507  * commands from the driver internal pending mailbox queue. It makes sure
8508  * that there is no outstanding mailbox command before resuming posting
8509  * asynchronous mailbox commands. If, for any reason, there is an
8510  * outstanding mailbox command, it will try to wait it out before resuming
8511  * asynchronous mailbox command posting.
8512  **/
8513 static void
8514 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8515 {
8516         struct lpfc_sli *psli = &phba->sli;
8517
8518         spin_lock_irq(&phba->hbalock);
8519         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8520                 /* Asynchronous mailbox posting is not blocked, do nothing */
8521                 spin_unlock_irq(&phba->hbalock);
8522                 return;
8523         }
8524
8525         /* The outstanding synchronous mailbox command is guaranteed to be
8526          * done, whether it succeeded or timed out; after a timeout the
8527          * outstanding command is always removed. So just unblock posting of
8528          * async mailbox commands and resume.
8529          */
8530         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8531         spin_unlock_irq(&phba->hbalock);
8532
8533         /* wake up worker thread to post asynchronous mailbox command */
8534         lpfc_worker_wake_up(phba);
8535 }
8536
8537 /**
8538  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8539  * @phba: Pointer to HBA context object.
8540  * @mboxq: Pointer to mailbox object.
8541  *
8542  * The function waits for the bootstrap mailbox register ready bit from
8543  * the port for twice the regular mailbox command timeout value.
8544  *
8545  *      0 - no timeout on waiting for bootstrap mailbox register ready.
8546  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8547  **/
8548 static int
8549 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8550 {
8551         uint32_t db_ready;
8552         unsigned long timeout;
8553         struct lpfc_register bmbx_reg;
8554
8555         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8556                                    * 1000) + jiffies;
8557
8558         do {
8559                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8560                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8561                 if (!db_ready)
8562                         mdelay(2);
8563
8564                 if (time_after(jiffies, timeout))
8565                         return MBXERR_ERROR;
8566         } while (!db_ready);
8567
8568         return 0;
8569 }
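
/*
 * Usage sketch: each doorbell write in lpfc_sli4_post_sync_mbox() below
 * is followed by a ready-bit wait before the next step proceeds:
 *
 *	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
 *	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 *	if (rc)
 *		goto exit;
 */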
8570
8571 /**
8572  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8573  * @phba: Pointer to HBA context object.
8574  * @mboxq: Pointer to mailbox object.
8575  *
8576  * The function posts a mailbox to the port.  The mailbox is expected
8577  * to be completely filled in and ready for the port to operate on it.
8578  * This routine executes a synchronous completion operation on the
8579  * mailbox by polling for its completion.
8580  *
8581  * The caller must not be holding any locks when calling this routine.
8582  *
8583  * Returns:
8584  *      MBX_SUCCESS - mailbox posted successfully
8585  *      Any of the MBX error values.
8586  **/
8587 static int
8588 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8589 {
8590         int rc = MBX_SUCCESS;
8591         unsigned long iflag;
8592         uint32_t mcqe_status;
8593         uint32_t mbx_cmnd;
8594         struct lpfc_sli *psli = &phba->sli;
8595         struct lpfc_mqe *mb = &mboxq->u.mqe;
8596         struct lpfc_bmbx_create *mbox_rgn;
8597         struct dma_address *dma_address;
8598
8599         /*
8600          * Only one mailbox can be active to the bootstrap mailbox region
8601          * at a time and there is no queueing provided.
8602          */
8603         spin_lock_irqsave(&phba->hbalock, iflag);
8604         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8605                 spin_unlock_irqrestore(&phba->hbalock, iflag);
8606                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8607                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8608                                 "cannot issue Data: x%x x%x\n",
8609                                 mboxq->vport ? mboxq->vport->vpi : 0,
8610                                 mboxq->u.mb.mbxCommand,
8611                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8612                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8613                                 psli->sli_flag, MBX_POLL);
8614                 return MBXERR_ERROR;
8615         }
8616         /* The driver grabs the token and owns it until it is released */
8617         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8618         phba->sli.mbox_active = mboxq;
8619         spin_unlock_irqrestore(&phba->hbalock, iflag);
8620
8621         /* wait for bootstrap mbox register readiness */
8622         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8623         if (rc)
8624                 goto exit;
8625         /*
8626          * Initialize the bootstrap memory region to avoid stale data areas
8627          * in the mailbox post.  Then copy the caller's mailbox contents to
8628          * the bmbx mailbox region.
8629          */
8630         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8631         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8632         lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8633                                sizeof(struct lpfc_mqe));
8634
8635         /* Post the high mailbox dma address to the port and wait for ready. */
8636         dma_address = &phba->sli4_hba.bmbx.dma_address;
8637         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8638
8639         /* wait for bootstrap mbox register ready after hi-address write */
8640         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8641         if (rc)
8642                 goto exit;
8643
8644         /* Post the low mailbox dma address to the port. */
8645         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8646
8647         /* wait for bootstrap mbox register ready after low-address write */
8648         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8649         if (rc)
8650                 goto exit;
8651
8652         /*
8653          * Read the CQ to ensure the mailbox has completed.
8654          * If so, update the mailbox status so that the upper layers
8655          * can complete the request normally.
8656          */
8657         lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8658                                sizeof(struct lpfc_mqe));
8659         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8660         lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8661                                sizeof(struct lpfc_mcqe));
8662         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8663         /*
8664          * When the CQE status indicates a failure and the mailbox status
8665          * indicates success then copy the CQE status into the mailbox status
8666          * (and prefix it with x4000).
8667          */
8668         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8669                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8670                         bf_set(lpfc_mqe_status, mb,
8671                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
8672                 rc = MBXERR_ERROR;
8673         } else
8674                 lpfc_sli4_swap_str(phba, mboxq);
8675
8676         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8677                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8678                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8679                         " x%x x%x CQ: x%x x%x x%x x%x\n",
8680                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8681                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8682                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8683                         bf_get(lpfc_mqe_status, mb),
8684                         mb->un.mb_words[0], mb->un.mb_words[1],
8685                         mb->un.mb_words[2], mb->un.mb_words[3],
8686                         mb->un.mb_words[4], mb->un.mb_words[5],
8687                         mb->un.mb_words[6], mb->un.mb_words[7],
8688                         mb->un.mb_words[8], mb->un.mb_words[9],
8689                         mb->un.mb_words[10], mb->un.mb_words[11],
8690                         mb->un.mb_words[12], mboxq->mcqe.word0,
8691                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
8692                         mboxq->mcqe.trailer);
8693 exit:
8694         /* We are holding the token, no lock needed when releasing it */
8695         spin_lock_irqsave(&phba->hbalock, iflag);
8696         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8697         phba->sli.mbox_active = NULL;
8698         spin_unlock_irqrestore(&phba->hbalock, iflag);
8699         return rc;
8700 }
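
/*
 * Decoding sketch (an assumption, not driver code): because a CQE
 * failure is folded into the MQE status with the LPFC_MBX_ERROR_RANGE
 * (0x4000) prefix as noted above, a caller could recover the original
 * MCQE status like this:
 *
 *	status = bf_get(lpfc_mqe_status, &mboxq->u.mqe);
 *	if (status & LPFC_MBX_ERROR_RANGE)
 *		mcqe_status = status & ~LPFC_MBX_ERROR_RANGE;
 */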
8701
8702 /**
8703  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8704  * @phba: Pointer to HBA context object.
8705  * @mboxq: Pointer to mailbox object.
8706  * @flag: Flag indicating how the mailbox need to be processed.
8707  *
8708  * This function is called by discovery code and HBA management code to submit
8709  * a mailbox command to firmware with SLI-4 interface spec.
8710  *
8711  * For all return codes, the caller owns the mailbox command after the
8712  * function returns.
8713  **/
8714 static int
8715 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8716                        uint32_t flag)
8717 {
8718         struct lpfc_sli *psli = &phba->sli;
8719         unsigned long iflags;
8720         int rc;
8721
8722         /* dump from issue mailbox command if setup */
8723         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8724
8725         rc = lpfc_mbox_dev_check(phba);
8726         if (unlikely(rc)) {
8727                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8728                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8729                                 "cannot issue Data: x%x x%x\n",
8730                                 mboxq->vport ? mboxq->vport->vpi : 0,
8731                                 mboxq->u.mb.mbxCommand,
8732                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8733                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8734                                 psli->sli_flag, flag);
8735                 goto out_not_finished;
8736         }
8737
8738         /* Detect polling mode and jump to a handler */
8739         if (!phba->sli4_hba.intr_enable) {
8740                 if (flag == MBX_POLL)
8741                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8742                 else
8743                         rc = -EIO;
8744                 if (rc != MBX_SUCCESS)
8745                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8746                                         "(%d):2541 Mailbox command x%x "
8747                                         "(x%x/x%x) failure: "
8748                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
8749                                         "Data: x%x x%x\n",
8750                                         mboxq->vport ? mboxq->vport->vpi : 0,
8751                                         mboxq->u.mb.mbxCommand,
8752                                         lpfc_sli_config_mbox_subsys_get(phba,
8753                                                                         mboxq),
8754                                         lpfc_sli_config_mbox_opcode_get(phba,
8755                                                                         mboxq),
8756                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8757                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8758                                         bf_get(lpfc_mcqe_ext_status,
8759                                                &mboxq->mcqe),
8760                                         psli->sli_flag, flag);
8761                 return rc;
8762         } else if (flag == MBX_POLL) {
8763                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8764                                 "(%d):2542 Try to issue mailbox command "
8765                                 "x%x (x%x/x%x) synchronously ahead of async "
8766                                 "mailbox command queue: x%x x%x\n",
8767                                 mboxq->vport ? mboxq->vport->vpi : 0,
8768                                 mboxq->u.mb.mbxCommand,
8769                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8770                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8771                                 psli->sli_flag, flag);
8772                 /* Try to block the asynchronous mailbox posting */
8773                 rc = lpfc_sli4_async_mbox_block(phba);
8774                 if (!rc) {
8775                         /* Successfully blocked, now issue sync mbox cmd */
8776                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8777                         if (rc != MBX_SUCCESS)
8778                                 lpfc_printf_log(phba, KERN_WARNING,
8779                                         LOG_MBOX | LOG_SLI,
8780                                         "(%d):2597 Sync Mailbox command "
8781                                         "x%x (x%x/x%x) failure: "
8782                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
8783                                         "Data: x%x x%x\n",
8784                                         mboxq->vport ? mboxq->vport->vpi : 0,
8785                                         mboxq->u.mb.mbxCommand,
8786                                         lpfc_sli_config_mbox_subsys_get(phba,
8787                                                                         mboxq),
8788                                         lpfc_sli_config_mbox_opcode_get(phba,
8789                                                                         mboxq),
8790                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8791                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8792                                         bf_get(lpfc_mcqe_ext_status,
8793                                                &mboxq->mcqe),
8794                                         psli->sli_flag, flag);
8795                         /* Unblock the async mailbox posting afterward */
8796                         lpfc_sli4_async_mbox_unblock(phba);
8797                 }
8798                 return rc;
8799         }
8800
8801         /* Now, interrupt mode asynchronous mailbox command */
8802         rc = lpfc_mbox_cmd_check(phba, mboxq);
8803         if (rc) {
8804                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8805                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8806                                 "cannot issue Data: x%x x%x\n",
8807                                 mboxq->vport ? mboxq->vport->vpi : 0,
8808                                 mboxq->u.mb.mbxCommand,
8809                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8810                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8811                                 psli->sli_flag, flag);
8812                 goto out_not_finished;
8813         }
8814
8815         /* Put the mailbox command to the driver internal FIFO */
8816         psli->slistat.mbox_busy++;
8817         spin_lock_irqsave(&phba->hbalock, iflags);
8818         lpfc_mbox_put(phba, mboxq);
8819         spin_unlock_irqrestore(&phba->hbalock, iflags);
8820         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8821                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
8822                         "x%x (x%x/x%x) x%x x%x x%x\n",
8823                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8824                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8825                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8826                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8827                         phba->pport->port_state,
8828                         psli->sli_flag, MBX_NOWAIT);
8829         /* Wake up worker thread to transport mailbox command from head */
8830         lpfc_worker_wake_up(phba);
8831
8832         return MBX_BUSY;
8833
8834 out_not_finished:
8835         return MBX_NOT_FINISHED;
8836 }
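
/*
 * Async caller sketch (my_mbox_cmpl is a hypothetical completion
 * handler): MBX_BUSY from the NOWAIT path means "accepted and queued";
 * the handler set in mbox_cmpl runs when the command finishes:
 *
 *	mboxq->mbox_cmpl = my_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */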
8837
8838 /**
8839  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8840  * @phba: Pointer to HBA context object.
8841  *
8842  * This function is called by worker thread to send a mailbox command to
8843  * SLI4 HBA firmware.
8844  *
8845  **/
8846 int
8847 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8848 {
8849         struct lpfc_sli *psli = &phba->sli;
8850         LPFC_MBOXQ_t *mboxq;
8851         int rc = MBX_SUCCESS;
8852         unsigned long iflags;
8853         struct lpfc_mqe *mqe;
8854         uint32_t mbx_cmnd;
8855
8856         /* Check interrupt mode before posting async mailbox command */
8857         if (unlikely(!phba->sli4_hba.intr_enable))
8858                 return MBX_NOT_FINISHED;
8859
8860         /* Check for mailbox command service token */
8861         spin_lock_irqsave(&phba->hbalock, iflags);
8862         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8863                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8864                 return MBX_NOT_FINISHED;
8865         }
8866         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8867                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8868                 return MBX_NOT_FINISHED;
8869         }
8870         if (unlikely(phba->sli.mbox_active)) {
8871                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8872                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8873                                 "0384 There is pending active mailbox cmd\n");
8874                 return MBX_NOT_FINISHED;
8875         }
8876         /* Take the mailbox command service token */
8877         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8878
8879         /* Get the next mailbox command from head of queue */
8880         mboxq = lpfc_mbox_get(phba);
8881
8882         /* If no mailbox command is waiting to be posted, we're done */
8883         if (!mboxq) {
8884                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8885                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8886                 return MBX_SUCCESS;
8887         }
8888         phba->sli.mbox_active = mboxq;
8889         spin_unlock_irqrestore(&phba->hbalock, iflags);
8890
8891         /* Check device readiness for posting mailbox command */
8892         rc = lpfc_mbox_dev_check(phba);
8893         if (unlikely(rc))
8894                 /* Driver clean routine will clean up pending mailbox */
8895                 goto out_not_finished;
8896
8897         /* Prepare the mbox command to be posted */
8898         mqe = &mboxq->u.mqe;
8899         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8900
8901         /* Start timer for the mbox_tmo and log some mailbox post messages */
8902         mod_timer(&psli->mbox_tmo, (jiffies +
8903                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8904
8905         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8906                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8907                         "x%x x%x\n",
8908                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8909                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8910                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8911                         phba->pport->port_state, psli->sli_flag);
8912
8913         if (mbx_cmnd != MBX_HEARTBEAT) {
8914                 if (mboxq->vport) {
8915                         lpfc_debugfs_disc_trc(mboxq->vport,
8916                                 LPFC_DISC_TRC_MBOX_VPORT,
8917                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8918                                 mbx_cmnd, mqe->un.mb_words[0],
8919                                 mqe->un.mb_words[1]);
8920                 } else {
8921                         lpfc_debugfs_disc_trc(phba->pport,
8922                                 LPFC_DISC_TRC_MBOX,
8923                                 "MBOX Send: cmd:x%x mb:x%x x%x",
8924                                 mbx_cmnd, mqe->un.mb_words[0],
8925                                 mqe->un.mb_words[1]);
8926                 }
8927         }
8928         psli->slistat.mbox_cmd++;
8929
8930         /* Post the mailbox command to the port */
8931         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8932         if (rc != MBX_SUCCESS) {
8933                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8934                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8935                                 "cannot issue Data: x%x x%x\n",
8936                                 mboxq->vport ? mboxq->vport->vpi : 0,
8937                                 mboxq->u.mb.mbxCommand,
8938                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8939                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8940                                 psli->sli_flag, MBX_NOWAIT);
8941                 goto out_not_finished;
8942         }
8943
8944         return rc;
8945
8946 out_not_finished:
8947         spin_lock_irqsave(&phba->hbalock, iflags);
8948         if (phba->sli.mbox_active) {
8949                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8950                 __lpfc_mbox_cmpl_put(phba, mboxq);
8951                 /* Release the token */
8952                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8953                 phba->sli.mbox_active = NULL;
8954         }
8955         spin_unlock_irqrestore(&phba->hbalock, iflags);
8956
8957         return MBX_NOT_FINISHED;
8958 }
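
/*
 * Usage sketch (illustrative): the worker thread simply invokes this
 * routine when woken; the LPFC_SLI_MBOX_ACTIVE token serializes posts,
 * and the completion path wakes the worker again for the next command:
 *
 *	if (lpfc_sli4_post_async_mbox(phba) == MBX_NOT_FINISHED)
 *		;	/* the failed command was already completed via
 *			 * __lpfc_mbox_cmpl_put() in the error path above */
 */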
8959
8960 /**
8961  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8962  * @phba: Pointer to HBA context object.
8963  * @pmbox: Pointer to mailbox object.
8964  * @flag: Flag indicating how the mailbox needs to be processed.
8965  *
8966  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
8967  * the API jump table function pointer in the lpfc_hba struct.
8968  *
8969  * Return codes are those of the underlying routine; the caller owns the
8970  * mailbox command after the function returns.
8971  **/
8972 int
8973 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8974 {
8975         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8976 }
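
/*
 * Dispatch sketch (illustrative): with the jump table in place, callers
 * never name the SLI revision; the same call reaches either
 * lpfc_sli_issue_mbox_s3() or lpfc_sli_issue_mbox_s4():
 *
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 */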
8977
8978 /**
8979  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8980  * @phba: The hba struct for which this call is being executed.
8981  * @dev_grp: The HBA PCI-Device group number.
8982  *
8983  * This routine sets up the mbox interface API function jump table in @phba
8984  * struct.
8985  * Returns: 0 - success, -ENODEV - failure.
8986  **/
8987 int
8988 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8989 {
8990
8991         switch (dev_grp) {
8992         case LPFC_PCI_DEV_LP:
8993                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8994                 phba->lpfc_sli_handle_slow_ring_event =
8995                                 lpfc_sli_handle_slow_ring_event_s3;
8996                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8997                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8998                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8999                 break;
9000         case LPFC_PCI_DEV_OC:
9001                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9002                 phba->lpfc_sli_handle_slow_ring_event =
9003                                 lpfc_sli_handle_slow_ring_event_s4;
9004                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9005                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9006                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9007                 break;
9008         default:
9009                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9010                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
9011                                 dev_grp);
9012                 return -ENODEV;
9014         }
9015         return 0;
9016 }
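
/*
 * Probe-time sketch (illustrative): this setup runs once per HBA, keyed
 * off the PCI device group, before any mailbox traffic is generated:
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;	/* unsupported device group */
 */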
9017
9018 /**
9019  * __lpfc_sli_ringtx_put - Add an iocb to the txq
9020  * @phba: Pointer to HBA context object.
9021  * @pring: Pointer to driver SLI ring object.
9022  * @piocb: Pointer to the command iocb to add to the txq.
9023  *
9024  * This function is called with hbalock held to add a command
9025  * iocb to the txq when SLI layer cannot submit the command iocb
9026  * to the ring.
9027  **/
9028 void
9029 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9030                     struct lpfc_iocbq *piocb)
9031 {
9032         lockdep_assert_held(&phba->hbalock);
9033         /* Insert the caller's iocb in the txq tail for later processing. */
9034         list_add_tail(&piocb->list, &pring->txq);
9035 }
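
/*
 * Locking sketch (illustrative): __lpfc_sli_ringtx_put() requires the
 * caller to hold hbalock, as the lockdep assertion enforces; a locked
 * caller sequence looks like:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */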
9036
9037 /**
9038  * lpfc_sli_next_iocb - Get the next iocb in the txq
9039  * @phba: Pointer to HBA context object.
9040  * @pring: Pointer to driver SLI ring object.
9041  * @piocb: Pointer to address of newly added command iocb.
9042  *
9043  * This function is called with hbalock held before a new
9044  * iocb is submitted to the firmware. It checks the txq so that
9045  * iocbs already queued there are flushed to the firmware before
9046  * any new iocb is submitted.
9047  * If there are iocbs in the txq which need to be submitted
9048  * to the firmware, lpfc_sli_next_iocb dequeues the first element
9049  * of the txq and returns it.
9050  * If the txq is empty, the function returns *piocb and sets
9051  * *piocb to NULL. The caller needs to check *piocb to see
9052  * whether its own iocb has been consumed.
9053  **/
9054 static struct lpfc_iocbq *
9055 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9056                    struct lpfc_iocbq **piocb)
9057 {
9058         struct lpfc_iocbq *nextiocb;
9059
9060         lockdep_assert_held(&phba->hbalock);
9061
9062         nextiocb = lpfc_sli_ringtx_get(phba, pring);
9063         if (!nextiocb) {
9064                 nextiocb = *piocb;
9065                 *piocb = NULL;
9066         }
9067
9068         return nextiocb;
9069 }
9070
9071 /**
9072  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9073  * @phba: Pointer to HBA context object.
9074  * @ring_number: SLI ring number to issue iocb on.
9075  * @piocb: Pointer to command iocb.
9076  * @flag: Flag indicating if this command can be put into txq.
9077  *
9078  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9079  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9080  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9081  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9082  * this function allows only iocbs for posting buffers. This function finds
9083  * next available slot in the command ring and posts the command to the
9084  * available slot and writes the port attention register to request HBA start
9085  * processing new iocb. If there is no slot available in the ring and
9086  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9087  * the function returns IOCB_BUSY.
9088  *
9089  * This function is called with hbalock held. The function will return success
9090  * after it successfully submits the iocb to the firmware or after adding it
9091  * to the txq.
9092  **/
9093 static int
9094 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9095                     struct lpfc_iocbq *piocb, uint32_t flag)
9096 {
9097         struct lpfc_iocbq *nextiocb;
9098         IOCB_t *iocb;
9099         struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9100
9101         lockdep_assert_held(&phba->hbalock);
9102
9103         if (piocb->iocb_cmpl && (!piocb->vport) &&
9104            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9105            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9106                 lpfc_printf_log(phba, KERN_ERR,
9107                                 LOG_SLI | LOG_VPORT,
9108                                 "1807 IOCB x%x failed. No vport\n",
9109                                 piocb->iocb.ulpCommand);
9110                 dump_stack();
9111                 return IOCB_ERROR;
9112         }
9113
9114
9115         /* If the PCI channel is in offline state, do not post iocbs. */
9116         if (unlikely(pci_channel_offline(phba->pcidev)))
9117                 return IOCB_ERROR;
9118
9119         /* If HBA has a deferred error attention, fail the iocb. */
9120         if (unlikely(phba->hba_flag & DEFER_ERATT))
9121                 return IOCB_ERROR;
9122
9123         /*
9124          * We should never get an IOCB if we are in a < LINK_DOWN state
9125          */
9126         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9127                 return IOCB_ERROR;
9128
9129         /*
9130          * Check to see if we are blocking IOCB processing because of an
9131          * outstanding event.
9132          */
9133         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9134                 goto iocb_busy;
9135
9136         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9137                 /*
9138                  * Only CREATE_XRI, CLOSE_XRI, QUE_RING_BUF, and Menlo
9139                  * GEN_REQUEST64 commands can be issued if the link is not up.
9140                  */
9141                 switch (piocb->iocb.ulpCommand) {
9142                 case CMD_GEN_REQUEST64_CR:
9143                 case CMD_GEN_REQUEST64_CX:
9144                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9145                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9146                                         FC_RCTL_DD_UNSOL_CMD) ||
9147                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9148                                         MENLO_TRANSPORT_TYPE))
9149
9150                                 goto iocb_busy;
9151                         break;
9152                 case CMD_QUE_RING_BUF_CN:
9153                 case CMD_QUE_RING_BUF64_CN:
9154                         /*
9155                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9156                          * completion, iocb_cmpl MUST be 0.
9157                          */
9158                         if (piocb->iocb_cmpl)
9159                                 piocb->iocb_cmpl = NULL;
9160                         /* fall through */
9161                 case CMD_CREATE_XRI_CR:
9162                 case CMD_CLOSE_XRI_CN:
9163                 case CMD_CLOSE_XRI_CX:
9164                         break;
9165                 default:
9166                         goto iocb_busy;
9167                 }
9168
9169         /*
9170          * For FCP commands, we must be in a state where we can process link
9171          * attention events.
9172          */
9173         } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9174                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9175                 goto iocb_busy;
9176         }
9177
9178         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9179                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9180                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9181
9182         if (iocb)
9183                 lpfc_sli_update_ring(phba, pring);
9184         else
9185                 lpfc_sli_update_full_ring(phba, pring);
9186
9187         if (!piocb)
9188                 return IOCB_SUCCESS;
9189
9190         goto out_busy;
9191
9192  iocb_busy:
9193         pring->stats.iocb_cmd_delay++;
9194
9195  out_busy:
9196
9197         if (!(flag & SLI_IOCB_RET_IOCB)) {
9198                 __lpfc_sli_ringtx_put(phba, pring, piocb);
9199                 return IOCB_SUCCESS;
9200         }
9201
9202         return IOCB_BUSY;
9203 }
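
/*
 * Caller-side sketch (illustrative): the flag argument decides who keeps
 * the iocb when the ring is full. Without SLI_IOCB_RET_IOCB the routine
 * parks the iocb on the txq and reports IOCB_SUCCESS; with it, IOCB_BUSY
 * hands the iocb back to the caller:
 *
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				   SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		;	/* caller still owns piocb and may retry later */
 */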
9204
9205 /**
9206  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9207  * @phba: Pointer to HBA context object.
9208  * @piocbq: Pointer to command iocb.
9209  * @sglq: Pointer to the scatter gather queue object.
9210  *
9211  * This routine converts the bpl or bde that is in the IOCB
9212  * to a sgl list for the sli4 hardware. The physical address
9213  * of the bpl/bde is converted back to a virtual address.
9214  * If the IOCB contains a BPL then the list of BDE's is
9215  * converted to sli4_sge's. If the IOCB contains a single
9216  * BDE then it is converted to a single sli_sge.
9217  * The IOCB is still in cpu endianness so the contents of
9218  * the bpl can be used without byte swapping.
9219  *
9220  * Returns valid XRI = Success, NO_XRI = Failure.
9221 **/
9222 static uint16_t
9223 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9224                 struct lpfc_sglq *sglq)
9225 {
9226         uint16_t xritag = NO_XRI;
9227         struct ulp_bde64 *bpl = NULL;
9228         struct ulp_bde64 bde;
9229         struct sli4_sge *sgl  = NULL;
9230         struct lpfc_dmabuf *dmabuf;
9231         IOCB_t *icmd;
9232         int numBdes = 0;
9233         int i = 0;
9234         uint32_t offset = 0; /* accumulated offset in the sg request list */
9235         int inbound = 0; /* number of sg reply entries inbound from firmware */
9236
9237         if (!piocbq || !sglq)
9238                 return xritag;
9239
9240         sgl  = (struct sli4_sge *)sglq->sgl;
9241         icmd = &piocbq->iocb;
9242         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9243                 return sglq->sli4_xritag;
9244         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9245                 numBdes = icmd->un.genreq64.bdl.bdeSize /
9246                                 sizeof(struct ulp_bde64);
9247                 /* The addrHigh and addrLow fields within the IOCB
9248                  * have not been byteswapped yet so there is no
9249                  * need to swap them back.
9250                  */
9251                 if (piocbq->context3)
9252                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9253                 else
9254                         return xritag;
9255
9256                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
9257                 if (!bpl)
9258                         return xritag;
9259
9260                 for (i = 0; i < numBdes; i++) {
9261                         /* Should already be byte swapped. */
9262                         sgl->addr_hi = bpl->addrHigh;
9263                         sgl->addr_lo = bpl->addrLow;
9264
9265                         sgl->word2 = le32_to_cpu(sgl->word2);
9266                         if ((i+1) == numBdes)
9267                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
9268                         else
9269                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
9270                         /* swap the size field back to the cpu so we
9271                          * can assign it to the sgl.
9272                          */
9273                         bde.tus.w = le32_to_cpu(bpl->tus.w);
9274                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9275                         /* The offsets in the sgl need to be accumulated
9276                          * separately for the request and reply lists.
9277                          * The request is always first, the reply follows.
9278                          */
9279                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9280                                 /* add up the reply sg entries */
9281                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9282                                         inbound++;
9283                                 /* first inbound? reset the offset */
9284                                 if (inbound == 1)
9285                                         offset = 0;
9286                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9287                                 bf_set(lpfc_sli4_sge_type, sgl,
9288                                         LPFC_SGE_TYPE_DATA);
9289                                 offset += bde.tus.f.bdeSize;
9290                         }
9291                         sgl->word2 = cpu_to_le32(sgl->word2);
9292                         bpl++;
9293                         sgl++;
9294                 }
9295         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9296                         /* The addrHigh and addrLow fields of the BDE have not
9297                          * been byteswapped yet so they need to be swapped
9298                          * before putting them in the sgl.
9299                          */
9300                         sgl->addr_hi =
9301                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9302                         sgl->addr_lo =
9303                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9304                         sgl->word2 = le32_to_cpu(sgl->word2);
9305                         bf_set(lpfc_sli4_sge_last, sgl, 1);
9306                         sgl->word2 = cpu_to_le32(sgl->word2);
9307                         sgl->sge_len =
9308                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9309         }
9310         return sglq->sli4_xritag;
9311 }
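
/*
 * Conversion sketch (illustrative): each little-endian BDE in the BPL
 * maps onto one SGE. Only the length word is swapped to CPU order before
 * being re-encoded, mirroring the loop above:
 *
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *
 * The address words are already little-endian in the IOCB, so they are
 * copied through without swapping.
 */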
9312
9313 /**
9314  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
9315  * @phba: Pointer to HBA context object.
9316  * @iocbq: Pointer to command iocb.
9317  * @wqe: Pointer to the work queue entry.
9318  *
9319  * This routine converts the iocb command to its Work Queue Entry
9320  * equivalent. The wqe pointer should not have any fields set when
9321  * this routine is called because it will memcpy over them.
9322  * This routine does not set the CQ_ID or the WQEC bits in the
9323  * wqe.
9324  *
9325  * Returns: 0 = Success, IOCB_ERROR = Failure.
9326  **/
9327 static int
9328 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9329                 union lpfc_wqe128 *wqe)
9330 {
9331         uint32_t xmit_len = 0, total_len = 0;
9332         uint8_t ct = 0;
9333         uint32_t fip;
9334         uint32_t abort_tag;
9335         uint8_t command_type = ELS_COMMAND_NON_FIP;
9336         uint8_t cmnd;
9337         uint16_t xritag;
9338         uint16_t abrt_iotag;
9339         struct lpfc_iocbq *abrtiocbq;
9340         struct ulp_bde64 *bpl = NULL;
9341         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9342         int numBdes, i;
9343         struct ulp_bde64 bde;
9344         struct lpfc_nodelist *ndlp;
9345         uint32_t *pcmd;
9346         uint32_t if_type;
9347
9348         fip = phba->hba_flag & HBA_FIP_SUPPORT;
9349         /* The fcp commands will set command type */
9350         if (iocbq->iocb_flag &  LPFC_IO_FCP)
9351                 command_type = FCP_COMMAND;
9352         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9353                 command_type = ELS_COMMAND_FIP;
9354         else
9355                 command_type = ELS_COMMAND_NON_FIP;
9356
9357         if (phba->fcp_embed_io)
9358                 memset(wqe, 0, sizeof(union lpfc_wqe128));
9359         /* Some of the fields are in the right position already */
9360         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9361         if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9362                 /* The ct field has moved so reset */
9363                 wqe->generic.wqe_com.word7 = 0;
9364                 wqe->generic.wqe_com.word10 = 0;
9365         }
9366
9367         abort_tag = (uint32_t) iocbq->iotag;
9368         xritag = iocbq->sli4_xritag;
9369         /* words0-2 bpl convert bde */
9370         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9371                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9372                                 sizeof(struct ulp_bde64);
9373                 bpl  = (struct ulp_bde64 *)
9374                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9375                 if (!bpl)
9376                         return IOCB_ERROR;
9377
9378                 /* Should already be byte swapped. */
9379                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
9380                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
9381                 /* swap the size field back to the cpu so we
9382                  * can assign it to the sgl.
9383                  */
9384                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
9385                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9386                 total_len = 0;
9387                 for (i = 0; i < numBdes; i++) {
9388                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
9389                         total_len += bde.tus.f.bdeSize;
9390                 }
9391         } else
9392                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9393
9394         iocbq->iocb.ulpIoTag = iocbq->iotag;
9395         cmnd = iocbq->iocb.ulpCommand;
9396
9397         switch (iocbq->iocb.ulpCommand) {
9398         case CMD_ELS_REQUEST64_CR:
9399                 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9400                         ndlp = iocbq->context_un.ndlp;
9401                 else
9402                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
9403                 if (!iocbq->iocb.ulpLe) {
9404                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9405                                 "2007 Only Limited Edition cmd Format"
9406                                 " supported 0x%x\n",
9407                                 iocbq->iocb.ulpCommand);
9408                         return IOCB_ERROR;
9409                 }
9410
9411                 wqe->els_req.payload_len = xmit_len;
9412                 /* Els_request64 has a TMO */
9413                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9414                         iocbq->iocb.ulpTimeout);
9415                 /* Need a VF for word 4, so set the vf bit */
9416                 bf_set(els_req64_vf, &wqe->els_req, 0);
9417                 /* And a VFID for word 12 */
9418                 bf_set(els_req64_vfid, &wqe->els_req, 0);
9419                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9420                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9421                        iocbq->iocb.ulpContext);
9422                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9423                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9424                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9425                 if (command_type == ELS_COMMAND_FIP)
9426                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9427                                         >> LPFC_FIP_ELS_ID_SHIFT);
9428                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9429                                         iocbq->context2)->virt);
9430                 if_type = bf_get(lpfc_sli_intf_if_type,
9431                                         &phba->sli4_hba.sli_intf);
9432                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9433                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9434                                 *pcmd == ELS_CMD_SCR ||
9435                                 *pcmd == ELS_CMD_FDISC ||
9436                                 *pcmd == ELS_CMD_LOGO ||
9437                                 *pcmd == ELS_CMD_PLOGI)) {
9438                                 bf_set(els_req64_sp, &wqe->els_req, 1);
9439                                 bf_set(els_req64_sid, &wqe->els_req,
9440                                         iocbq->vport->fc_myDID);
9441                                 if ((*pcmd == ELS_CMD_FLOGI) &&
9442                                         !(phba->fc_topology ==
9443                                                 LPFC_TOPOLOGY_LOOP))
9444                                         bf_set(els_req64_sid, &wqe->els_req, 0);
9445                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9446                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9447                                         phba->vpi_ids[iocbq->vport->vpi]);
9448                         } else if (pcmd && iocbq->context1) {
9449                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9450                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9451                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9452                         }
9453                 }
9454                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9455                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9456                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9457                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9458                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9459                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9460                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9461                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9462                 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9463                 break;
9464         case CMD_XMIT_SEQUENCE64_CX:
9465                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9466                        iocbq->iocb.un.ulpWord[3]);
9467                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9468                        iocbq->iocb.unsli3.rcvsli3.ox_id);
9469                 /* The entire sequence is transmitted for this IOCB */
9470                 xmit_len = total_len;
9471                 cmnd = CMD_XMIT_SEQUENCE64_CR;
9472                 if (phba->link_flag & LS_LOOPBACK_MODE)
9473                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9474                 /* fall through */
9475         case CMD_XMIT_SEQUENCE64_CR:
9476                 /* word3 iocb=io_tag32 wqe=reserved */
9477                 wqe->xmit_sequence.rsvd3 = 0;
9478                 /* word4 relative_offset memcpy */
9479                 /* word5 r_ctl/df_ctl memcpy */
9480                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9481                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9482                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9483                        LPFC_WQE_IOD_WRITE);
9484                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9485                        LPFC_WQE_LENLOC_WORD12);
9486                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9487                 wqe->xmit_sequence.xmit_len = xmit_len;
9488                 command_type = OTHER_COMMAND;
9489                 break;
9490         case CMD_XMIT_BCAST64_CN:
9491                 /* word3 iocb=iotag32 wqe=seq_payload_len */
9492                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9493                 /* word4 iocb=rsvd wqe=rsvd */
9494                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9495                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9496                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9497                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9498                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9499                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9500                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9501                        LPFC_WQE_LENLOC_WORD3);
9502                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9503                 break;
9504         case CMD_FCP_IWRITE64_CR:
9505                 command_type = FCP_COMMAND_DATA_OUT;
9506                 /* word3 iocb=iotag wqe=payload_offset_len */
9507                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9508                 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9509                        xmit_len + sizeof(struct fcp_rsp));
9510                 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9511                        0);
9512                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9513                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9514                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9515                        iocbq->iocb.ulpFCP2Rcvy);
9516                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9517                 /* Always open the exchange */
9518                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9519                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9520                        LPFC_WQE_LENLOC_WORD4);
9521                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9522                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9523                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9524                         bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9525                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9526                         if (iocbq->priority) {
9527                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9528                                        (iocbq->priority << 1));
9529                         } else {
9530                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9531                                        (phba->cfg_XLanePriority << 1));
9532                         }
9533                 }
9534                 /* Note, word 10 is already initialized to 0 */
9535
9536                 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9537                 if (phba->cfg_enable_pbde)
9538                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9539                 else
9540                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9541
9542                 if (phba->fcp_embed_io) {
9543                         struct lpfc_io_buf *lpfc_cmd;
9544                         struct sli4_sge *sgl;
9545                         struct fcp_cmnd *fcp_cmnd;
9546                         uint32_t *ptr;
9547
9548                         /* 128 byte wqe support here */
9549
9550                         lpfc_cmd = iocbq->context1;
9551                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9552                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9553
9554                         /* Word 0-2 - FCP_CMND */
9555                         wqe->generic.bde.tus.f.bdeFlags =
9556                                 BUFF_TYPE_BDE_IMMED;
9557                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9558                         wqe->generic.bde.addrHigh = 0;
9559                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
9560
9561                         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9562                         bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9563
9564                         /* Word 22-29  FCP CMND Payload */
9565                         ptr = &wqe->words[22];
9566                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9567                 }
9568                 break;
9569         case CMD_FCP_IREAD64_CR:
9570                 /* word3 iocb=iotag wqe=payload_offset_len */
9571                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9572                 bf_set(payload_offset_len, &wqe->fcp_iread,
9573                        xmit_len + sizeof(struct fcp_rsp));
9574                 bf_set(cmd_buff_len, &wqe->fcp_iread,
9575                        0);
9576                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9577                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9578                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9579                        iocbq->iocb.ulpFCP2Rcvy);
9580                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9581                 /* Always open the exchange */
9582                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9583                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9584                        LPFC_WQE_LENLOC_WORD4);
9585                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9586                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9587                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9588                         bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9589                         bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9590                         if (iocbq->priority) {
9591                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9592                                        (iocbq->priority << 1));
9593                         } else {
9594                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9595                                        (phba->cfg_XLanePriority << 1));
9596                         }
9597                 }
9598                 /* Note, word 10 is already initialized to 0 */
9599
9600                 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9601                 if (phba->cfg_enable_pbde)
9602                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9603                 else
9604                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9605
9606                 if (phba->fcp_embed_io) {
9607                         struct lpfc_io_buf *lpfc_cmd;
9608                         struct sli4_sge *sgl;
9609                         struct fcp_cmnd *fcp_cmnd;
9610                         uint32_t *ptr;
9611
9612                         /* 128 byte wqe support here */
9613
9614                         lpfc_cmd = iocbq->context1;
9615                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9616                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9617
9618                         /* Word 0-2 - FCP_CMND */
9619                         wqe->generic.bde.tus.f.bdeFlags =
9620                                 BUFF_TYPE_BDE_IMMED;
9621                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9622                         wqe->generic.bde.addrHigh = 0;
9623                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
9624
9625                         bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9626                         bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9627
9628                         /* Word 22-29  FCP CMND Payload */
9629                         ptr = &wqe->words[22];
9630                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9631                 }
9632                 break;
9633         case CMD_FCP_ICMND64_CR:
9634                 /* word3 iocb=iotag wqe=payload_offset_len */
9635                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9636                 bf_set(payload_offset_len, &wqe->fcp_icmd,
9637                        xmit_len + sizeof(struct fcp_rsp));
9638                 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9639                        0);
9640                 /* word3 iocb=IO_TAG wqe=reserved */
9641                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9642                 /* Always open the exchange */
9643                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9644                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9645                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9646                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9647                        LPFC_WQE_LENLOC_NONE);
9648                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9649                        iocbq->iocb.ulpFCP2Rcvy);
9650                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9651                         bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9652                         bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9653                         if (iocbq->priority) {
9654                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9655                                        (iocbq->priority << 1));
9656                         } else {
9657                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9658                                        (phba->cfg_XLanePriority << 1));
9659                         }
9660                 }
9661                 /* Note, word 10 is already initialized to 0 */
9662
9663                 if (phba->fcp_embed_io) {
9664                         struct lpfc_io_buf *lpfc_cmd;
9665                         struct sli4_sge *sgl;
9666                         struct fcp_cmnd *fcp_cmnd;
9667                         uint32_t *ptr;
9668
9669                         /* 128 byte wqe support here */
9670
9671                         lpfc_cmd = iocbq->context1;
9672                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9673                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9674
9675                         /* Word 0-2 - FCP_CMND */
9676                         wqe->generic.bde.tus.f.bdeFlags =
9677                                 BUFF_TYPE_BDE_IMMED;
9678                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9679                         wqe->generic.bde.addrHigh = 0;
9680                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
9681
9682                         bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9683                         bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9684
9685                         /* Word 22-29  FCP CMND Payload */
9686                         ptr = &wqe->words[22];
9687                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9688                 }
9689                 break;
9690         case CMD_GEN_REQUEST64_CR:
9691                 /* For this command calculate the xmit length of the
9692                  * request bde.
9693                  */
9694                 xmit_len = 0;
9695                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9696                         sizeof(struct ulp_bde64);
9697                 for (i = 0; i < numBdes; i++) {
9698                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9699                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9700                                 break;
9701                         xmit_len += bde.tus.f.bdeSize;
9702                 }
9703                 /* word3 iocb=IO_TAG wqe=request_payload_len */
9704                 wqe->gen_req.request_payload_len = xmit_len;
9705                 /* word4 iocb=parameter wqe=relative_offset memcpy */
9706                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9707                 /* word6 context tag copied in memcpy */
9708                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
9709                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9710                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9711                                 "2015 Invalid CT %x command 0x%x\n",
9712                                 ct, iocbq->iocb.ulpCommand);
9713                         return IOCB_ERROR;
9714                 }
9715                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9716                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9717                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9718                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9719                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9720                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9721                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9722                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9723                 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9724                 command_type = OTHER_COMMAND;
9725                 break;
9726         case CMD_XMIT_ELS_RSP64_CX:
9727                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9728                 /* words0-2 BDE memcpy */
9729                 /* word3 iocb=iotag32 wqe=response_payload_len */
9730                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9731                 /* word4 */
9732                 wqe->xmit_els_rsp.word4 = 0;
9733                 /* word5 iocb=rsvd wqe=did */
9734                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9735                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
9736
9737                 if_type = bf_get(lpfc_sli_intf_if_type,
9738                                         &phba->sli4_hba.sli_intf);
9739                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9740                         if (iocbq->vport->fc_flag & FC_PT2PT) {
9741                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9742                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9743                                         iocbq->vport->fc_myDID);
9744                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
9745                                         bf_set(wqe_els_did,
9746                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
9747                                 }
9748                         }
9749                 }
9750                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9751                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9752                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9753                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9754                        iocbq->iocb.unsli3.rcvsli3.ox_id);
9755                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9756                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9757                                phba->vpi_ids[iocbq->vport->vpi]);
9758                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9759                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9760                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9761                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9762                        LPFC_WQE_LENLOC_WORD3);
9763                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9764                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9765                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9766                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9767                                         iocbq->context2)->virt);
9768                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9769                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9770                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9771                                         iocbq->vport->fc_myDID);
9772                                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9773                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9774                                         phba->vpi_ids[phba->pport->vpi]);
9775                 }
9776                 command_type = OTHER_COMMAND;
9777                 break;
9778         case CMD_CLOSE_XRI_CN:
9779         case CMD_ABORT_XRI_CN:
9780         case CMD_ABORT_XRI_CX:
9781                 /* words 0-2 of the memcpy should be 0 (reserved) */
9782                 /* port will send abts */
9783                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9784                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9785                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9786                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9787                 } else
9788                         fip = 0;
9789
9790                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9791                         /*
9792                          * The link is down, or the command was ELS_FIP
9793                          * so the fw does not need to send abts
9794                          * on the wire.
9795                          */
9796                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9797                 else
9798                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9799                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9800                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9801                 wqe->abort_cmd.rsrvd5 = 0;
9802                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9803                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9804                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9805                 /*
9806                  * The abort handler will send us CMD_ABORT_XRI_CN or
9807                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9808                  */
9809                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9810                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9811                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9812                        LPFC_WQE_LENLOC_NONE);
9813                 cmnd = CMD_ABORT_XRI_CX;
9814                 command_type = OTHER_COMMAND;
9815                 xritag = 0;
9816                 break;
9817         case CMD_XMIT_BLS_RSP64_CX:
9818                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9819                 /* As BLS ABTS RSP WQE is very different from other WQEs,
9820                  * we re-construct this WQE here based on information in
9821                  * iocbq from scratch.
9822                  */
9823                 memset(wqe, 0, sizeof(union lpfc_wqe));
9824                 /* OX_ID is the same regardless of who sent the ABTS */
9825                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9826                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9827                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9828                     LPFC_ABTS_UNSOL_INT) {
9829                         /* ABTS sent by initiator to CT exchange, the
9830                          * RX_ID field will be filled with the newly
9831                          * allocated responder XRI.
9832                          */
9833                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9834                                iocbq->sli4_xritag);
9835                 } else {
9836                         /* ABTS sent by responder to CT exchange, the
9837                          * RX_ID field will be filled with the responder
9838                          * RX_ID from ABTS.
9839                          */
9840                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9841                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9842                 }
9843                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9844                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9845
9846                 /* Use CT=VPI */
9847                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9848                         ndlp->nlp_DID);
9849                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9850                         iocbq->iocb.ulpContext);
9851                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9852                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9853                         phba->vpi_ids[phba->pport->vpi]);
9854                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9855                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9856                        LPFC_WQE_LENLOC_NONE);
9857                 /* Overwrite the pre-set command type with OTHER_COMMAND */
9858                 command_type = OTHER_COMMAND;
9859                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9860                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9861                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9862                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9863                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9864                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9865                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9866                 }
9867
9868                 break;
9869         case CMD_SEND_FRAME:
9870                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9871                 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9872                 return 0;
9873         case CMD_XRI_ABORTED_CX:
9874         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9875         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9876         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9877         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9878         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9879         default:
9880                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9881                                 "2014 Invalid command 0x%x\n",
9882                                 iocbq->iocb.ulpCommand);
9883                 return IOCB_ERROR;
9885         }
9886
9887         if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9888                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9889         else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9890                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9891         else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9892                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9893         iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9894                               LPFC_IO_DIF_INSERT);
9895         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9896         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9897         wqe->generic.wqe_com.abort_tag = abort_tag;
9898         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9899         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9900         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9901         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9902         return 0;
9903 }
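
/*
 * Field-encoding sketch (illustrative): every WQE field above is set with
 * the bf_set()/bf_get() accessors, which mask and shift against the
 * _SHIFT, _MASK and _WORD definitions that accompany each field, e.g.:
 *
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
 *	tag = bf_get(wqe_reqtag, &wqe->generic.wqe_com);
 *
 * This keeps the conversion independent of the WQE's bit-level layout.
 */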
9904
9905 /**
9906  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9907  * @phba: Pointer to HBA context object.
9908  * @ring_number: SLI ring number to issue iocb on.
9909  * @piocb: Pointer to command iocb.
9910  * @flag: Flag indicating if this command can be put into txq.
9911  *
9912  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9913  * an iocb command to an HBA with SLI-4 interface spec.
9914  *
9915  * This function is called with hbalock held. The function will return success
9916  * after it successfully submits the iocb to the firmware or after adding it
9917  * to the txq.
9918  **/
9919 static int
9920 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9921                          struct lpfc_iocbq *piocb, uint32_t flag)
9922 {
9923         struct lpfc_sglq *sglq;
9924         union lpfc_wqe128 wqe;
9925         struct lpfc_queue *wq;
9926         struct lpfc_sli_ring *pring;
9927
9928         /* Get the WQ */
9929         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9930             (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9931                 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
9932         } else {
9933                 wq = phba->sli4_hba.els_wq;
9934         }
9935
9936         /* Get corresponding ring */
9937         pring = wq->pring;
9938
9939         /*
9940          * The WQE can be either 64 or 128 bytes.
9941          */
9942
9943         lockdep_assert_held(&pring->ring_lock);
9944
9945         if (piocb->sli4_xritag == NO_XRI) {
9946                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9947                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9948                         sglq = NULL;
9949                 else {
9950                         if (!list_empty(&pring->txq)) {
9951                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
9952                                         __lpfc_sli_ringtx_put(phba,
9953                                                 pring, piocb);
9954                                         return IOCB_SUCCESS;
9955                                 } else {
9956                                         return IOCB_BUSY;
9957                                 }
9958                         } else {
9959                                 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9960                                 if (!sglq) {
9961                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
9962                                                 __lpfc_sli_ringtx_put(phba,
9963                                                                 pring,
9964                                                                 piocb);
9965                                                 return IOCB_SUCCESS;
9966                                         } else
9967                                                 return IOCB_BUSY;
9968                                 }
9969                         }
9970                 }
9971         } else if (piocb->iocb_flag &  LPFC_IO_FCP)
9972                 /* These IOs already have an XRI and a mapped sgl. */
9973                 sglq = NULL;
9974         else {
9975                 /*
9976          * This is a continuation of a command (CX), so this
9977                  * sglq is on the active list
9978                  */
9979                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9980                 if (!sglq)
9981                         return IOCB_ERROR;
9982         }
9983
9984         if (sglq) {
9985                 piocb->sli4_lxritag = sglq->sli4_lxritag;
9986                 piocb->sli4_xritag = sglq->sli4_xritag;
9987                 if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
9988                         return IOCB_ERROR;
9989         }
9990
9991         if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9992                 return IOCB_ERROR;
9993
9994         if (lpfc_sli4_wq_put(wq, &wqe))
9995                 return IOCB_ERROR;
9996         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9997
9998         return 0;
9999 }
10000
10001 /**
10002  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10003  *
10004  * This routine invokes the lockless issue-iocb routine through the
10005  * function pointer in the lpfc_hba struct for the active SLI revision.
10006  *
10007  * Return codes:
10008  * IOCB_ERROR - Error
10009  * IOCB_SUCCESS - Success
10010  * IOCB_BUSY - Busy
10011  **/
10012 int
10013 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10014                 struct lpfc_iocbq *piocb, uint32_t flag)
10015 {
10016         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10017 }
10018
10019 /**
10020  * lpfc_sli_api_table_setup - Set up sli api function jump table
10021  * @phba: The hba struct for which this call is being executed.
10022  * @dev_grp: The HBA PCI-Device group number.
10023  *
10024  * This routine sets up the SLI interface API function jump table in @phba
10025  * struct.
10026  * Returns: 0 - success, -ENODEV - failure.
10027  **/
10028 int
10029 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10030 {
10032         switch (dev_grp) {
10033         case LPFC_PCI_DEV_LP:
10034                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10035                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10036                 break;
10037         case LPFC_PCI_DEV_OC:
10038                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10039                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10040                 break;
10041         default:
10042                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10043                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
10044                                 dev_grp);
10045                 return -ENODEV;
10047         }
10048         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10049         return 0;
10050 }
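
/*
 * Editor's sketch (not part of the driver): after lpfc_sli_api_table_setup
 * has run, callers dispatch through the per-revision function pointers
 * without branching on sli_rev themselves.  "example_dispatch" is a
 * hypothetical helper shown only to illustrate the indirection; the caller
 * must already hold whatever lock the resolved routine expects.
 */
static int example_dispatch(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        /* Resolves to __lpfc_sli_issue_iocb_s3 or _s4 per dev_grp */
        return phba->__lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
}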
10051
10052 /**
10053  * lpfc_sli4_calc_ring - Calculates which ring to use
10054  * @phba: Pointer to HBA context object.
10055  * @piocb: Pointer to command iocb.
10056  *
10057  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10058  * hba_wqidx, thus we need to calculate the corresponding ring.
10059  * Since ABORTS must go on the same WQ as the command they are
10060  * aborting, we use the command's hba_wqidx.
10061  */
10062 struct lpfc_sli_ring *
10063 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10064 {
10065         struct lpfc_io_buf *lpfc_cmd;
10066
10067         if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10068                 if (unlikely(!phba->sli4_hba.hdwq))
10069                         return NULL;
10070                 /*
10071                  * For an abort iocb, hba_wqidx should already be
10072                  * set up based on which work queue we used.
10073                  */
10074                 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10075                         lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10076                         piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10077                 }
10078                 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10079         } else {
10080                 if (unlikely(!phba->sli4_hba.els_wq))
10081                         return NULL;
10082                 piocb->hba_wqidx = 0;
10083                 return phba->sli4_hba.els_wq->pring;
10084         }
10085 }
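
/*
 * Editor's sketch (hypothetical helper, not in the driver): an abort for
 * an FCP command must land on the same WQ as the original WQE, so the
 * caller copies hba_wqidx and sets LPFC_USE_FCPWQIDX before resolving the
 * ring, mirroring what lpfc_sli_abort_iotag_issue does further below.
 */
static struct lpfc_sli_ring *
example_abort_ring(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                   struct lpfc_iocbq *abtsiocb)
{
        abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
        if (cmdiocb->iocb_flag & LPFC_IO_FCP)
                abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
        return lpfc_sli4_calc_ring(phba, abtsiocb);
}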
10086
10087 /**
10088  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10089  * @phba: Pointer to HBA context object.
10090  * @ring_number: SLI ring number to issue iocb on.
10091  * @piocb: Pointer to command iocb.
10092  * @flag: Flag indicating if this command can be put into txq.
10093  *
10094  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
10095  * function. This function takes the appropriate lock (the ring_lock on
10096  * SLI-4, the hbalock on SLI-3), calls __lpfc_sli_issue_iocb, and
10097  * returns the error code returned by __lpfc_sli_issue_iocb. This
10098  * wrapper is used by functions which do not hold the relevant lock.
10099  **/
10100 int
10101 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10102                     struct lpfc_iocbq *piocb, uint32_t flag)
10103 {
10104         struct lpfc_sli_ring *pring;
10105         unsigned long iflags;
10106         int rc;
10107
10108         if (phba->sli_rev == LPFC_SLI_REV4) {
10109                 pring = lpfc_sli4_calc_ring(phba, piocb);
10110                 if (unlikely(pring == NULL))
10111                         return IOCB_ERROR;
10112
10113                 spin_lock_irqsave(&pring->ring_lock, iflags);
10114                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10115                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10116         } else {
10117                 /* For now, SLI2/3 will still use hbalock */
10118                 spin_lock_irqsave(&phba->hbalock, iflags);
10119                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10120                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10121         }
10122         return rc;
10123 }
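
/*
 * Editor's sketch (hypothetical caller, not in the driver): issuing an
 * iocb without holding any ring lock and handling the three documented
 * return codes.  With SLI_IOCB_RET_IOCB set, a congested ring returns
 * IOCB_BUSY instead of parking the iocb on the txq.
 */
static void example_send(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        int rc;

        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
                                 SLI_IOCB_RET_IOCB);
        switch (rc) {
        case IOCB_SUCCESS:      /* submitted to the firmware */
                break;
        case IOCB_BUSY:         /* ring congested; caller keeps the iocb */
                break;
        default:                /* IOCB_ERROR: no ring, no sglq, or WQ put failed */
                lpfc_sli_release_iocbq(phba, piocb);
                break;
        }
}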
10124
10125 /**
10126  * lpfc_extra_ring_setup - Extra ring setup function
10127  * @phba: Pointer to HBA context object.
10128  *
10129  * This function is called while the driver attaches to the
10130  * HBA to set up the extra ring. The extra ring is used
10131  * only when the driver needs to support target mode or
10132  * IP over FC functionality.
10133  *
10134  * This function is called with no lock held. SLI3 only.
10135  **/
10136 static int
10137 lpfc_extra_ring_setup(struct lpfc_hba *phba)
10138 {
10139         struct lpfc_sli *psli;
10140         struct lpfc_sli_ring *pring;
10141
10142         psli = &phba->sli;
10143
10144         /* Adjust cmd/rsp ring iocb entries more evenly */
10145
10146         /* Take some away from the FCP ring */
10147         pring = &psli->sli3_ring[LPFC_FCP_RING];
10148         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10149         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10150         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10151         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10152
10153         /* and give them to the extra ring */
10154         pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10155
10156         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10157         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10158         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10159         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10160
10161         /* Setup default profile for this ring */
10162         pring->iotag_max = 4096;
10163         pring->num_mask = 1;
10164         pring->prt[0].profile = 0;      /* Mask 0 */
10165         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10166         pring->prt[0].type = phba->cfg_multi_ring_type;
10167         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10168         return 0;
10169 }
10170
10171 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10172  * @phba: Pointer to HBA context object.
10173  * @iocbq: Pointer to iocb object.
10174  *
10175  * The async_event handler calls this routine when it receives
10176  * an ASYNC_STATUS_CN event from the port.  The port generates
10177  * this event when an Abort Sequence request to an rport fails
10178  * twice in succession.  The abort could be originated by the
10179  * driver or by the port.  The ABTS could have been for an ELS
10180  * or FCP IO.  The port only generates this event when an ABTS
10181  * fails to complete after one retry.
10182  */
10183 static void
10184 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10185                           struct lpfc_iocbq *iocbq)
10186 {
10187         struct lpfc_nodelist *ndlp = NULL;
10188         uint16_t rpi = 0, vpi = 0;
10189         struct lpfc_vport *vport = NULL;
10190
10191         /* The rpi in the ulpContext is vport-sensitive. */
10192         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10193         rpi = iocbq->iocb.ulpContext;
10194
10195         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10196                         "3092 Port generated ABTS async event "
10197                         "on vpi %d rpi %d status 0x%x\n",
10198                         vpi, rpi, iocbq->iocb.ulpStatus);
10199
10200         vport = lpfc_find_vport_by_vpid(phba, vpi);
10201         if (!vport)
10202                 goto err_exit;
10203         ndlp = lpfc_findnode_rpi(vport, rpi);
10204         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10205                 goto err_exit;
10206
10207         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10208                 lpfc_sli_abts_recover_port(vport, ndlp);
10209         return;
10210
10211  err_exit:
10212         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10213                         "3095 Event Context not found, no "
10214                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10215                         vpi, rpi, iocbq->iocb.ulpStatus,
10216                         iocbq->iocb.ulpContext);
10217 }
10218
10219 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10220  * @phba: pointer to HBA context object.
10221  * @ndlp: nodelist pointer for the impacted rport.
10222  * @axri: pointer to the wcqe containing the failed exchange.
10223  *
10224  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10225  * port.  The port generates this event when an abort exchange request to an
10226  * rport fails twice in succession with no reply.  The abort could be originated
10227  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
10228  */
10229 void
10230 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10231                            struct lpfc_nodelist *ndlp,
10232                            struct sli4_wcqe_xri_aborted *axri)
10233 {
10234         struct lpfc_vport *vport;
10235         uint32_t ext_status = 0;
10236
10237         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10238                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10239                                 "3115 Node Context not found, driver "
10240                                 "ignoring abts err event\n");
10241                 return;
10242         }
10243
10244         vport = ndlp->vport;
10245         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10246                         "3116 Port generated FCP XRI ABORT event on "
10247                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10248                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10249                         bf_get(lpfc_wcqe_xa_xri, axri),
10250                         bf_get(lpfc_wcqe_xa_status, axri),
10251                         axri->parameter);
10252
10253         /*
10254          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
10255          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10256          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10257          */
10258         ext_status = axri->parameter & IOERR_PARAM_MASK;
10259         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10260             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10261                 lpfc_sli_abts_recover_port(vport, ndlp);
10262 }
10263
10264 /**
10265  * lpfc_sli_async_event_handler - ASYNC iocb handler function
10266  * @phba: Pointer to HBA context object.
10267  * @pring: Pointer to driver SLI ring object.
10268  * @iocbq: Pointer to iocb object.
10269  *
10270  * This function is called by the slow ring event handler
10271  * function when there is an ASYNC event iocb in the ring.
10272  * This function is called with no lock held.
10273  * Currently this function handles only temperature related
10274  * ASYNC events. The function decodes the temperature sensor
10275  * event message and posts events for the management applications.
10276  **/
10277 static void
10278 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10279         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10280 {
10281         IOCB_t *icmd;
10282         uint16_t evt_code;
10283         struct temp_event temp_event_data;
10284         struct Scsi_Host *shost;
10285         uint32_t *iocb_w;
10286
10287         icmd = &iocbq->iocb;
10288         evt_code = icmd->un.asyncstat.evt_code;
10289
10290         switch (evt_code) {
10291         case ASYNC_TEMP_WARN:
10292         case ASYNC_TEMP_SAFE:
10293                 temp_event_data.data = (uint32_t) icmd->ulpContext;
10294                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10295                 if (evt_code == ASYNC_TEMP_WARN) {
10296                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10297                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10298                                 "0347 Adapter is very hot, please take "
10299                                 "corrective action. temperature : %d Celsius\n",
10300                                 (uint32_t) icmd->ulpContext);
10301                 } else {
10302                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
10303                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10304                                 "0340 Adapter temperature is OK now. "
10305                                 "temperature : %d Celsius\n",
10306                                 (uint32_t) icmd->ulpContext);
10307                 }
10308
10309                 /* Send temperature change event to applications */
10310                 shost = lpfc_shost_from_vport(phba->pport);
10311                 fc_host_post_vendor_event(shost, fc_get_event_number(),
10312                         sizeof(temp_event_data), (char *) &temp_event_data,
10313                         LPFC_NL_VENDOR_ID);
10314                 break;
10315         case ASYNC_STATUS_CN:
10316                 lpfc_sli_abts_err_handler(phba, iocbq);
10317                 break;
10318         default:
10319                 iocb_w = (uint32_t *) icmd;
10320                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10321                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
10322                         " evt_code 0x%x\n"
10323                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
10324                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
10325                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
10326                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10327                         pring->ringno, icmd->un.asyncstat.evt_code,
10328                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10329                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10330                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10331                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10332
10333                 break;
10334         }
10335 }
10336
10337
10338 /**
10339  * lpfc_sli4_setup - SLI ring setup function
10340  * @phba: Pointer to HBA context object.
10341  *
10342  * lpfc_sli4_setup sets up rings of the SLI interface with
10343  * number of iocbs per ring and iotags. This function is
10344  * called while the driver attaches to the HBA and before the
10345  * interrupts are enabled. So there is no need for locking.
10346  *
10347  * This function always returns 0.
10348  **/
10349 int
10350 lpfc_sli4_setup(struct lpfc_hba *phba)
10351 {
10352         struct lpfc_sli_ring *pring;
10353
10354         pring = phba->sli4_hba.els_wq->pring;
10355         pring->num_mask = LPFC_MAX_RING_MASK;
10356         pring->prt[0].profile = 0;      /* Mask 0 */
10357         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10358         pring->prt[0].type = FC_TYPE_ELS;
10359         pring->prt[0].lpfc_sli_rcv_unsol_event =
10360             lpfc_els_unsol_event;
10361         pring->prt[1].profile = 0;      /* Mask 1 */
10362         pring->prt[1].rctl = FC_RCTL_ELS_REP;
10363         pring->prt[1].type = FC_TYPE_ELS;
10364         pring->prt[1].lpfc_sli_rcv_unsol_event =
10365             lpfc_els_unsol_event;
10366         pring->prt[2].profile = 0;      /* Mask 2 */
10367         /* NameServer Inquiry */
10368         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10369         /* NameServer */
10370         pring->prt[2].type = FC_TYPE_CT;
10371         pring->prt[2].lpfc_sli_rcv_unsol_event =
10372             lpfc_ct_unsol_event;
10373         pring->prt[3].profile = 0;      /* Mask 3 */
10374         /* NameServer response */
10375         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10376         /* NameServer */
10377         pring->prt[3].type = FC_TYPE_CT;
10378         pring->prt[3].lpfc_sli_rcv_unsol_event =
10379             lpfc_ct_unsol_event;
10380         return 0;
10381 }
10382
10383 /**
10384  * lpfc_sli_setup - SLI ring setup function
10385  * @phba: Pointer to HBA context object.
10386  *
10387  * lpfc_sli_setup sets up rings of the SLI interface with
10388  * number of iocbs per ring and iotags. This function is
10389  * called while the driver attaches to the HBA and before the
10390  * interrupts are enabled. So there is no need for locking.
10391  *
10392  * This function always returns 0. SLI3 only.
10393  **/
10394 int
10395 lpfc_sli_setup(struct lpfc_hba *phba)
10396 {
10397         int i, totiocbsize = 0;
10398         struct lpfc_sli *psli = &phba->sli;
10399         struct lpfc_sli_ring *pring;
10400
10401         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10402         psli->sli_flag = 0;
10403
10404         psli->iocbq_lookup = NULL;
10405         psli->iocbq_lookup_len = 0;
10406         psli->last_iotag = 0;
10407
10408         for (i = 0; i < psli->num_rings; i++) {
10409                 pring = &psli->sli3_ring[i];
10410                 switch (i) {
10411                 case LPFC_FCP_RING:     /* ring 0 - FCP */
10412                         /* numCiocb and numRiocb are used in config_port */
10413                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10414                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10415                         pring->sli.sli3.numCiocb +=
10416                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10417                         pring->sli.sli3.numRiocb +=
10418                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10419                         pring->sli.sli3.numCiocb +=
10420                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10421                         pring->sli.sli3.numRiocb +=
10422                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10423                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10424                                                         SLI3_IOCB_CMD_SIZE :
10425                                                         SLI2_IOCB_CMD_SIZE;
10426                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10427                                                         SLI3_IOCB_RSP_SIZE :
10428                                                         SLI2_IOCB_RSP_SIZE;
10429                         pring->iotag_ctr = 0;
10430                         pring->iotag_max =
10431                             (phba->cfg_hba_queue_depth * 2);
10432                         pring->fast_iotag = pring->iotag_max;
10433                         pring->num_mask = 0;
10434                         break;
10435                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
10436                         /* numCiocb and numRiocb are used in config_port */
10437                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10438                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10439                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10440                                                         SLI3_IOCB_CMD_SIZE :
10441                                                         SLI2_IOCB_CMD_SIZE;
10442                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10443                                                         SLI3_IOCB_RSP_SIZE :
10444                                                         SLI2_IOCB_RSP_SIZE;
10445                         pring->iotag_max = phba->cfg_hba_queue_depth;
10446                         pring->num_mask = 0;
10447                         break;
10448                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
10449                         /* numCiocb and numRiocb are used in config_port */
10450                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10451                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10452                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10453                                                         SLI3_IOCB_CMD_SIZE :
10454                                                         SLI2_IOCB_CMD_SIZE;
10455                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10456                                                         SLI3_IOCB_RSP_SIZE :
10457                                                         SLI2_IOCB_RSP_SIZE;
10458                         pring->fast_iotag = 0;
10459                         pring->iotag_ctr = 0;
10460                         pring->iotag_max = 4096;
10461                         pring->lpfc_sli_rcv_async_status =
10462                                 lpfc_sli_async_event_handler;
10463                         pring->num_mask = LPFC_MAX_RING_MASK;
10464                         pring->prt[0].profile = 0;      /* Mask 0 */
10465                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10466                         pring->prt[0].type = FC_TYPE_ELS;
10467                         pring->prt[0].lpfc_sli_rcv_unsol_event =
10468                             lpfc_els_unsol_event;
10469                         pring->prt[1].profile = 0;      /* Mask 1 */
10470                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
10471                         pring->prt[1].type = FC_TYPE_ELS;
10472                         pring->prt[1].lpfc_sli_rcv_unsol_event =
10473                             lpfc_els_unsol_event;
10474                         pring->prt[2].profile = 0;      /* Mask 2 */
10475                         /* NameServer Inquiry */
10476                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10477                         /* NameServer */
10478                         pring->prt[2].type = FC_TYPE_CT;
10479                         pring->prt[2].lpfc_sli_rcv_unsol_event =
10480                             lpfc_ct_unsol_event;
10481                         pring->prt[3].profile = 0;      /* Mask 3 */
10482                         /* NameServer response */
10483                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10484                         /* NameServer */
10485                         pring->prt[3].type = FC_TYPE_CT;
10486                         pring->prt[3].lpfc_sli_rcv_unsol_event =
10487                             lpfc_ct_unsol_event;
10488                         break;
10489                 }
10490                 totiocbsize += (pring->sli.sli3.numCiocb *
10491                         pring->sli.sli3.sizeCiocb) +
10492                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10493         }
10494         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10495                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10496                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10497                        "SLI2 SLIM Data: x%x x%lx\n",
10498                        phba->brd_no, totiocbsize,
10499                        (unsigned long) MAX_SLIM_IOCB_SIZE);
10500         }
10501         if (phba->cfg_multi_ring_support == 2)
10502                 lpfc_extra_ring_setup(phba);
10503
10504         return 0;
10505 }
10506
10507 /**
10508  * lpfc_sli4_queue_init - Queue initialization function
10509  * @phba: Pointer to HBA context object.
10510  *
10511  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10512  * ring. This function also initializes ring indices of each ring.
10513  * This function is called during the initialization of the SLI
10514  * interface of an HBA.
10515  * This function is called with no lock held.
10517  **/
10518 void
10519 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10520 {
10521         struct lpfc_sli *psli;
10522         struct lpfc_sli_ring *pring;
10523         int i;
10524
10525         psli = &phba->sli;
10526         spin_lock_irq(&phba->hbalock);
10527         INIT_LIST_HEAD(&psli->mboxq);
10528         INIT_LIST_HEAD(&psli->mboxq_cmpl);
10529         /* Initialize list headers for txq and txcmplq as doubly linked lists */
10530         for (i = 0; i < phba->cfg_hdw_queue; i++) {
10531                 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
10532                 pring->flag = 0;
10533                 pring->ringno = LPFC_FCP_RING;
10534                 pring->txcmplq_cnt = 0;
10535                 INIT_LIST_HEAD(&pring->txq);
10536                 INIT_LIST_HEAD(&pring->txcmplq);
10537                 INIT_LIST_HEAD(&pring->iocb_continueq);
10538                 spin_lock_init(&pring->ring_lock);
10539         }
10540         pring = phba->sli4_hba.els_wq->pring;
10541         pring->flag = 0;
10542         pring->ringno = LPFC_ELS_RING;
10543         pring->txcmplq_cnt = 0;
10544         INIT_LIST_HEAD(&pring->txq);
10545         INIT_LIST_HEAD(&pring->txcmplq);
10546         INIT_LIST_HEAD(&pring->iocb_continueq);
10547         spin_lock_init(&pring->ring_lock);
10548
10549         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10550                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10551                         pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10552                         pring->flag = 0;
10553                         pring->ringno = LPFC_FCP_RING;
10554                         pring->txcmplq_cnt = 0;
10555                         INIT_LIST_HEAD(&pring->txq);
10556                         INIT_LIST_HEAD(&pring->txcmplq);
10557                         INIT_LIST_HEAD(&pring->iocb_continueq);
10558                         spin_lock_init(&pring->ring_lock);
10559                 }
10560                 pring = phba->sli4_hba.nvmels_wq->pring;
10561                 pring->flag = 0;
10562                 pring->ringno = LPFC_ELS_RING;
10563                 pring->txcmplq_cnt = 0;
10564                 INIT_LIST_HEAD(&pring->txq);
10565                 INIT_LIST_HEAD(&pring->txcmplq);
10566                 INIT_LIST_HEAD(&pring->iocb_continueq);
10567                 spin_lock_init(&pring->ring_lock);
10568         }
10569
10570         spin_unlock_irq(&phba->hbalock);
10571 }
10572
10573 /**
10574  * lpfc_sli_queue_init - Queue initialization function
10575  * @phba: Pointer to HBA context object.
10576  *
10577  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10578  * ring. This function also initializes ring indices of each ring.
10579  * This function is called during the initialization of the SLI
10580  * interface of an HBA.
10581  * This function is called with no lock held.
10583  **/
10584 void
10585 lpfc_sli_queue_init(struct lpfc_hba *phba)
10586 {
10587         struct lpfc_sli *psli;
10588         struct lpfc_sli_ring *pring;
10589         int i;
10590
10591         psli = &phba->sli;
10592         spin_lock_irq(&phba->hbalock);
10593         INIT_LIST_HEAD(&psli->mboxq);
10594         INIT_LIST_HEAD(&psli->mboxq_cmpl);
10595         /* Initialize list headers for txq and txcmplq as doubly linked lists */
10596         for (i = 0; i < psli->num_rings; i++) {
10597                 pring = &psli->sli3_ring[i];
10598                 pring->ringno = i;
10599                 pring->sli.sli3.next_cmdidx  = 0;
10600                 pring->sli.sli3.local_getidx = 0;
10601                 pring->sli.sli3.cmdidx = 0;
10602                 INIT_LIST_HEAD(&pring->iocb_continueq);
10603                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10604                 INIT_LIST_HEAD(&pring->postbufq);
10605                 pring->flag = 0;
10606                 INIT_LIST_HEAD(&pring->txq);
10607                 INIT_LIST_HEAD(&pring->txcmplq);
10608                 spin_lock_init(&pring->ring_lock);
10609         }
10610         spin_unlock_irq(&phba->hbalock);
10611 }
10612
10613 /**
10614  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10615  * @phba: Pointer to HBA context object.
10616  *
10617  * This routine flushes the mailbox command subsystem. It will unconditionally
10618  * flush all the mailbox commands in the three possible stages in the mailbox
10619  * command sub-system: pending mailbox command queue; the outstanding mailbox
10620  * command; and completed mailbox command queue. It is caller's responsibility
10621  * to make sure that the driver is in the proper state to flush the mailbox
10622  * command sub-system. Namely, the posting of mailbox commands into the
10623  * pending mailbox command queue from the various clients must be stopped;
10624  * either the HBA is in a state that it will never works on the outstanding
10625  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10626  * mailbox command has been completed.
10627  **/
10628 static void
10629 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10630 {
10631         LIST_HEAD(completions);
10632         struct lpfc_sli *psli = &phba->sli;
10633         LPFC_MBOXQ_t *pmb;
10634         unsigned long iflag;
10635
10636         /* Disable softirqs, including timers from obtaining phba->hbalock */
10637         local_bh_disable();
10638
10639         /* Flush all the mailbox commands in the mbox system */
10640         spin_lock_irqsave(&phba->hbalock, iflag);
10641
10642         /* The pending mailbox command queue */
10643         list_splice_init(&phba->sli.mboxq, &completions);
10644         /* The outstanding active mailbox command */
10645         if (psli->mbox_active) {
10646                 list_add_tail(&psli->mbox_active->list, &completions);
10647                 psli->mbox_active = NULL;
10648                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10649         }
10650         /* The completed mailbox command queue */
10651         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10652         spin_unlock_irqrestore(&phba->hbalock, iflag);
10653
10654         /* Enable softirqs again, done with phba->hbalock */
10655         local_bh_enable();
10656
10657         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10658         while (!list_empty(&completions)) {
10659                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10660                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10661                 if (pmb->mbox_cmpl)
10662                         pmb->mbox_cmpl(phba, pmb);
10663         }
10664 }
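
/*
 * Editor's sketch (hypothetical handler, not in the driver): every
 * mbox_cmpl callback must tolerate MBX_NOT_FINISHED, since a flush
 * completes queued commands with that status without executing them.
 */
static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
                /* Flushed, never executed: just release resources */
                mempool_free(pmb, phba->mbox_mem_pool);
                return;
        }
        /* Normal completion handling would go here */
        mempool_free(pmb, phba->mbox_mem_pool);
}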
10665
10666 /**
10667  * lpfc_sli_host_down - Vport cleanup function
10668  * @vport: Pointer to virtual port object.
10669  *
10670  * lpfc_sli_host_down is called to clean up the resources
10671  * associated with a vport before destroying virtual
10672  * port data structures.
10673  * This function does following operations:
10674  * - Free discovery resources associated with this virtual
10675  *   port.
10676  * - Free iocbs associated with this virtual port in
10677  *   the txq.
10678  * - Send abort for all iocb commands associated with this
10679  *   vport in txcmplq.
10680  *
10681  * This function is called with no lock held and always returns 1.
10682  **/
10683 int
10684 lpfc_sli_host_down(struct lpfc_vport *vport)
10685 {
10686         LIST_HEAD(completions);
10687         struct lpfc_hba *phba = vport->phba;
10688         struct lpfc_sli *psli = &phba->sli;
10689         struct lpfc_queue *qp = NULL;
10690         struct lpfc_sli_ring *pring;
10691         struct lpfc_iocbq *iocb, *next_iocb;
10692         int i;
10693         unsigned long flags = 0;
10694         uint16_t prev_pring_flag;
10695
10696         lpfc_cleanup_discovery_resources(vport);
10697
10698         spin_lock_irqsave(&phba->hbalock, flags);
10699
10700         /*
10701          * Error everything on the txq since these iocbs
10702          * have not been given to the FW yet.
10703          * Also issue ABTS for everything on the txcmplq.
10704          */
10705         if (phba->sli_rev != LPFC_SLI_REV4) {
10706                 for (i = 0; i < psli->num_rings; i++) {
10707                         pring = &psli->sli3_ring[i];
10708                         prev_pring_flag = pring->flag;
10709                         /* Only slow rings */
10710                         if (pring->ringno == LPFC_ELS_RING) {
10711                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10712                                 /* Set the lpfc data pending flag */
10713                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10714                         }
10715                         list_for_each_entry_safe(iocb, next_iocb,
10716                                                  &pring->txq, list) {
10717                                 if (iocb->vport != vport)
10718                                         continue;
10719                                 list_move_tail(&iocb->list, &completions);
10720                         }
10721                         list_for_each_entry_safe(iocb, next_iocb,
10722                                                  &pring->txcmplq, list) {
10723                                 if (iocb->vport != vport)
10724                                         continue;
10725                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10726                         }
10727                         pring->flag = prev_pring_flag;
10728                 }
10729         } else {
10730                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10731                         pring = qp->pring;
10732                         if (!pring)
10733                                 continue;
10734                         if (pring == phba->sli4_hba.els_wq->pring) {
10735                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10736                                 /* Set the lpfc data pending flag */
10737                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10738                         }
10739                         prev_pring_flag = pring->flag;
10740                         spin_lock_irq(&pring->ring_lock);
10741                         list_for_each_entry_safe(iocb, next_iocb,
10742                                                  &pring->txq, list) {
10743                                 if (iocb->vport != vport)
10744                                         continue;
10745                                 list_move_tail(&iocb->list, &completions);
10746                         }
10747                         spin_unlock_irq(&pring->ring_lock);
10748                         list_for_each_entry_safe(iocb, next_iocb,
10749                                                  &pring->txcmplq, list) {
10750                                 if (iocb->vport != vport)
10751                                         continue;
10752                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10753                         }
10754                         pring->flag = prev_pring_flag;
10755                 }
10756         }
10757         spin_unlock_irqrestore(&phba->hbalock, flags);
10758
10759         /* Cancel all the IOCBs from the completions list */
10760         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10761                               IOERR_SLI_DOWN);
10762         return 1;
10763 }
10764
10765 /**
10766  * lpfc_sli_hba_down - Resource cleanup function for the HBA
10767  * @phba: Pointer to HBA context object.
10768  *
10769  * This function cleans up all iocb, buffers, mailbox commands
10770  * while shutting down the HBA. This function is called with no
10771  * lock held and always returns 1.
10772  * This function does the following to cleanup driver resources:
10773  * - Free discovery resources for each virtual port
10774  * - Cleanup any pending fabric iocbs
10775  * - Iterate through the iocb txq and free each entry
10776  *   in the list.
10777  * - Free up any buffer posted to the HBA
10778  * - Free mailbox commands in the mailbox queue.
10779  **/
10780 int
10781 lpfc_sli_hba_down(struct lpfc_hba *phba)
10782 {
10783         LIST_HEAD(completions);
10784         struct lpfc_sli *psli = &phba->sli;
10785         struct lpfc_queue *qp = NULL;
10786         struct lpfc_sli_ring *pring;
10787         struct lpfc_dmabuf *buf_ptr;
10788         unsigned long flags = 0;
10789         int i;
10790
10791         /* Shutdown the mailbox command sub-system */
10792         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10793
10794         lpfc_hba_down_prep(phba);
10795
10796         /* Disable softirqs, including timers from obtaining phba->hbalock */
10797         local_bh_disable();
10798
10799         lpfc_fabric_abort_hba(phba);
10800
10801         spin_lock_irqsave(&phba->hbalock, flags);
10802
10803         /*
10804          * Error everything on the txq since these iocbs
10805          * have not been given to the FW yet.
10806          */
10807         if (phba->sli_rev != LPFC_SLI_REV4) {
10808                 for (i = 0; i < psli->num_rings; i++) {
10809                         pring = &psli->sli3_ring[i];
10810                         /* Only slow rings */
10811                         if (pring->ringno == LPFC_ELS_RING) {
10812                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10813                                 /* Set the lpfc data pending flag */
10814                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10815                         }
10816                         list_splice_init(&pring->txq, &completions);
10817                 }
10818         } else {
10819                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10820                         pring = qp->pring;
10821                         if (!pring)
10822                                 continue;
10823                         spin_lock_irq(&pring->ring_lock);
10824                         list_splice_init(&pring->txq, &completions);
10825                         spin_unlock_irq(&pring->ring_lock);
10826                         if (pring == phba->sli4_hba.els_wq->pring) {
10827                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10828                                 /* Set the lpfc data pending flag */
10829                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10830                         }
10831                 }
10832         }
10833         spin_unlock_irqrestore(&phba->hbalock, flags);
10834
10835         /* Cancel all the IOCBs from the completions list */
10836         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10837                               IOERR_SLI_DOWN);
10838
10839         spin_lock_irqsave(&phba->hbalock, flags);
10840         list_splice_init(&phba->elsbuf, &completions);
10841         phba->elsbuf_cnt = 0;
10842         phba->elsbuf_prev_cnt = 0;
10843         spin_unlock_irqrestore(&phba->hbalock, flags);
10844
10845         while (!list_empty(&completions)) {
10846                 list_remove_head(&completions, buf_ptr,
10847                         struct lpfc_dmabuf, list);
10848                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10849                 kfree(buf_ptr);
10850         }
10851
10852         /* Enable softirqs again, done with phba->hbalock */
10853         local_bh_enable();
10854
10855         /* Return any active mbox cmds */
10856         del_timer_sync(&psli->mbox_tmo);
10857
10858         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10859         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10860         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10861
10862         return 1;
10863 }
10864
10865 /**
10866  * lpfc_sli_pcimem_bcopy - SLI memory copy function
10867  * @srcp: Source memory pointer.
10868  * @destp: Destination memory pointer.
10869  * @cnt: Number of bytes to be copied (advanced one 32-bit word at a time).
10870  *
10871  * This function is used for copying data between driver memory
10872  * and the SLI memory. This function also changes the endianness
10873  * of each word if native endianness is different from SLI
10874  * endianness. This function can be called with or without
10875  * lock.
10876  **/
10877 void
10878 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10879 {
10880         uint32_t *src = srcp;
10881         uint32_t *dest = destp;
10882         uint32_t ldata;
10883         int i;
10884
10885         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10886                 ldata = *src;
10887                 ldata = le32_to_cpu(ldata);
10888                 *dest = ldata;
10889                 src++;
10890                 dest++;
10891         }
10892 }
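
/*
 * Editor's sketch (hypothetical buffers, not in the driver): note that
 * the count argument is a byte count stepped one 32-bit word at a time,
 * and that each word is le32-to-cpu converted on the way through.
 */
static uint32_t example_copy_slim(void *slim_src)
{
        uint32_t shadow[4];

        /* Copy four little-endian words into a CPU-endian shadow */
        lpfc_sli_pcimem_bcopy(slim_src, shadow, sizeof(shadow));
        return shadow[0];
}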
10893
10894
10895 /**
10896  * lpfc_sli_bemem_bcopy - SLI memory copy function
10897  * @srcp: Source memory pointer.
10898  * @destp: Destination memory pointer.
10899  * @cnt: Number of bytes to be copied (advanced one 32-bit word at a time).
10900  *
10901  * This function is used for copying data from a data structure
10902  * with big endian representation to local endianness.
10903  * This function can be called with or without lock.
10904  **/
10905 void
10906 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10907 {
10908         uint32_t *src = srcp;
10909         uint32_t *dest = destp;
10910         uint32_t ldata;
10911         int i;
10912
10913         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10914                 ldata = *src;
10915                 ldata = be32_to_cpu(ldata);
10916                 *dest = ldata;
10917                 src++;
10918                 dest++;
10919         }
10920 }
10921
10922 /**
10923  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10924  * @phba: Pointer to HBA context object.
10925  * @pring: Pointer to driver SLI ring object.
10926  * @mp: Pointer to driver buffer object.
10927  *
10928  * This function is called with no lock held.
10929  * It always returns zero after adding the buffer to the postbufq
10930  * buffer list.
10931  **/
10932 int
10933 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10934                          struct lpfc_dmabuf *mp)
10935 {
10936         /*
10937          * Stick struct lpfc_dmabuf at the end of postbufq so the driver
10938          * can look it up later.
10939          */
10938         spin_lock_irq(&phba->hbalock);
10939         list_add_tail(&mp->list, &pring->postbufq);
10940         pring->postbufq_cnt++;
10941         spin_unlock_irq(&phba->hbalock);
10942         return 0;
10943 }
10944
10945 /**
10946  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10947  * @phba: Pointer to HBA context object.
10948  *
10949  * When HBQ is enabled, buffers are searched based on tags. This function
10950  * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10951  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10952  * does not conflict with tags of buffer posted for unsolicited events.
10953  * The function returns the allocated tag. The function is called with
10954  * no locks held.
10955  **/
10956 uint32_t
10957 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10958 {
10959         spin_lock_irq(&phba->hbalock);
10960         phba->buffer_tag_count++;
10961         /*
10962          * Always set the QUE_BUFTAG_BIT to distinguish this tag
10963          * from one assigned by the HBQ.
10964          */
10965         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10966         spin_unlock_irq(&phba->hbalock);
10967         return phba->buffer_tag_count;
10968 }
10969
10970 /**
10971  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10972  * @phba: Pointer to HBA context object.
10973  * @pring: Pointer to driver SLI ring object.
10974  * @tag: Buffer tag.
10975  *
10976  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10977  * list. After the HBA DMAs data into these buffers, a CMD_IOCB_RET_XRI64_CX
10978  * iocb is posted to the response ring with the tag of the buffer.
10979  * This function searches the pring->postbufq list using the tag
10980  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10981  * iocb. If the buffer is found, the lpfc_dmabuf object of the
10982  * buffer is returned to the caller; otherwise NULL is returned.
10983  * This function is called with no lock held.
10984  **/
10985 struct lpfc_dmabuf *
10986 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10987                         uint32_t tag)
10988 {
10989         struct lpfc_dmabuf *mp, *next_mp;
10990         struct list_head *slp = &pring->postbufq;
10991
10992         /* Search postbufq, from the beginning, looking for a match on tag */
10993         spin_lock_irq(&phba->hbalock);
10994         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10995                 if (mp->buffer_tag == tag) {
10996                         list_del_init(&mp->list);
10997                         pring->postbufq_cnt--;
10998                         spin_unlock_irq(&phba->hbalock);
10999                         return mp;
11000                 }
11001         }
11002
11003         spin_unlock_irq(&phba->hbalock);
11004         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11005                         "0402 Cannot find virtual addr for buffer tag on "
11006                         "ring %d Data x%lx x%p x%p x%x\n",
11007                         pring->ringno, (unsigned long) tag,
11008                         slp->next, slp->prev, pring->postbufq_cnt);
11009
11010         return NULL;
11011 }
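
/*
 * Editor's sketch (hypothetical helper, not in the driver): the typical
 * tag lifecycle for a CMD_QUE_XRI64_CX buffer.  Allocation of "mp" and
 * the actual iocb posting are elided.
 */
static struct lpfc_dmabuf *
example_tag_roundtrip(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                      struct lpfc_dmabuf *mp)
{
        /* QUE_BUFTAG_BIT is set so the tag cannot collide with HBQ tags */
        mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
        lpfc_sli_ringpostbuf_put(phba, pring, mp);

        /* ... later, on the CMD_IOCB_RET_XRI64_CX completion ... */
        return lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
}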
11012
11013 /**
11014  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11015  * @phba: Pointer to HBA context object.
11016  * @pring: Pointer to driver SLI ring object.
11017  * @phys: DMA address of the buffer.
11018  *
11019  * This function searches the buffer list using the dma_address
11020  * of the unsolicited event to find the driver's lpfc_dmabuf object
11021  * corresponding to the dma_address. The function returns the
11022  * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11023  * This function is called by the ct and els unsolicited event
11024  * handlers to get the buffer associated with the unsolicited
11025  * event.
11026  *
11027  * This function is called with no lock held.
11028  **/
11029 struct lpfc_dmabuf *
11030 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11031                          dma_addr_t phys)
11032 {
11033         struct lpfc_dmabuf *mp, *next_mp;
11034         struct list_head *slp = &pring->postbufq;
11035
11036         /* Search postbufq, from the beginning, looking for a match on phys */
11037         spin_lock_irq(&phba->hbalock);
11038         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11039                 if (mp->phys == phys) {
11040                         list_del_init(&mp->list);
11041                         pring->postbufq_cnt--;
11042                         spin_unlock_irq(&phba->hbalock);
11043                         return mp;
11044                 }
11045         }
11046
11047         spin_unlock_irq(&phba->hbalock);
11048         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11049                         "0410 Cannot find virtual addr for mapped buf on "
11050                         "ring %d Data x%llx x%p x%p x%x\n",
11051                         pring->ringno, (unsigned long long)phys,
11052                         slp->next, slp->prev, pring->postbufq_cnt);
11053         return NULL;
11054 }
11055
11056 /**
11057  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11058  * @phba: Pointer to HBA context object.
11059  * @cmdiocb: Pointer to driver command iocb object.
11060  * @rspiocb: Pointer to driver response iocb object.
11061  *
11062  * This function is the completion handler for the abort iocbs for
11063  * ELS commands. This function is called from the ELS ring event
11064  * handler with no lock held. This function frees memory resources
11065  * associated with the abort iocb.
11066  **/
11067 static void
11068 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11069                         struct lpfc_iocbq *rspiocb)
11070 {
11071         IOCB_t *irsp = &rspiocb->iocb;
11072         uint16_t abort_iotag, abort_context;
11073         struct lpfc_iocbq *abort_iocb = NULL;
11074
11075         if (irsp->ulpStatus) {
11076
11077                 /*
11078                  * Assume that the port already completed and returned, or
11079                  * will return the iocb. Just log the message.
11080                  */
11081                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11082                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11083
11084                 spin_lock_irq(&phba->hbalock);
11085                 if (phba->sli_rev < LPFC_SLI_REV4) {
11086                         if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11087                             irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11088                             irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11089                                 spin_unlock_irq(&phba->hbalock);
11090                                 goto release_iocb;
11091                         }
11092                         if (abort_iotag != 0 &&
11093                                 abort_iotag <= phba->sli.last_iotag)
11094                                 abort_iocb =
11095                                         phba->sli.iocbq_lookup[abort_iotag];
11096                 } else
11097                         /* For sli4 the abort_tag is the XRI,
11098                          * so the abort routine puts the iotag of the iocb
11099                          * being aborted in the context field of the abort
11100                          * IOCB.
11101                          */
11102                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
11103
11104                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11105                                 "0327 Cannot abort els iocb %p "
11106                                 "with tag %x context %x, abort status %x, "
11107                                 "abort code %x\n",
11108                                 abort_iocb, abort_iotag, abort_context,
11109                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
11110
11111                 spin_unlock_irq(&phba->hbalock);
11112         }
11113 release_iocb:
11114         lpfc_sli_release_iocbq(phba, cmdiocb);
11115         return;
11116 }
11117
11118 /**
11119  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11120  * @phba: Pointer to HBA context object.
11121  * @cmdiocb: Pointer to driver command iocb object.
11122  * @rspiocb: Pointer to driver response iocb object.
11123  *
11124  * The function is called from SLI ring event handler with no
11125  * lock held. This function is the completion handler for ELS commands
11126  * which are aborted. The function frees memory resources used for
11127  * the aborted ELS commands.
11128  **/
11129 static void
11130 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11131                      struct lpfc_iocbq *rspiocb)
11132 {
11133         IOCB_t *irsp = &rspiocb->iocb;
11134
11135         /* ELS cmd tag <ulpIoTag> completes */
11136         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11137                         "0139 Ignoring ELS cmd tag x%x completion Data: "
11138                         "x%x x%x x%x\n",
11139                         irsp->ulpIoTag, irsp->ulpStatus,
11140                         irsp->un.ulpWord[4], irsp->ulpTimeout);
11141         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11142                 lpfc_ct_free_iocb(phba, cmdiocb);
11143         else
11144                 lpfc_els_free_iocb(phba, cmdiocb);
11145         return;
11146 }
11147
11148 /**
11149  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11150  * @phba: Pointer to HBA context object.
11151  * @pring: Pointer to driver SLI ring object.
11152  * @cmdiocb: Pointer to driver command iocb object.
11153  *
11154  * This function issues an abort iocb for the provided command iocb down to
11155  * the port. Unless the outstanding command iocb is itself an abort
11156  * request, this function issues the abort unconditionally. This function
11157  * is called with hbalock held. The function returns 0 when it fails due to
11158  * a memory allocation failure or when the command iocb is an abort request.
11159  **/
11160 static int
11161 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11162                            struct lpfc_iocbq *cmdiocb)
11163 {
11164         struct lpfc_vport *vport = cmdiocb->vport;
11165         struct lpfc_iocbq *abtsiocbp;
11166         IOCB_t *icmd = NULL;
11167         IOCB_t *iabt = NULL;
11168         int retval;
11169         unsigned long iflags;
11170         struct lpfc_nodelist *ndlp;
11171
11172         lockdep_assert_held(&phba->hbalock);
11173
11174         /*
11175          * There are certain command types we don't want to abort.  And we
11176          * don't want to abort commands that are already in the process of
11177          * being aborted.
11178          */
11179         icmd = &cmdiocb->iocb;
11180         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11181             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11182             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11183                 return 0;
11184
11185         /* issue ABTS for this IOCB based on iotag */
11186         abtsiocbp = __lpfc_sli_get_iocbq(phba);
11187         if (abtsiocbp == NULL)
11188                 return 0;
11189
11190         /* This signals the response path to set the correct status
11191          * before calling the completion handler.
11192          */
11193         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11194
11195         iabt = &abtsiocbp->iocb;
11196         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11197         iabt->un.acxri.abortContextTag = icmd->ulpContext;
11198         if (phba->sli_rev == LPFC_SLI_REV4) {
11199                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11200                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11201         } else {
11202                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11203                 if (pring->ringno == LPFC_ELS_RING) {
11204                         ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11205                         iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11206                 }
11207         }
11208         iabt->ulpLe = 1;
11209         iabt->ulpClass = icmd->ulpClass;
11210
11211         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11212         abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11213         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11214                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11215         if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11216                 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11217
11218         if (phba->link_state >= LPFC_LINK_UP)
11219                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11220         else
11221                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11222
11223         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11224         abtsiocbp->vport = vport;
11225
11226         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11227                          "0339 Abort xri x%x, original iotag x%x, "
11228                          "abort cmd iotag x%x\n",
11229                          iabt->un.acxri.abortIoTag,
11230                          iabt->un.acxri.abortContextTag,
11231                          abtsiocbp->iotag);
11232
11233         if (phba->sli_rev == LPFC_SLI_REV4) {
11234                 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11235                 if (unlikely(pring == NULL))
11236                         return 0;
11237                 /* Note: both hbalock and ring_lock need to be held here */
11238                 spin_lock_irqsave(&pring->ring_lock, iflags);
11239                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11240                         abtsiocbp, 0);
11241                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11242         } else {
11243                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11244                         abtsiocbp, 0);
11245         }
11246
11247         if (retval)
11248                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11249
11250         /*
11251          * The caller of this routine should check for IOCB_ERROR
11252          * and handle it properly.  This routine no longer removes the
11253          * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
11254          */
11255         return retval;
11256 }
11257
11258 /**
11259  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11260  * @phba: Pointer to HBA context object.
11261  * @pring: Pointer to driver SLI ring object.
11262  * @cmdiocb: Pointer to driver command iocb object.
11263  *
11264  * This function issues an abort iocb for the provided command iocb. In case
11265  * of unloading, the abort iocb will not be issued to commands on the ELS
11266  * ring. Instead, the completion callback of those commands is changed so
11267  * that nothing happens when they finish. This function is called with
11268  * hbalock held. The function returns 0 when the command iocb is an abort
11269  * request.
11270  **/
11271 int
11272 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11273                            struct lpfc_iocbq *cmdiocb)
11274 {
11275         struct lpfc_vport *vport = cmdiocb->vport;
11276         int retval = IOCB_ERROR;
11277         IOCB_t *icmd = NULL;
11278
11279         lockdep_assert_held(&phba->hbalock);
11280
11281         /*
11282          * There are certain command types we don't want to abort.  And we
11283          * don't want to abort commands that are already in the process of
11284          * being aborted.
11285          */
11286         icmd = &cmdiocb->iocb;
11287         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11288             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11289             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11290                 return 0;
11291
11292         if (!pring) {
11293                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11294                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11295                 else
11296                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11297                 goto abort_iotag_exit;
11298         }
11299
11300         /*
11301          * If we're unloading, don't abort iocb on the ELS ring, but change
11302          * the callback so that nothing happens when it finishes.
11303          */
11304         if ((vport->load_flag & FC_UNLOADING) &&
11305             (pring->ringno == LPFC_ELS_RING)) {
11306                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11307                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11308                 else
11309                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11310                 goto abort_iotag_exit;
11311         }
11312
11313         /* Now, we try to issue the abort to the cmdiocb out */
11314         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11315
11316 abort_iotag_exit:
11317         /*
11318          * The caller of this routine should check for IOCB_ERROR
11319          * and handle it properly.  This routine no longer removes the
11320          * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
11321          */
11322         return retval;
11323 }
11324
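/*
 * Illustrative sketch (hypothetical, not part of the driver): a caller of
 * lpfc_sli_issue_abort_iotag() is expected to hold hbalock across the call
 * and to handle IOCB_ERROR itself, since the routine no longer cleans up
 * the command iocb on failure:
 *
 *     spin_lock_irq(&phba->hbalock);
 *     retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 *     spin_unlock_irq(&phba->hbalock);
 *     if (retval == IOCB_ERROR)
 *             ... recover the command iocb here ...
 */
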
11325 /**
11326  * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
11327  * @phba: Pointer to HBA context object.
11328  * @pring: Pointer to driver SLI ring object.
11329  * @cmdiocb: Pointer to driver command iocb object.
11330  *
11331  * This function issues an abort iocb for the provided command iocb down to
11332  * the port. Unless the outstanding command iocb is itself an abort
11333  * request, this function issues the abort unconditionally. This function is
11334  * called with hbalock held. The function returns 0 when it fails due to a
11335  * memory allocation failure or when the command iocb is an abort request.
11336  **/
11337 static int
11338 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11339                         struct lpfc_iocbq *cmdiocb)
11340 {
11341         struct lpfc_vport *vport = cmdiocb->vport;
11342         struct lpfc_iocbq *abtsiocbp;
11343         union lpfc_wqe128 *abts_wqe;
11344         int retval;
11345         int idx = cmdiocb->hba_wqidx;
11346
11347         /*
11348          * There are certain command types we don't want to abort.  And we
11349          * don't want to abort commands that are already in the process of
11350          * being aborted.
11351          */
11352         if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
11353             cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
11354             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11355                 return 0;
11356
11357         /* issue ABTS for this io based on iotag */
11358         abtsiocbp = __lpfc_sli_get_iocbq(phba);
11359         if (abtsiocbp == NULL)
11360                 return 0;
11361
11362         /* This flag signals the response handling to set the correct
11363          * status before calling the completion handler.
11364          */
11365         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11366
11367         /* Complete prepping the abort wqe and issue to the FW. */
11368         abts_wqe = &abtsiocbp->wqe;
11369
11370         /* Clear any stale WQE contents */
11371         memset(abts_wqe, 0, sizeof(*abts_wqe));
11372         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
11373
11374         /* word 7 */
11375         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11376         bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11377                cmdiocb->iocb.ulpClass);
11378
11379         /* word 8 - tell the FW to abort the IO associated with this
11380          * outstanding exchange ID.
11381          */
11382         abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11383
11384         /* word 9 - this is the iotag for the abts_wqe completion. */
11385         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11386                abtsiocbp->iotag);
11387
11388         /* word 10 */
11389         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11390         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11391
11392         /* word 11 */
11393         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11394         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11395         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11396
11397         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11398         abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11399         abtsiocbp->vport = vport;
11400         abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
11401         retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
11402                                      abtsiocbp);
11403         if (retval) {
11404                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11405                                  "6147 Failed abts issue_wqe with status x%x "
11406                                  "for oxid x%x\n",
11407                                  retval, cmdiocb->sli4_xritag);
11408                 lpfc_sli_release_iocbq(phba, abtsiocbp);
11409                 return retval;
11410         }
11411
11412         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11413                          "6148 Drv Abort NVME Request Issued for "
11414                          "ox_id x%x on reqtag x%x\n",
11415                          cmdiocb->sli4_xritag,
11416                          abtsiocbp->iotag);
11417
11418         return retval;
11419 }
11420
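/*
 * Recap of the abort WQE assembled above (descriptive sketch only):
 * word 7 carries CMD_ABORT_XRI_CX and the class of the original command,
 * word 8 (abort_tag) names the XRI of the I/O being aborted, word 9
 * (reqtag) is the iotag of the abort request itself, and words 10-11 set
 * QOSD, OTHER_COMMAND and the default CQ for the completion.
 */
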
11421 /**
11422  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11423  * @phba: pointer to lpfc HBA data structure.
11424  *
11425  * This routine will abort all pending and outstanding iocbs to an HBA.
11426  **/
11427 void
11428 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11429 {
11430         struct lpfc_sli *psli = &phba->sli;
11431         struct lpfc_sli_ring *pring;
11432         struct lpfc_queue *qp = NULL;
11433         int i;
11434
11435         if (phba->sli_rev != LPFC_SLI_REV4) {
11436                 for (i = 0; i < psli->num_rings; i++) {
11437                         pring = &psli->sli3_ring[i];
11438                         lpfc_sli_abort_iocb_ring(phba, pring);
11439                 }
11440                 return;
11441         }
11442         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11443                 pring = qp->pring;
11444                 if (!pring)
11445                         continue;
11446                 lpfc_sli_abort_iocb_ring(phba, pring);
11447         }
11448 }
11449
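/*
 * Sketch of the two paths above: SLI-3 walks the fixed sli3_ring[] array,
 * while SLI-4 walks the per-WQ rings hanging off lpfc_wq_list; either way
 * each ring is handed to lpfc_sli_abort_iocb_ring().
 */
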
11450 /**
11451  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11452  * @iocbq: Pointer to driver iocb object.
11453  * @vport: Pointer to driver virtual port object.
11454  * @tgt_id: SCSI ID of the target.
11455  * @lun_id: LUN ID of the scsi device.
11456  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11457  *
11458  * This function acts as an iocb filter for functions which abort or count
11459  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11460  * 0 if the filtering criteria are met for the given iocb and will return
11461  * 1 if the filtering criteria are not met.
11462  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11463  * given iocb is for the SCSI device specified by vport, tgt_id and
11464  * lun_id parameter.
11465  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
11466  * given iocb is for the SCSI target specified by vport and tgt_id
11467  * parameters.
11468  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11469  * given iocb is for the SCSI host associated with the given vport.
11470  * This function is called with no locks held.
11471  **/
11472 static int
11473 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11474                            uint16_t tgt_id, uint64_t lun_id,
11475                            lpfc_ctx_cmd ctx_cmd)
11476 {
11477         struct lpfc_io_buf *lpfc_cmd;
11478         int rc = 1;
11479
11480         if (iocbq->vport != vport)
11481                 return rc;
11482
11483         if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
11484             !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11485                 return rc;
11486
11487         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11488
11489         if (lpfc_cmd->pCmd == NULL)
11490                 return rc;
11491
11492         switch (ctx_cmd) {
11493         case LPFC_CTX_LUN:
11494                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11495                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11496                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11497                         rc = 0;
11498                 break;
11499         case LPFC_CTX_TGT:
11500                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11501                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11502                         rc = 0;
11503                 break;
11504         case LPFC_CTX_HOST:
11505                 rc = 0;
11506                 break;
11507         default:
11508                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11509                         __func__, ctx_cmd);
11510                 break;
11511         }
11512
11513         return rc;
11514 }
11515
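/*
 * Filter usage sketch (hypothetical): a routine draining one LUN would
 * test each outstanding iocb like so, where a return of 0 means the iocb
 * matches the vport/target/LUN triple:
 *
 *     if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
 *                                    LPFC_CTX_LUN) == 0)
 *             ... iocb belongs to this LUN ...
 */
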
11516 /**
11517  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11518  * @vport: Pointer to virtual port.
11519  * @tgt_id: SCSI ID of the target.
11520  * @lun_id: LUN ID of the scsi device.
11521  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11522  *
11523  * This function returns number of FCP commands pending for the vport.
11524  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11525  * commands pending on the vport associated with SCSI device specified
11526  * by tgt_id and lun_id parameters.
11527  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11528  * commands pending on the vport associated with SCSI target specified
11529  * by tgt_id parameter.
11530  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11531  * commands pending on the vport.
11532  * This function returns the number of iocbs which satisfy the filter.
11533  * This function is called without any lock held.
11534  **/
11535 int
11536 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11537                   lpfc_ctx_cmd ctx_cmd)
11538 {
11539         struct lpfc_hba *phba = vport->phba;
11540         struct lpfc_iocbq *iocbq;
11541         int sum, i;
11542
11543         spin_lock_irq(&phba->hbalock);
11544         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11545                 iocbq = phba->sli.iocbq_lookup[i];
11546
11547                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11548                                                ctx_cmd) == 0)
11549                         sum++;
11550         }
11551         spin_unlock_irq(&phba->hbalock);
11552
11553         return sum;
11554 }
11555
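/*
 * Usage sketch (hypothetical): polling until the FCP commands for one
 * LUN have drained after a reset, with a caller-chosen deadline:
 *
 *     while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *            time_before(jiffies, deadline))
 *             msleep(20);
 */
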
11556 /**
11557  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11558  * @phba: Pointer to HBA context object
11559  * @cmdiocb: Pointer to command iocb object.
11560  * @rspiocb: Pointer to response iocb object.
11561  *
11562  * This function is called when an aborted FCP iocb completes. This
11563  * function is called by the ring event handler with no lock held.
11564  * This function frees the iocb.
11565  **/
11566 void
11567 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11568                         struct lpfc_iocbq *rspiocb)
11569 {
11570         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11571                         "3096 ABORT_XRI_CN completing on rpi x%x "
11572                         "original iotag x%x, abort cmd iotag x%x "
11573                         "status 0x%x, reason 0x%x\n",
11574                         cmdiocb->iocb.un.acxri.abortContextTag,
11575                         cmdiocb->iocb.un.acxri.abortIoTag,
11576                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11577                         rspiocb->iocb.un.ulpWord[4]);
11578         lpfc_sli_release_iocbq(phba, cmdiocb);
11579         return;
11580 }
11581
11582 /**
11583  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11584  * @vport: Pointer to virtual port.
11585  * @pring: Pointer to driver SLI ring object.
11586  * @tgt_id: SCSI ID of the target.
11587  * @lun_id: LUN ID of the scsi device.
11588  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11589  *
11590  * This function sends an abort command for every SCSI command
11591  * associated with the given virtual port pending on the ring
11592  * filtered by the lpfc_sli_validate_fcp_iocb function.
11593  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11594  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11595  * parameters.
11596  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11597  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11598  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11599  * FCP iocbs associated with virtual port.
11600  * This function returns number of iocbs it failed to abort.
11601  * This function is called with no locks held.
11602  **/
11603 int
11604 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11605                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11606 {
11607         struct lpfc_hba *phba = vport->phba;
11608         struct lpfc_iocbq *iocbq;
11609         struct lpfc_iocbq *abtsiocb;
11610         struct lpfc_sli_ring *pring_s4;
11611         IOCB_t *cmd = NULL;
11612         int errcnt = 0, ret_val = 0;
11613         int i;
11614
11615         /* all I/Os are in the process of being flushed */
11616         if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11617                 return errcnt;
11618
11619         for (i = 1; i <= phba->sli.last_iotag; i++) {
11620                 iocbq = phba->sli.iocbq_lookup[i];
11621
11622                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11623                                                abort_cmd) != 0)
11624                         continue;
11625
11626                 /*
11627                  * If the iocbq is already being aborted, don't take a second
11628                  * action, but do count it.
11629                  */
11630                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11631                         continue;
11632
11633                 /* issue ABTS for this IOCB based on iotag */
11634                 abtsiocb = lpfc_sli_get_iocbq(phba);
11635                 if (abtsiocb == NULL) {
11636                         errcnt++;
11637                         continue;
11638                 }
11639
11640                 /* indicate the IO is being aborted by the driver. */
11641                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11642
11643                 cmd = &iocbq->iocb;
11644                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11645                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11646                 if (phba->sli_rev == LPFC_SLI_REV4)
11647                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11648                 else
11649                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11650                 abtsiocb->iocb.ulpLe = 1;
11651                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11652                 abtsiocb->vport = vport;
11653
11654                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11655                 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11656                 if (iocbq->iocb_flag & LPFC_IO_FCP)
11657                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11658                 if (iocbq->iocb_flag & LPFC_IO_FOF)
11659                         abtsiocb->iocb_flag |= LPFC_IO_FOF;
11660
11661                 if (lpfc_is_link_up(phba))
11662                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11663                 else
11664                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11665
11666                 /* Setup callback routine and issue the command. */
11667                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11668                 if (phba->sli_rev == LPFC_SLI_REV4) {
11669                         pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11670                         if (!pring_s4)
11671                                 continue;
11672                         ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11673                                                       abtsiocb, 0);
11674                 } else
11675                         ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11676                                                       abtsiocb, 0);
11677                 if (ret_val == IOCB_ERROR) {
11678                         lpfc_sli_release_iocbq(phba, abtsiocb);
11679                         errcnt++;
11680                         continue;
11681                 }
11682         }
11683
11684         return errcnt;
11685 }
11686
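/*
 * Usage sketch (hypothetical): aborting everything outstanding on one
 * SCSI target from an error handler. The return value is the number of
 * iocbs the routine failed to abort, so zero means every abort was
 * issued (lun_id is ignored for LPFC_CTX_TGT):
 *
 *     errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
 *     if (errcnt)
 *             ... escalate recovery ...
 */
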
11687 /**
11688  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11689  * @vport: Pointer to virtual port.
11690  * @pring: Pointer to driver SLI ring object.
11691  * @tgt_id: SCSI ID of the target.
11692  * @lun_id: LUN ID of the scsi device.
11693  * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11694  *
11695  * This function sends an abort command for every SCSI command
11696  * associated with the given virtual port pending on the ring
11697  * filtered by the lpfc_sli_validate_fcp_iocb function.
11698  * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11699  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11700  * parameters.
11701  * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11702  * FCP iocbs associated with the SCSI target specified by the tgt_id
11703  * parameter.
11704  * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11705  * FCP iocbs associated with the virtual port.
11705  * This function returns the number of iocbs it aborted.
11706  * This function is called with no locks held right after a taskmgmt
11707  * command is sent.
11708  **/
11709 int
11710 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11711                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11712 {
11713         struct lpfc_hba *phba = vport->phba;
11714         struct lpfc_io_buf *lpfc_cmd;
11715         struct lpfc_iocbq *abtsiocbq;
11716         struct lpfc_nodelist *ndlp;
11717         struct lpfc_iocbq *iocbq;
11718         IOCB_t *icmd;
11719         int sum, i, ret_val;
11720         unsigned long iflags;
11721         struct lpfc_sli_ring *pring_s4 = NULL;
11722
11723         spin_lock_irqsave(&phba->hbalock, iflags);
11724
11725         /* all I/Os are in the process of being flushed */
11726         if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11727                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11728                 return 0;
11729         }
11730         sum = 0;
11731
11732         for (i = 1; i <= phba->sli.last_iotag; i++) {
11733                 iocbq = phba->sli.iocbq_lookup[i];
11734
11735                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11736                                                cmd) != 0)
11737                         continue;
11738
11739                 /* Guard against IO completion being called at same time */
11740                 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11741                 spin_lock(&lpfc_cmd->buf_lock);
11742
11743                 if (!lpfc_cmd->pCmd) {
11744                         spin_unlock(&lpfc_cmd->buf_lock);
11745                         continue;
11746                 }
11747
11748                 if (phba->sli_rev == LPFC_SLI_REV4) {
11749                         pring_s4 =
11750                             phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
11751                         if (!pring_s4) {
11752                                 spin_unlock(&lpfc_cmd->buf_lock);
11753                                 continue;
11754                         }
11755                         /* Note: both hbalock and ring_lock must be held here */
11756                         spin_lock(&pring_s4->ring_lock);
11757                 }
11758
11759                 /*
11760                  * If the iocbq is already being aborted, don't take a second
11761                  * action, but do count it.
11762                  */
11763                 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11764                     !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11765                         if (phba->sli_rev == LPFC_SLI_REV4)
11766                                 spin_unlock(&pring_s4->ring_lock);
11767                         spin_unlock(&lpfc_cmd->buf_lock);
11768                         continue;
11769                 }
11770
11771                 /* issue ABTS for this IOCB based on iotag */
11772                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11773                 if (!abtsiocbq) {
11774                         if (phba->sli_rev == LPFC_SLI_REV4)
11775                                 spin_unlock(&pring_s4->ring_lock);
11776                         spin_unlock(&lpfc_cmd->buf_lock);
11777                         continue;
11778                 }
11779
11780                 icmd = &iocbq->iocb;
11781                 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11782                 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11783                 if (phba->sli_rev == LPFC_SLI_REV4)
11784                         abtsiocbq->iocb.un.acxri.abortIoTag =
11785                                                          iocbq->sli4_xritag;
11786                 else
11787                         abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11788                 abtsiocbq->iocb.ulpLe = 1;
11789                 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11790                 abtsiocbq->vport = vport;
11791
11792                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11793                 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11794                 if (iocbq->iocb_flag & LPFC_IO_FCP)
11795                         abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11796                 if (iocbq->iocb_flag & LPFC_IO_FOF)
11797                         abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11798
11799                 ndlp = lpfc_cmd->rdata->pnode;
11800
11801                 if (lpfc_is_link_up(phba) &&
11802                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11803                         abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11804                 else
11805                         abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11806
11807                 /* Setup callback routine and issue the command. */
11808                 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11809
11810                 /*
11811                  * Indicate the IO is being aborted by the driver and set
11812                  * the caller's flag into the aborted IO.
11813                  */
11814                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11815
11816                 if (phba->sli_rev == LPFC_SLI_REV4) {
11817                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11818                                                         abtsiocbq, 0);
11819                         spin_unlock(&pring_s4->ring_lock);
11820                 } else {
11821                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11822                                                         abtsiocbq, 0);
11823                 }
11824
11825                 spin_unlock(&lpfc_cmd->buf_lock);
11826
11827                 if (ret_val == IOCB_ERROR)
11828                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
11829                 else
11830                         sum++;
11831         }
11832         spin_unlock_irqrestore(&phba->hbalock, iflags);
11833         return sum;
11834 }
11835
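/*
 * Usage sketch (hypothetical): right after a target reset task management
 * command completes, abort the outstanding FCP iocbs and then wait for
 * them to drain:
 *
 *     cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
 *     while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT) &&
 *            time_before(jiffies, deadline))
 *             msleep(20);
 */
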
11836 /**
11837  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11838  * @phba: Pointer to HBA context object.
11839  * @cmdiocbq: Pointer to command iocb.
11840  * @rspiocbq: Pointer to response iocb.
11841  *
11842  * This function is the completion handler for iocbs issued using
11843  * the lpfc_sli_issue_iocb_wait function. This function is called by the
11844  * ring event handler function without any lock held. This function
11845  * can be called from both worker thread context and interrupt
11846  * context. This function can also be called from another thread which
11847  * cleans up the SLI layer objects.
11848  * This function copies the contents of the response iocb to the
11849  * response iocb memory object provided by the caller of
11850  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11851  * sleeps for the iocb completion.
11852  **/
11853 static void
11854 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11855                         struct lpfc_iocbq *cmdiocbq,
11856                         struct lpfc_iocbq *rspiocbq)
11857 {
11858         wait_queue_head_t *pdone_q;
11859         unsigned long iflags;
11860         struct lpfc_io_buf *lpfc_cmd;
11861
11862         spin_lock_irqsave(&phba->hbalock, iflags);
11863         if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11864
11865                 /*
11866                  * A time out has occurred for the iocb.  If a time out
11867                  * completion handler has been supplied, call it.  Otherwise,
11868                  * just free the iocbq.
11869                  */
11870
11871                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11872                 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11873                 cmdiocbq->wait_iocb_cmpl = NULL;
11874                 if (cmdiocbq->iocb_cmpl)
11875                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11876                 else
11877                         lpfc_sli_release_iocbq(phba, cmdiocbq);
11878                 return;
11879         }
11880
11881         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11882         if (cmdiocbq->context2 && rspiocbq)
11883                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11884                        &rspiocbq->iocb, sizeof(IOCB_t));
11885
11886         /* Set the exchange busy flag for task management commands */
11887         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11888                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11889                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11890                         cur_iocbq);
11891                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11892         }
11893
11894         pdone_q = cmdiocbq->context_un.wait_queue;
11895         if (pdone_q)
11896                 wake_up(pdone_q);
11897         spin_unlock_irqrestore(&phba->hbalock, iflags);
11898         return;
11899 }
11900
11901 /**
11902  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11903  * @phba: Pointer to HBA context object.
11904  * @piocbq: Pointer to command iocb.
11905  * @flag: Flag to test.
11906  *
11907  * This routine grabs the hbalock and then tests the iocb_flag to
11908  * see if the passed in flag is set.
11909  * Returns:
11910  * 1 if flag is set.
11911  * 0 if flag is not set.
11912  **/
11913 static int
11914 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11915                  struct lpfc_iocbq *piocbq, uint32_t flag)
11916 {
11917         unsigned long iflags;
11918         int ret;
11919
11920         spin_lock_irqsave(&phba->hbalock, iflags);
11921         ret = piocbq->iocb_flag & flag;
11922         spin_unlock_irqrestore(&phba->hbalock, iflags);
11923         return ret;
11924
11925 }
11926
11927 /**
11928  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11929  * @phba: Pointer to HBA context object.
11930  * @ring_number: SLI ring number to issue the iocb on.
11931  * @piocb: Pointer to command iocb.
11932  * @prspiocbq: Pointer to response iocb.
11933  * @timeout: Timeout in number of seconds.
11934  *
11935  * This function issues the iocb to firmware and waits for the
11936  * iocb to complete. The iocb_cmpl field of the iocb shall be used
11937  * to handle iocbs which time out. If the field is NULL, the
11938  * function shall free the iocbq structure.  If more clean up is
11939  * needed, the caller is expected to provide a completion function
11940  * that will provide the needed clean up.  If the iocb command is
11941  * not completed within timeout seconds, the function will either
11942  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11943  * completion function set in the iocb_cmpl field and then return
11944  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
11945  * resources if this function returns IOCB_TIMEDOUT.
11946  * The function waits for the iocb completion using a
11947  * non-interruptible wait.
11948  * This function will sleep while waiting for iocb completion.
11949  * So, this function should not be called from any context which
11950  * does not allow sleeping. Due to the same reason, this function
11951  * cannot be called with interrupts disabled.
11952  * This function assumes that the iocb completions occur while
11953  * this function sleeps. So, this function cannot be called from
11954  * the thread which processes iocb completions for this ring.
11955  * This function clears the iocb_flag of the iocb object before
11956  * issuing the iocb and the iocb completion handler sets this
11957  * flag and wakes this thread when the iocb completes.
11958  * The contents of the response iocb will be copied to prspiocbq
11959  * by the completion handler when the command completes.
11960  * This function returns IOCB_SUCCESS when success.
11961  * This function is called with no lock held.
11962  **/
11963 int
11964 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11965                          uint32_t ring_number,
11966                          struct lpfc_iocbq *piocb,
11967                          struct lpfc_iocbq *prspiocbq,
11968                          uint32_t timeout)
11969 {
11970         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11971         long timeleft, timeout_req = 0;
11972         int retval = IOCB_SUCCESS;
11973         uint32_t creg_val;
11974         struct lpfc_iocbq *iocb;
11975         int txq_cnt = 0;
11976         int txcmplq_cnt = 0;
11977         struct lpfc_sli_ring *pring;
11978         unsigned long iflags;
11979         bool iocb_completed = true;
11980
11981         if (phba->sli_rev >= LPFC_SLI_REV4)
11982                 pring = lpfc_sli4_calc_ring(phba, piocb);
11983         else
11984                 pring = &phba->sli.sli3_ring[ring_number];
11985         /*
11986          * If the caller has provided a response iocbq buffer, then context2
11987          * must be NULL or it is an error.
11988          */
11989         if (prspiocbq) {
11990                 if (piocb->context2)
11991                         return IOCB_ERROR;
11992                 piocb->context2 = prspiocbq;
11993         }
11994
11995         piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11996         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11997         piocb->context_un.wait_queue = &done_q;
11998         piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11999
12000         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12001                 if (lpfc_readl(phba->HCregaddr, &creg_val))
12002                         return IOCB_ERROR;
12003                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12004                 writel(creg_val, phba->HCregaddr);
12005                 readl(phba->HCregaddr); /* flush */
12006         }
12007
12008         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12009                                      SLI_IOCB_RET_IOCB);
12010         if (retval == IOCB_SUCCESS) {
12011                 timeout_req = msecs_to_jiffies(timeout * 1000);
12012                 timeleft = wait_event_timeout(done_q,
12013                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12014                                 timeout_req);
12015                 spin_lock_irqsave(&phba->hbalock, iflags);
12016                 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12017
12018                         /*
12019                          * IOCB timed out.  Inform the wake iocb wait
12020                          * completion function and set local status
12021                          */
12022
12023                         iocb_completed = false;
12024                         piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12025                 }
12026                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12027                 if (iocb_completed) {
12028                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12029                                         "0331 IOCB wake signaled\n");
12030                         /* Note: we are not indicating if the IOCB has a success
12031                          * status or not - that's for the caller to check.
12032                          * IOCB_SUCCESS means just that the command was sent and
12033                          * completed. Not that it completed successfully.
12034                          */
12035                 } else if (timeleft == 0) {
12036                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12037                                         "0338 IOCB wait timeout error - no "
12038                                         "wake response Data x%x\n", timeout);
12039                         retval = IOCB_TIMEDOUT;
12040                 } else {
12041                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12042                                         "0330 IOCB wake NOT set, "
12043                                         "Data x%x x%lx\n",
12044                                         timeout, (timeleft / jiffies));
12045                         retval = IOCB_TIMEDOUT;
12046                 }
12047         } else if (retval == IOCB_BUSY) {
12048                 if (phba->cfg_log_verbose & LOG_SLI) {
12049                         list_for_each_entry(iocb, &pring->txq, list) {
12050                                 txq_cnt++;
12051                         }
12052                         list_for_each_entry(iocb, &pring->txcmplq, list) {
12053                                 txcmplq_cnt++;
12054                         }
12055                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12056                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12057                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12058                 }
12059                 return retval;
12060         } else {
12061                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12062                                 "0332 IOCB wait issue failed, Data x%x\n",
12063                                 retval);
12064                 retval = IOCB_ERROR;
12065         }
12066
12067         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12068                 if (lpfc_readl(phba->HCregaddr, &creg_val))
12069                         return IOCB_ERROR;
12070                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12071                 writel(creg_val, phba->HCregaddr);
12072                 readl(phba->HCregaddr); /* flush */
12073         }
12074
12075         if (prspiocbq)
12076                 piocb->context2 = NULL;
12077
12078         piocb->context_un.wait_queue = NULL;
12079         piocb->iocb_cmpl = NULL;
12080         return retval;
12081 }
12082
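/*
 * Usage sketch (hypothetical): a synchronous issue with a caller-supplied
 * response iocb. Per the header comment, the caller must not free the
 * iocb resources when IOCB_TIMEDOUT is returned:
 *
 *     rspiocbq = lpfc_sli_get_iocbq(phba);
 *     ret = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
 *                                    rspiocbq, 30);
 *     if (ret == IOCB_SUCCESS)
 *             status = rspiocbq->iocb.ulpStatus;
 *     if (ret != IOCB_TIMEDOUT)
 *             ... release piocb and rspiocbq here ...
 */
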
12083 /**
12084  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12085  * @phba: Pointer to HBA context object.
12086  * @pmboxq: Pointer to driver mailbox object.
12087  * @timeout: Timeout in number of seconds.
12088  *
12089  * This function issues the mailbox to firmware and waits for the
12090  * mailbox command to complete. If the mailbox command is not
12091  * completed within timeout seconds, it returns MBX_TIMEOUT.
12092  * The function waits for the mailbox completion using a
12093  * non-interruptible wait. If the mailbox does not complete within
12094  * the timeout, MBX_TIMEOUT is returned to the caller. The caller
12095  * should not free the mailbox resources if this function returns
12096  * MBX_TIMEOUT.
12097  * This function will sleep while waiting for mailbox completion.
12098  * So, this function should not be called from any context which
12099  * does not allow sleeping. Due to the same reason, this function
12100  * cannot be called with interrupts disabled.
12101  * This function assumes that the mailbox completion occurs while
12102  * this function sleeps. So, this function cannot be called from
12103  * the worker thread which processes mailbox completion.
12104  * This function is called in the context of HBA management
12105  * applications.
12106  * This function returns MBX_SUCCESS when successful.
12107  * This function is called with no lock held.
12108  **/
12109 int
12110 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12111                          uint32_t timeout)
12112 {
12113         struct completion mbox_done;
12114         int retval;
12115         unsigned long flag;
12116
12117         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12118         /* setup wake call as mailbox completion callback */
12119         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12120
12121         /* setup context3 field to pass completion pointer to wake function */
12122         init_completion(&mbox_done);
12123         pmboxq->context3 = &mbox_done;
12124         /* now issue the command */
12125         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12126         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12127                 wait_for_completion_timeout(&mbox_done,
12128                                             msecs_to_jiffies(timeout * 1000));
12129
12130                 spin_lock_irqsave(&phba->hbalock, flag);
12131                 pmboxq->context3 = NULL;
12132                 /*
12133                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
12134                  * else do not free the resources.
12135                  */
12136                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12137                         retval = MBX_SUCCESS;
12138                 } else {
12139                         retval = MBX_TIMEOUT;
12140                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12141                 }
12142                 spin_unlock_irqrestore(&phba->hbalock, flag);
12143         }
12144         return retval;
12145 }
12146
12147 /**
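/*
 * Usage sketch (hypothetical mailbox setup elided): issue a mailbox
 * command synchronously. On MBX_TIMEOUT the completion handler is
 * redirected to lpfc_sli_def_mbox_cmpl, so the mailbox memory must not
 * be freed by the caller in that case:
 *
 *     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *     ... build the mailbox command in pmb ...
 *     rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *     if (rc != MBX_TIMEOUT)
 *             mempool_free(pmb, phba->mbox_mem_pool);
 */
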
12148  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12149  * @phba: Pointer to HBA context.
12150  *
12151  * This function is called to shutdown the driver's mailbox sub-system.
12152  * It first marks the mailbox sub-system as blocked to prevent any
12153  * asynchronous mailbox command from being issued off the pending mailbox
12154  * command queue. If the mailbox command sub-system shutdown is due to
12155  * HBA error conditions such as EEH or ERATT, this routine shall invoke
12156  * the mailbox sub-system flush routine to forcefully bring down the
12157  * mailbox sub-system. Otherwise, if it is due to normal condition (such
12158  * as with offline or HBA function reset), this routine will wait for the
12159  * outstanding mailbox command to complete before invoking the mailbox
12160  * sub-system flush routine to gracefully bring down mailbox sub-system.
12161  **/
12162 void
12163 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12164 {
12165         struct lpfc_sli *psli = &phba->sli;
12166         unsigned long timeout;
12167
12168         if (mbx_action == LPFC_MBX_NO_WAIT) {
12169                 /* delay 100ms for port state */
12170                 msleep(100);
12171                 lpfc_sli_mbox_sys_flush(phba);
12172                 return;
12173         }
12174         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12175
12176         /* Disable softirqs, including timers from obtaining phba->hbalock */
12177         local_bh_disable();
12178
12179         spin_lock_irq(&phba->hbalock);
12180         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12181
12182         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12183                 /* Determine how long we might wait for the active mailbox
12184                  * command to be gracefully completed by firmware.
12185                  */
12186                 if (phba->sli.mbox_active)
12187                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12188                                                 phba->sli.mbox_active) *
12189                                                 1000) + jiffies;
12190                 spin_unlock_irq(&phba->hbalock);
12191
12192                 /* Enable softirqs again, done with phba->hbalock */
12193                 local_bh_enable();
12194
12195                 while (phba->sli.mbox_active) {
12196                         /* Check active mailbox complete status every 2ms */
12197                         msleep(2);
12198                         if (time_after(jiffies, timeout))
12199                         /* Timeout, let the mailbox flush routine
12200                          * forcefully release the active mailbox command
12201                          */
12202                                 break;
12203                 }
12204         } else {
12205                 spin_unlock_irq(&phba->hbalock);
12206
12207                 /* Enable softirqs again, done with phba->hbalock */
12208                 local_bh_enable();
12209         }
12210
12211         lpfc_sli_mbox_sys_flush(phba);
12212 }
12213
12214 /**
12215  * lpfc_sli_eratt_read - read sli-3 error attention events
12216  * @phba: Pointer to HBA context.
12217  *
12218  * This function is called to read the SLI3 device error attention registers
12219  * for possible error attention events. The caller must hold the hostlock
12220  * with spin_lock_irq().
12221  *
12222  * This function returns 1 when there is Error Attention in the Host Attention
12223  * Register and returns 0 otherwise.
12224  **/
12225 static int
12226 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12227 {
12228         uint32_t ha_copy;
12229
12230         /* Read chip Host Attention (HA) register */
12231         if (lpfc_readl(phba->HAregaddr, &ha_copy))
12232                 goto unplug_err;
12233
12234         if (ha_copy & HA_ERATT) {
12235                 /* Read host status register to retrieve error event */
12236                 if (lpfc_sli_read_hs(phba))
12237                         goto unplug_err;
12238
12239                 /* Check if a deferred error condition is active */
12240                 if ((HS_FFER1 & phba->work_hs) &&
12241                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12242                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12243                         phba->hba_flag |= DEFER_ERATT;
12244                         /* Clear all interrupt enable conditions */
12245                         writel(0, phba->HCregaddr);
12246                         readl(phba->HCregaddr);
12247                 }
12248
12249                 /* Set the driver HA work bitmap */
12250                 phba->work_ha |= HA_ERATT;
12251                 /* Indicate polling handles this ERATT */
12252                 phba->hba_flag |= HBA_ERATT_HANDLED;
12253                 return 1;
12254         }
12255         return 0;
12256
12257 unplug_err:
12258         /* Set the driver HS work bitmap */
12259         phba->work_hs |= UNPLUG_ERR;
12260         /* Set the driver HA work bitmap */
12261         phba->work_ha |= HA_ERATT;
12262         /* Indicate polling handles this ERATT */
12263         phba->hba_flag |= HBA_ERATT_HANDLED;
12264         return 1;
12265 }
12266
12267 /**
12268  * lpfc_sli4_eratt_read - read sli-4 error attention events
12269  * @phba: Pointer to HBA context.
12270  *
12271  * This function is called to read the SLI4 device error attention registers
12272  * for possible error attention events. The caller must hold the hostlock
12273  * with spin_lock_irq().
12274  *
12275  * This function returns 1 when there is Error Attention in the Host Attention
12276  * Register and returns 0 otherwise.
12277  **/
12278 static int
12279 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12280 {
12281         uint32_t uerr_sta_hi, uerr_sta_lo;
12282         uint32_t if_type, portsmphr;
12283         struct lpfc_register portstat_reg;
12284
12285         /*
12286          * For now, use the SLI4 device internal unrecoverable error
12287          * registers for error attention. This can be changed later.
12288          */
12289         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12290         switch (if_type) {
12291         case LPFC_SLI_INTF_IF_TYPE_0:
12292                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12293                         &uerr_sta_lo) ||
12294                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12295                         &uerr_sta_hi)) {
12296                         phba->work_hs |= UNPLUG_ERR;
12297                         phba->work_ha |= HA_ERATT;
12298                         phba->hba_flag |= HBA_ERATT_HANDLED;
12299                         return 1;
12300                 }
12301                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12302                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12303                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12304                                         "1423 HBA Unrecoverable error: "
12305                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12306                                         "ue_mask_lo_reg=0x%x, "
12307                                         "ue_mask_hi_reg=0x%x\n",
12308                                         uerr_sta_lo, uerr_sta_hi,
12309                                         phba->sli4_hba.ue_mask_lo,
12310                                         phba->sli4_hba.ue_mask_hi);
12311                         phba->work_status[0] = uerr_sta_lo;
12312                         phba->work_status[1] = uerr_sta_hi;
12313                         phba->work_ha |= HA_ERATT;
12314                         phba->hba_flag |= HBA_ERATT_HANDLED;
12315                         return 1;
12316                 }
12317                 break;
12318         case LPFC_SLI_INTF_IF_TYPE_2:
12319         case LPFC_SLI_INTF_IF_TYPE_6:
12320                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12321                         &portstat_reg.word0) ||
12322                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12323                         &portsmphr)){
12324                         phba->work_hs |= UNPLUG_ERR;
12325                         phba->work_ha |= HA_ERATT;
12326                         phba->hba_flag |= HBA_ERATT_HANDLED;
12327                         return 1;
12328                 }
12329                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12330                         phba->work_status[0] =
12331                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12332                         phba->work_status[1] =
12333                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12334                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12335                                         "2885 Port Status Event: "
12336                                         "port status reg 0x%x, "
12337                                         "port smphr reg 0x%x, "
12338                                         "error 1=0x%x, error 2=0x%x\n",
12339                                         portstat_reg.word0,
12340                                         portsmphr,
12341                                         phba->work_status[0],
12342                                         phba->work_status[1]);
12343                         phba->work_ha |= HA_ERATT;
12344                         phba->hba_flag |= HBA_ERATT_HANDLED;
12345                         return 1;
12346                 }
12347                 break;
12348         case LPFC_SLI_INTF_IF_TYPE_1:
12349         default:
12350                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12351                                 "2886 HBA Error Attention on unsupported "
12352                                 "if type %d.\n", if_type);
12353                 return 1;
12354         }
12355
12356         return 0;
12357 }
12358
12359 /**
12360  * lpfc_sli_check_eratt - check error attention events
12361  * @phba: Pointer to HBA context.
12362  *
12363  * This function is called from timer soft interrupt context to check HBA's
12364  * error attention register bit for error attention events.
12365  *
12366  * This function returns 1 when there is Error Attention in the Host Attention
12367  * Register and returns 0 otherwise.
12368  **/
12369 int
12370 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12371 {
12372         uint32_t ha_copy;
12373
12374         /* If somebody is waiting to handle an eratt, don't process it
12375          * here. The brdkill function will do this.
12376          */
12377         if (phba->link_flag & LS_IGNORE_ERATT)
12378                 return 0;
12379
12380         /* Check if interrupt handler handles this ERATT */
12381         spin_lock_irq(&phba->hbalock);
12382         if (phba->hba_flag & HBA_ERATT_HANDLED) {
12383                 /* Interrupt handler has handled ERATT */
12384                 spin_unlock_irq(&phba->hbalock);
12385                 return 0;
12386         }
12387
12388         /*
12389          * If there is deferred error attention, do not check for error
12390          * attention
12391          */
12392         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12393                 spin_unlock_irq(&phba->hbalock);
12394                 return 0;
12395         }
12396
12397         /* If PCI channel is offline, don't process it */
12398         if (unlikely(pci_channel_offline(phba->pcidev))) {
12399                 spin_unlock_irq(&phba->hbalock);
12400                 return 0;
12401         }
12402
12403         switch (phba->sli_rev) {
12404         case LPFC_SLI_REV2:
12405         case LPFC_SLI_REV3:
12406                 /* Read chip Host Attention (HA) register */
12407                 ha_copy = lpfc_sli_eratt_read(phba);
12408                 break;
12409         case LPFC_SLI_REV4:
12410                 /* Read device Unrecoverable Error (UERR) registers */
12411                 ha_copy = lpfc_sli4_eratt_read(phba);
12412                 break;
12413         default:
12414                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12415                                 "0299 Invalid SLI revision (%d)\n",
12416                                 phba->sli_rev);
12417                 ha_copy = 0;
12418                 break;
12419         }
12420         spin_unlock_irq(&phba->hbalock);
12421
12422         return ha_copy;
12423 }
12424
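/*
 * Usage sketch (hypothetical): a timer-context caller checks for error
 * attention and, when one is latched, kicks the worker thread to handle
 * the HA_ERATT work bit set above:
 *
 *     if (lpfc_sli_check_eratt(phba))
 *             lpfc_worker_wake_up(phba);
 */
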
12425 /**
12426  * lpfc_intr_state_check - Check device state for interrupt handling
12427  * @phba: Pointer to HBA context.
12428  *
12429  * This inline routine checks whether a device or its PCI slot is in a state
12430  * that the interrupt should be handled.
12431  *
12432  * This function returns 0 if the device or the PCI slot is in a state that
12433  * interrupt should be handled, otherwise -EIO.
12434  */
12435 static inline int
12436 lpfc_intr_state_check(struct lpfc_hba *phba)
12437 {
12438         /* If the pci channel is offline, ignore all the interrupts */
12439         if (unlikely(pci_channel_offline(phba->pcidev)))
12440                 return -EIO;
12441
12442         /* Update device level interrupt statistics */
12443         phba->sli.slistat.sli_intr++;
12444
12445         /* Ignore all interrupts during initialization. */
12446         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12447                 return -EIO;
12448
12449         return 0;
12450 }
12451
12452 /**
12453  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12454  * @irq: Interrupt number.
12455  * @dev_id: The device context pointer.
12456  *
12457  * This function is directly called from the PCI layer as an interrupt
12458  * service routine when a device with the SLI-3 interface spec is enabled with
12459  * MSI-X multi-message interrupt mode and there are slow-path events in
12460  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12461  * interrupt mode, this function is called as part of the device-level
12462  * interrupt handler. When the PCI slot is in error recovery or the HBA
12463  * is undergoing initialization, the interrupt handler will not process
12464  * the interrupt. The link attention and ELS ring attention events are
12465  * handled by the worker thread. The interrupt handler signals the worker
12466  * thread and returns for these events. This function is called without
12467  * any lock held. It gets the hbalock to access and update SLI data
12468  * structures.
12469  *
12470  * This function returns IRQ_HANDLED when interrupt is handled else it
12471  * returns IRQ_NONE.
12472  **/
12473 irqreturn_t
12474 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12475 {
12476         struct lpfc_hba  *phba;
12477         uint32_t ha_copy, hc_copy;
12478         uint32_t work_ha_copy;
12479         unsigned long status;
12480         unsigned long iflag;
12481         uint32_t control;
12482
12483         MAILBOX_t *mbox, *pmbox;
12484         struct lpfc_vport *vport;
12485         struct lpfc_nodelist *ndlp;
12486         struct lpfc_dmabuf *mp;
12487         LPFC_MBOXQ_t *pmb;
12488         int rc;
12489
12490         /*
12491          * Get the driver's phba structure from the dev_id and
12492          * assume the HBA is not interrupting.
12493          */
12494         phba = (struct lpfc_hba *)dev_id;
12495
12496         if (unlikely(!phba))
12497                 return IRQ_NONE;
12498
12499         /*
12500          * Extra handling is needed when this function is invoked as an
12501          * individual interrupt handler in MSI-X multi-message interrupt mode.
12502          */
12503         if (phba->intr_type == MSIX) {
12504                 /* Check device state for handling interrupt */
12505                 if (lpfc_intr_state_check(phba))
12506                         return IRQ_NONE;
12507                 /* Need to read HA REG for slow-path events */
12508                 spin_lock_irqsave(&phba->hbalock, iflag);
12509                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12510                         goto unplug_error;
12511                 /* If somebody is waiting to handle an eratt don't process it
12512                  * here. The brdkill function will do this.
12513                  */
12514                 if (phba->link_flag & LS_IGNORE_ERATT)
12515                         ha_copy &= ~HA_ERATT;
12516                 /* Check the need for handling ERATT in interrupt handler */
12517                 if (ha_copy & HA_ERATT) {
12518                         if (phba->hba_flag & HBA_ERATT_HANDLED)
12519                                 /* ERATT polling has handled ERATT */
12520                                 ha_copy &= ~HA_ERATT;
12521                         else
12522                                 /* Indicate interrupt handler handles ERATT */
12523                                 phba->hba_flag |= HBA_ERATT_HANDLED;
12524                 }
12525
12526                 /*
12527                  * If there is deferred error attention, do not check for any
12528                  * interrupt.
12529                  */
12530                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12531                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12532                         return IRQ_NONE;
12533                 }
12534
12535                 /* Clear up only attention source related to slow-path */
12536                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12537                         goto unplug_error;
12538
12539                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12540                         HC_LAINT_ENA | HC_ERINT_ENA),
12541                         phba->HCregaddr);
12542                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12543                         phba->HAregaddr);
12544                 writel(hc_copy, phba->HCregaddr);
12545                 readl(phba->HAregaddr); /* flush */
12546                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12547         } else
12548                 ha_copy = phba->ha_copy;
12549
12550         work_ha_copy = ha_copy & phba->work_ha_mask;
12551
12552         if (work_ha_copy) {
12553                 if (work_ha_copy & HA_LATT) {
12554                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12555                                 /*
12556                                  * Turn off Link Attention interrupts
12557                                  * until CLEAR_LA done
12558                                  */
12559                                 spin_lock_irqsave(&phba->hbalock, iflag);
12560                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12561                                 if (lpfc_readl(phba->HCregaddr, &control))
12562                                         goto unplug_error;
12563                                 control &= ~HC_LAINT_ENA;
12564                                 writel(control, phba->HCregaddr);
12565                                 readl(phba->HCregaddr); /* flush */
12566                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12567                         }
12568                         else
12569                                 work_ha_copy &= ~HA_LATT;
12570                 }
12571
12572                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12573                         /*
12574                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12575                          * the only slow ring.
12576                          */
12577                         status = (work_ha_copy &
12578                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
12579                         status >>= (4*LPFC_ELS_RING);
12580                         if (status & HA_RXMASK) {
12581                                 spin_lock_irqsave(&phba->hbalock, iflag);
12582                                 if (lpfc_readl(phba->HCregaddr, &control))
12583                                         goto unplug_error;
12584
12585                                 lpfc_debugfs_slow_ring_trc(phba,
12586                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
12587                                 control, status,
12588                                 (uint32_t)phba->sli.slistat.sli_intr);
12589
12590                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12591                                         lpfc_debugfs_slow_ring_trc(phba,
12592                                                 "ISR Disable ring: "
12593                                                 "pwork:x%x hawork:x%x wait:x%x",
12594                                                 phba->work_ha, work_ha_copy,
12595                                                 (uint32_t)((unsigned long)
12596                                                 &phba->work_waitq));
12597
12598                                         control &=
12599                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
12600                                         writel(control, phba->HCregaddr);
12601                                         readl(phba->HCregaddr); /* flush */
12602                                 }
12603                                 else {
12604                                         lpfc_debugfs_slow_ring_trc(phba,
12605                                                 "ISR slow ring:   pwork:"
12606                                                 "x%x hawork:x%x wait:x%x",
12607                                                 phba->work_ha, work_ha_copy,
12608                                                 (uint32_t)((unsigned long)
12609                                                 &phba->work_waitq));
12610                                 }
12611                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12612                         }
12613                 }
12614                 spin_lock_irqsave(&phba->hbalock, iflag);
12615                 if (work_ha_copy & HA_ERATT) {
12616                         if (lpfc_sli_read_hs(phba))
12617                                 goto unplug_error;
12618                         /*
12619                          * Check if a deferred error condition
12620                          * is active
12621                          */
12622                         if ((HS_FFER1 & phba->work_hs) &&
12623                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12624                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
12625                                   phba->work_hs)) {
12626                                 phba->hba_flag |= DEFER_ERATT;
12627                                 /* Clear all interrupt enable conditions */
12628                                 writel(0, phba->HCregaddr);
12629                                 readl(phba->HCregaddr);
12630                         }
12631                 }
12632
12633                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12634                         pmb = phba->sli.mbox_active;
12635                         pmbox = &pmb->u.mb;
12636                         mbox = phba->mbox;
12637                         vport = pmb->vport;
12638
12639                         /* First check out the status word */
12640                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12641                         if (pmbox->mbxOwner != OWN_HOST) {
12642                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12643                                 /*
12644                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
12645                                  * mbxStatus <status>
12646                                  */
12647                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12648                                                 LOG_SLI,
12649                                                 "(%d):0304 Stray Mailbox "
12650                                                 "Interrupt mbxCommand x%x "
12651                                                 "mbxStatus x%x\n",
12652                                                 (vport ? vport->vpi : 0),
12653                                                 pmbox->mbxCommand,
12654                                                 pmbox->mbxStatus);
12655                                 /* clear mailbox attention bit */
12656                                 work_ha_copy &= ~HA_MBATT;
12657                         } else {
12658                                 phba->sli.mbox_active = NULL;
12659                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12660                                 phba->last_completion_time = jiffies;
12661                                 del_timer(&phba->sli.mbox_tmo);
12662                                 if (pmb->mbox_cmpl) {
12663                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
12664                                                         MAILBOX_CMD_SIZE);
12665                                         if (pmb->out_ext_byte_len &&
12666                                                 pmb->ctx_buf)
12667                                                 lpfc_sli_pcimem_bcopy(
12668                                                 phba->mbox_ext,
12669                                                 pmb->ctx_buf,
12670                                                 pmb->out_ext_byte_len);
12671                                 }
12672                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12673                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12674
12675                                         lpfc_debugfs_disc_trc(vport,
12676                                                 LPFC_DISC_TRC_MBOX_VPORT,
12677                                                 "MBOX dflt rpi: "
12678                                                 "status:x%x rpi:x%x",
12679                                                 (uint32_t)pmbox->mbxStatus,
12680                                                 pmbox->un.varWords[0], 0);
12681
12682                                         if (!pmbox->mbxStatus) {
12683                                                 mp = (struct lpfc_dmabuf *)
12684                                                         (pmb->ctx_buf);
12685                                                 ndlp = (struct lpfc_nodelist *)
12686                                                         pmb->ctx_ndlp;
12687
12688                                                 /* Reg_LOGIN of dflt RPI was
12689                                                  * successful. Now let's get
12690                                                  * rid of the RPI using the
12691                                                  * same mbox buffer.
12692                                                  */
12693                                                 lpfc_unreg_login(phba,
12694                                                         vport->vpi,
12695                                                         pmbox->un.varWords[0],
12696                                                         pmb);
12697                                                 pmb->mbox_cmpl =
12698                                                         lpfc_mbx_cmpl_dflt_rpi;
12699                                                 pmb->ctx_buf = mp;
12700                                                 pmb->ctx_ndlp = ndlp;
12701                                                 pmb->vport = vport;
12702                                                 rc = lpfc_sli_issue_mbox(phba,
12703                                                                 pmb,
12704                                                                 MBX_NOWAIT);
12705                                                 if (rc != MBX_BUSY)
12706                                                         lpfc_printf_log(phba,
12707                                                         KERN_ERR,
12708                                                         LOG_MBOX | LOG_SLI,
12709                                                         "0350 rc should have "
12710                                                         "been MBX_BUSY\n");
12711                                                 if (rc != MBX_NOT_FINISHED)
12712                                                         goto send_current_mbox;
12713                                         }
12714                                 }
12715                                 spin_lock_irqsave(
12716                                                 &phba->pport->work_port_lock,
12717                                                 iflag);
12718                                 phba->pport->work_port_events &=
12719                                         ~WORKER_MBOX_TMO;
12720                                 spin_unlock_irqrestore(
12721                                                 &phba->pport->work_port_lock,
12722                                                 iflag);
12723                                 lpfc_mbox_cmpl_put(phba, pmb);
12724                         }
12725                 } else
12726                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12727
12728                 if ((work_ha_copy & HA_MBATT) &&
12729                     (phba->sli.mbox_active == NULL)) {
12730 send_current_mbox:
12731                         /* Process next mailbox command if there is one */
12732                         do {
12733                                 rc = lpfc_sli_issue_mbox(phba, NULL,
12734                                                          MBX_NOWAIT);
12735                         } while (rc == MBX_NOT_FINISHED);
12736                         if (rc != MBX_SUCCESS)
12737                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12738                                                 LOG_SLI, "0349 rc should be "
12739                                                 "MBX_SUCCESS\n");
12740                 }
12741
12742                 spin_lock_irqsave(&phba->hbalock, iflag);
12743                 phba->work_ha |= work_ha_copy;
12744                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12745                 lpfc_worker_wake_up(phba);
12746         }
12747         return IRQ_HANDLED;
12748 unplug_error:
12749         spin_unlock_irqrestore(&phba->hbalock, iflag);
12750         return IRQ_HANDLED;
12751
12752 } /* lpfc_sli_sp_intr_handler */
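
/*
 * Worked example of the HA-register ring arithmetic used above, assuming
 * the usual ring indices (LPFC_FCP_RING = 0, LPFC_EXTRA_RING = 1,
 * LPFC_ELS_RING = 2): each ring owns a 4-bit attention nibble in the HA
 * register, so the ELS-ring events live at bits 8-11. The handler isolates
 * them with (ha_copy & (HA_RXMASK << 8)) >> 8 and then tests the result
 * against HA_RXMASK.
 */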
12753
12754 /**
12755  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12756  * @irq: Interrupt number.
12757  * @dev_id: The device context pointer.
12758  *
12759  * This function is directly called from the PCI layer as an interrupt
12760  * service routine when a device with the SLI-3 interface spec is enabled with
12761  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12762  * ring event in the HBA. However, when the device is enabled with either
12763  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12764  * device-level interrupt handler. When the PCI slot is in error recovery
12765  * or the HBA is undergoing initialization, the interrupt handler will not
12766  * process the interrupt. The SCSI FCP fast-path ring events are handled in
12767  * the interrupt context. This function is called without any lock held.
12768  * It gets the hbalock to access and update SLI data structures.
12769  *
12770  * This function returns IRQ_HANDLED when interrupt is handled else it
12771  * returns IRQ_NONE.
12772  **/
12773 irqreturn_t
12774 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12775 {
12776         struct lpfc_hba  *phba;
12777         uint32_t ha_copy;
12778         unsigned long status;
12779         unsigned long iflag;
12780         struct lpfc_sli_ring *pring;
12781
12782         /* Get the driver's phba structure from the dev_id and
12783          * assume the HBA is not interrupting.
12784          */
12785         phba = (struct lpfc_hba *) dev_id;
12786
12787         if (unlikely(!phba))
12788                 return IRQ_NONE;
12789
12790         /*
12791          * Extra handling is needed when this function is invoked as an
12792          * individual interrupt handler in MSI-X multi-message interrupt mode.
12793          */
12794         if (phba->intr_type == MSIX) {
12795                 /* Check device state for handling interrupt */
12796                 if (lpfc_intr_state_check(phba))
12797                         return IRQ_NONE;
12798                 /* Need to read HA REG for FCP ring and other ring events */
12799                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12800                         return IRQ_HANDLED;
12801                 /* Clear up only attention source related to fast-path */
12802                 spin_lock_irqsave(&phba->hbalock, iflag);
12803                 /*
12804                  * If there is deferred error attention, do not check for
12805                  * any interrupt.
12806                  */
12807                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12808                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12809                         return IRQ_NONE;
12810                 }
12811                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12812                         phba->HAregaddr);
12813                 readl(phba->HAregaddr); /* flush */
12814                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12815         } else
12816                 ha_copy = phba->ha_copy;
12817
12818         /*
12819          * Process all events on FCP ring. Take the optimized path for FCP IO.
12820          */
12821         ha_copy &= ~(phba->work_ha_mask);
12822
12823         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12824         status >>= (4*LPFC_FCP_RING);
12825         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12826         if (status & HA_RXMASK)
12827                 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12828
12829         if (phba->cfg_multi_ring_support == 2) {
12830                 /*
12831                  * Process all events on extra ring. Take the optimized path
12832                  * for extra ring IO.
12833                  */
12834                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12835                 status >>= (4*LPFC_EXTRA_RING);
12836                 if (status & HA_RXMASK) {
12837                         lpfc_sli_handle_fast_ring_event(phba,
12838                                         &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12839                                         status);
12840                 }
12841         }
12842         return IRQ_HANDLED;
12843 }  /* lpfc_sli_fp_intr_handler */
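
/*
 * Illustrative sketch only (not built): how the two MSI-X vectors might be
 * wired to the slow-path and fast-path handlers above. The vector split
 * (0 = slow path, 1 = fast path) and the name strings are assumptions for
 * illustration; pci_irq_vector() is the generic PCI helper.
 */
#if 0
static int example_enable_msix(struct lpfc_hba *phba)
{
        int rc;

        rc = request_irq(pci_irq_vector(phba->pcidev, 0),
                         lpfc_sli_sp_intr_handler, 0, "lpfc-sp", phba);
        if (rc)
                return rc;

        rc = request_irq(pci_irq_vector(phba->pcidev, 1),
                         lpfc_sli_fp_intr_handler, 0, "lpfc-fp", phba);
        if (rc)
                free_irq(pci_irq_vector(phba->pcidev, 0), phba);

        return rc;
}
#endif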
12844
12845 /**
12846  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12847  * @irq: Interrupt number.
12848  * @dev_id: The device context pointer.
12849  *
12850  * This function is the HBA device-level interrupt handler for a device with
12851  * SLI-3 interface spec, called from the PCI layer when either MSI or
12852  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12853  * requires driver attention. This function invokes the slow-path interrupt
12854  * attention handling function and fast-path interrupt attention handling
12855  * function in turn to process the relevant HBA attention events. This
12856  * function is called without any lock held. It gets the hbalock to access
12857  * and update SLI data structures.
12858  *
12859  * This function returns IRQ_HANDLED when interrupt is handled, else it
12860  * returns IRQ_NONE.
12861  **/
12862 irqreturn_t
12863 lpfc_sli_intr_handler(int irq, void *dev_id)
12864 {
12865         struct lpfc_hba  *phba;
12866         irqreturn_t sp_irq_rc, fp_irq_rc;
12867         unsigned long status1, status2;
12868         uint32_t hc_copy;
12869
12870         /*
12871          * Get the driver's phba structure from the dev_id and
12872          * assume the HBA is not interrupting.
12873          */
12874         phba = (struct lpfc_hba *) dev_id;
12875
12876         if (unlikely(!phba))
12877                 return IRQ_NONE;
12878
12879         /* Check device state for handling interrupt */
12880         if (lpfc_intr_state_check(phba))
12881                 return IRQ_NONE;
12882
12883         spin_lock(&phba->hbalock);
12884         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12885                 spin_unlock(&phba->hbalock);
12886                 return IRQ_HANDLED;
12887         }
12888
12889         if (unlikely(!phba->ha_copy)) {
12890                 spin_unlock(&phba->hbalock);
12891                 return IRQ_NONE;
12892         } else if (phba->ha_copy & HA_ERATT) {
12893                 if (phba->hba_flag & HBA_ERATT_HANDLED)
12894                         /* ERATT polling has handled ERATT */
12895                         phba->ha_copy &= ~HA_ERATT;
12896                 else
12897                         /* Indicate interrupt handler handles ERATT */
12898                         phba->hba_flag |= HBA_ERATT_HANDLED;
12899         }
12900
12901         /*
12902          * If there is deferred error attention, do not check for any interrupt.
12903          */
12904         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12905                 spin_unlock(&phba->hbalock);
12906                 return IRQ_NONE;
12907         }
12908
12909         /* Clear attention sources except link and error attentions */
12910         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12911                 spin_unlock(&phba->hbalock);
12912                 return IRQ_HANDLED;
12913         }
12914         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12915                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12916                 phba->HCregaddr);
12917         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12918         writel(hc_copy, phba->HCregaddr);
12919         readl(phba->HAregaddr); /* flush */
12920         spin_unlock(&phba->hbalock);
12921
12922         /*
12923          * Invokes slow-path host attention interrupt handling as appropriate.
12924          */
12925
12926         /* status of events with mailbox and link attention */
12927         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12928
12929         /* status of events with ELS ring */
12930         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
12931         status2 >>= (4*LPFC_ELS_RING);
12932
12933         if (status1 || (status2 & HA_RXMASK))
12934                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12935         else
12936                 sp_irq_rc = IRQ_NONE;
12937
12938         /*
12939          * Invoke fast-path host attention interrupt handling as appropriate.
12940          */
12941
12942         /* status of events with FCP ring */
12943         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12944         status1 >>= (4*LPFC_FCP_RING);
12945
12946         /* status of events with extra ring */
12947         if (phba->cfg_multi_ring_support == 2) {
12948                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12949                 status2 >>= (4*LPFC_EXTRA_RING);
12950         } else
12951                 status2 = 0;
12952
12953         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12954                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12955         else
12956                 fp_irq_rc = IRQ_NONE;
12957
12958         /* Return device-level interrupt handling status */
12959         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12960 }  /* lpfc_sli_intr_handler */
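
/*
 * Illustrative note: in MSI or legacy INTx mode a single vector is
 * registered with this combined handler, e.g.
 * request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, IRQF_SHARED,
 * "lpfc", phba); IRQF_SHARED is an assumption here, reflecting that
 * legacy INTx lines may be shared.
 */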
12961
12962 /**
12963  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12964  * @phba: pointer to lpfc hba data structure.
12965  *
12966  * This routine is invoked by the worker thread to process all the pending
12967  * SLI4 ELS XRI abort events.
12968  **/
12969 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12970 {
12971         struct lpfc_cq_event *cq_event;
12972
12973         /* First, declare the els xri abort event has been handled */
12974         spin_lock_irq(&phba->hbalock);
12975         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12976         spin_unlock_irq(&phba->hbalock);
12977         /* Now, handle all the els xri abort events */
12978         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12979                 /* Get the first event from the head of the event queue */
12980                 spin_lock_irq(&phba->hbalock);
12981                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12982                                  cq_event, struct lpfc_cq_event, list);
12983                 spin_unlock_irq(&phba->hbalock);
12984                 /* Notify aborted XRI for ELS work queue */
12985                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12986                 /* Free the event processed back to the free pool */
12987                 lpfc_sli4_cq_event_release(phba, cq_event);
12988         }
12989 }
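
/*
 * The producer side of the list drained above is
 * lpfc_sli4_sp_handle_abort_xri_wcqe() later in this file, which queues
 * the aborted-XRI event and sets ELS_XRI_ABORT_EVENT under the same
 * hbalock so that its caller can post the work to the worker thread.
 */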
12990
12991 /**
12992  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12993  * @phba: pointer to lpfc hba data structure
12994  * @pIocbIn: pointer to the rspiocbq
12995  * @pIocbOut: pointer to the cmdiocbq
12996  * @wcqe: pointer to the completion wcqe
12997  *
12998  * This routine transfers the fields of a command iocbq to a response iocbq
12999  * by copying all the IOCB fields from the command iocbq and transferring the
13000  * completion status information from the completion wcqe.
13001  **/
13002 static void
13003 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13004                               struct lpfc_iocbq *pIocbIn,
13005                               struct lpfc_iocbq *pIocbOut,
13006                               struct lpfc_wcqe_complete *wcqe)
13007 {
13008         int numBdes, i;
13009         unsigned long iflags;
13010         uint32_t status, max_response;
13011         struct lpfc_dmabuf *dmabuf;
13012         struct ulp_bde64 *bpl, bde;
13013         size_t offset = offsetof(struct lpfc_iocbq, iocb);
13014
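        /* Copying starts at offsetof(struct lpfc_iocbq, iocb) so that the
         * fields ahead of the iocb member (such as the list head and
         * cq_event) are preserved; only the IOCB payload and the fields
         * after it are cloned from the command side.
         */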
13015         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13016                sizeof(struct lpfc_iocbq) - offset);
13017         /* Map WCQE parameters into irspiocb parameters */
13018         status = bf_get(lpfc_wcqe_c_status, wcqe);
13019         pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13020         if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13021                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13022                         pIocbIn->iocb.un.fcpi.fcpi_parm =
13023                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
13024                                         wcqe->total_data_placed;
13025                 else
13026                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13027         else {
13028                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13029                 switch (pIocbOut->iocb.ulpCommand) {
13030                 case CMD_ELS_REQUEST64_CR:
13031                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13032                         bpl  = (struct ulp_bde64 *)dmabuf->virt;
13033                         bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13034                         max_response = bde.tus.f.bdeSize;
13035                         break;
13036                 case CMD_GEN_REQUEST64_CR:
13037                         max_response = 0;
13038                         if (!pIocbOut->context3)
13039                                 break;
13040                         numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13041                                         sizeof(struct ulp_bde64);
13042                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13043                         bpl = (struct ulp_bde64 *)dmabuf->virt;
13044                         for (i = 0; i < numBdes; i++) {
13045                                 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13046                                 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13047                                         max_response += bde.tus.f.bdeSize;
13048                         }
13049                         break;
13050                 default:
13051                         max_response = wcqe->total_data_placed;
13052                         break;
13053                 }
13054                 if (max_response < wcqe->total_data_placed)
13055                         pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13056                 else
13057                         pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13058                                 wcqe->total_data_placed;
13059         }
13060
13061         /* Convert BG errors for completion status */
13062         if (status == CQE_STATUS_DI_ERROR) {
13063                 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13064
13065                 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13066                         pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13067                 else
13068                         pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13069
13070                 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13071                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13072                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13073                                 BGS_GUARD_ERR_MASK;
13074                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13075                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13076                                 BGS_APPTAG_ERR_MASK;
13077                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13078                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13079                                 BGS_REFTAG_ERR_MASK;
13080
13081                 /* Check to see if there was any good data before the error */
13082                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13083                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13084                                 BGS_HI_WATER_MARK_PRESENT_MASK;
13085                         pIocbIn->iocb.unsli3.sli3_bg.bghm =
13086                                 wcqe->total_data_placed;
13087                 }
13088
13089                 /*
13090                 * Set ALL the error bits to indicate we don't know what
13091                 * type of error it is.
13092                 */
13093                 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13094                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13095                                 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13096                                 BGS_GUARD_ERR_MASK);
13097         }
13098
13099         /* Pick up HBA exchange busy condition */
13100         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13101                 spin_lock_irqsave(&phba->hbalock, iflags);
13102                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13103                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13104         }
13105 }
13106
13107 /**
13108  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13109  * @phba: Pointer to HBA context object.
13110  * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
13111  *
13112  * This routine handles an ELS work-queue completion event and constructs
13113  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13114  * discovery engine to handle.
13115  *
13116  * Return: Pointer to the response IOCBQ, NULL otherwise.
13117  **/
13118 static struct lpfc_iocbq *
13119 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13120                                struct lpfc_iocbq *irspiocbq)
13121 {
13122         struct lpfc_sli_ring *pring;
13123         struct lpfc_iocbq *cmdiocbq;
13124         struct lpfc_wcqe_complete *wcqe;
13125         unsigned long iflags;
13126
13127         pring = lpfc_phba_elsring(phba);
13128         if (unlikely(!pring))
13129                 return NULL;
13130
13131         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13132         spin_lock_irqsave(&pring->ring_lock, iflags);
13133         pring->stats.iocb_event++;
13134         /* Look up the ELS command IOCB and create pseudo response IOCB */
13135         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13136                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13137         if (unlikely(!cmdiocbq)) {
13138                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13139                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13140                                 "0386 ELS complete with no corresponding "
13141                                 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13142                                 wcqe->word0, wcqe->total_data_placed,
13143                                 wcqe->parameter, wcqe->word3);
13144                 lpfc_sli_release_iocbq(phba, irspiocbq);
13145                 return NULL;
13146         }
13147
13148         /* Put the iocb back on the txcmplq */
13149         lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13150         spin_unlock_irqrestore(&pring->ring_lock, iflags);
13151
13152         /* Fake the irspiocbq and copy necessary response information */
13153         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13154
13155         return irspiocbq;
13156 }
13157
13158 inline struct lpfc_cq_event *
13159 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13160 {
13161         struct lpfc_cq_event *cq_event;
13162
13163         /* Allocate a new internal CQ_EVENT entry */
13164         cq_event = lpfc_sli4_cq_event_alloc(phba);
13165         if (!cq_event) {
13166                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13167                                 "0602 Failed to alloc CQ_EVENT entry\n");
13168                 return NULL;
13169         }
13170
13171         /* Move the CQE into the event */
13172         memcpy(&cq_event->cqe, entry, size);
13173         return cq_event;
13174 }
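
/*
 * Usage pattern (see the async-event and XRI-abort handlers that follow):
 * a CQE is copied into a pooled lpfc_cq_event, the event is queued for the
 * worker thread under hbalock, and it is eventually returned to the pool
 * with lpfc_sli4_cq_event_release().
 */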
13175
13176 /**
13177  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13178  * @phba: Pointer to HBA context object.
13179  * @mcqe: Pointer to mailbox completion queue entry.
13180  *
13181  * This routine processes a mailbox completion queue entry carrying an
13182  * asynchronous event.
13183  *
13184  * Return: true if work posted to worker thread, otherwise false.
13185  **/
13186 static bool
13187 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13188 {
13189         struct lpfc_cq_event *cq_event;
13190         unsigned long iflags;
13191
13192         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13193                         "0392 Async Event: word0:x%x, word1:x%x, "
13194                         "word2:x%x, word3:x%x\n", mcqe->word0,
13195                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13196
13197         cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13198         if (!cq_event)
13199                 return false;
13200         spin_lock_irqsave(&phba->hbalock, iflags);
13201         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13202         /* Set the async event flag */
13203         phba->hba_flag |= ASYNC_EVENT;
13204         spin_unlock_irqrestore(&phba->hbalock, iflags);
13205
13206         return true;
13207 }
13208
13209 /**
13210  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13211  * @phba: Pointer to HBA context object.
13212  * @mcqe: Pointer to mailbox completion queue entry.
13213  *
13214  * This routine processes a mailbox completion queue entry with a mailbox
13215  * completion event.
13216  *
13217  * Return: true if work posted to worker thread, otherwise false.
13218  **/
13219 static bool
13220 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13221 {
13222         uint32_t mcqe_status;
13223         MAILBOX_t *mbox, *pmbox;
13224         struct lpfc_mqe *mqe;
13225         struct lpfc_vport *vport;
13226         struct lpfc_nodelist *ndlp;
13227         struct lpfc_dmabuf *mp;
13228         unsigned long iflags;
13229         LPFC_MBOXQ_t *pmb;
13230         bool workposted = false;
13231         int rc;
13232
13233         /* If not a mailbox-complete MCQE, bail out via the mailbox-consume check */
13234         if (!bf_get(lpfc_trailer_completed, mcqe))
13235                 goto out_no_mqe_complete;
13236
13237         /* Get the reference to the active mbox command */
13238         spin_lock_irqsave(&phba->hbalock, iflags);
13239         pmb = phba->sli.mbox_active;
13240         if (unlikely(!pmb)) {
13241                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13242                                 "1832 No pending MBOX command to handle\n");
13243                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13244                 goto out_no_mqe_complete;
13245         }
13246         spin_unlock_irqrestore(&phba->hbalock, iflags);
13247         mqe = &pmb->u.mqe;
13248         pmbox = (MAILBOX_t *)&pmb->u.mqe;
13249         mbox = phba->mbox;
13250         vport = pmb->vport;
13251
13252         /* Reset heartbeat timer */
13253         phba->last_completion_time = jiffies;
13254         del_timer(&phba->sli.mbox_tmo);
13255
13256         /* Move mbox data to caller's mailbox region, do endian swapping */
13257         if (pmb->mbox_cmpl && mbox)
13258                 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13259
13260         /*
13261          * For mcqe errors, conditionally move a modified error code to
13262          * the mbox so that the error will not be missed.
13263          */
13264         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13265         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13266                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13267                         bf_set(lpfc_mqe_status, mqe,
13268                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
13269         }
13270         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13271                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13272                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13273                                       "MBOX dflt rpi: status:x%x rpi:x%x",
13274                                       mcqe_status,
13275                                       pmbox->un.varWords[0], 0);
13276                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13277                         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13278                         ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13279                         /* Reg_LOGIN of dflt RPI was successful. Now let's get
13280                          * rid of the RPI using the same mbox buffer.
13281                          */
13282                         lpfc_unreg_login(phba, vport->vpi,
13283                                          pmbox->un.varWords[0], pmb);
13284                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13285                         pmb->ctx_buf = mp;
13286                         pmb->ctx_ndlp = ndlp;
13287                         pmb->vport = vport;
13288                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13289                         if (rc != MBX_BUSY)
13290                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13291                                                 LOG_SLI, "0385 rc should "
13292                                                 "have been MBX_BUSY\n");
13293                         if (rc != MBX_NOT_FINISHED)
13294                                 goto send_current_mbox;
13295                 }
13296         }
13297         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13298         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13299         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13300
13301         /* There is mailbox completion work to do */
13302         spin_lock_irqsave(&phba->hbalock, iflags);
13303         __lpfc_mbox_cmpl_put(phba, pmb);
13304         phba->work_ha |= HA_MBATT;
13305         spin_unlock_irqrestore(&phba->hbalock, iflags);
13306         workposted = true;
13307
13308 send_current_mbox:
13309         spin_lock_irqsave(&phba->hbalock, iflags);
13310         /* Release the mailbox command posting token */
13311         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13312         /* Clearing the active mailbox pointer must be in sync with the flag clear */
13313         phba->sli.mbox_active = NULL;
13314         spin_unlock_irqrestore(&phba->hbalock, iflags);
13315         /* Wake up worker thread to post the next pending mailbox command */
13316         lpfc_worker_wake_up(phba);
13317 out_no_mqe_complete:
13318         if (bf_get(lpfc_trailer_consumed, mcqe))
13319                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13320         return workposted;
13321 }
13322
13323 /**
13324  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13325  * @phba: Pointer to HBA context object.
13326  * @cqe: Pointer to mailbox completion queue entry.
13327  *
13328  * This routine processes a mailbox completion queue entry; it invokes the
13329  * proper mailbox completion or asynchronous event handling routine
13330  * according to the MCQE's async bit.
13331  *
13332  * Return: true if work posted to worker thread, otherwise false.
13333  **/
13334 static bool
13335 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13336                          struct lpfc_cqe *cqe)
13337 {
13338         struct lpfc_mcqe mcqe;
13339         bool workposted;
13340
13341         cq->CQ_mbox++;
13342
13343         /* Copy the mailbox MCQE and convert endian order as needed */
13344         lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13345
13346         /* Invoke the proper event handling routine */
13347         if (!bf_get(lpfc_trailer_async, &mcqe))
13348                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13349         else
13350                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13351         return workposted;
13352 }
13353
13354 /**
13355  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13356  * @phba: Pointer to HBA context object.
13357  * @cq: Pointer to associated CQ
13358  * @wcqe: Pointer to work-queue completion queue entry.
13359  *
13360  * This routine handles an ELS work-queue completion event.
13361  *
13362  * Return: true if work posted to worker thread, otherwise false.
13363  **/
13364 static bool
13365 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13366                              struct lpfc_wcqe_complete *wcqe)
13367 {
13368         struct lpfc_iocbq *irspiocbq;
13369         unsigned long iflags;
13370         struct lpfc_sli_ring *pring = cq->pring;
13371         int txq_cnt = 0;
13372         int txcmplq_cnt = 0;
13373         int fcp_txcmplq_cnt = 0;
13374
13375         /* Check for response status */
13376         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13377                 /* Log the error status */
13378                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13379                                 "0357 ELS CQE error: status=x%x: "
13380                                 "CQE: %08x %08x %08x %08x\n",
13381                                 bf_get(lpfc_wcqe_c_status, wcqe),
13382                                 wcqe->word0, wcqe->total_data_placed,
13383                                 wcqe->parameter, wcqe->word3);
13384         }
13385
13386         /* Get an irspiocbq for later ELS response processing use */
13387         irspiocbq = lpfc_sli_get_iocbq(phba);
13388         if (!irspiocbq) {
13389                 if (!list_empty(&pring->txq))
13390                         txq_cnt++;
13391                 if (!list_empty(&pring->txcmplq))
13392                         txcmplq_cnt++;
13393                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13394                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13395                         "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13396                         txq_cnt, phba->iocb_cnt,
13397                         fcp_txcmplq_cnt,
13398                         txcmplq_cnt);
13399                 return false;
13400         }
13401
13402         /* Save off the slow-path queue event for work thread to process */
13403         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13404         spin_lock_irqsave(&phba->hbalock, iflags);
13405         list_add_tail(&irspiocbq->cq_event.list,
13406                       &phba->sli4_hba.sp_queue_event);
13407         phba->hba_flag |= HBA_SP_QUEUE_EVT;
13408         spin_unlock_irqrestore(&phba->hbalock, iflags);
13409
13410         return true;
13411 }
13412
13413 /**
13414  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13415  * @phba: Pointer to HBA context object.
13416  * @wcqe: Pointer to work-queue completion queue entry.
13417  *
13418  * This routine handles a slow-path WQ entry consumed event by invoking the
13419  * proper WQ release routine for the slow-path WQ.
13420  **/
13421 static void
13422 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13423                              struct lpfc_wcqe_release *wcqe)
13424 {
13425         /* sanity check on queue memory */
13426         if (unlikely(!phba->sli4_hba.els_wq))
13427                 return;
13428         /* Check for the slow-path ELS work queue */
13429         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13430                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13431                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13432         else
13433                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13434                                 "2579 Slow-path wqe consume event carries "
13435                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13436                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13437                                 phba->sli4_hba.els_wq->queue_id);
13438 }
13439
13440 /**
13441  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
13442  * @phba: Pointer to HBA context object.
13443  * @cq: Pointer to a WQ completion queue.
13444  * @wcqe: Pointer to work-queue completion queue entry.
13445  *
13446  * This routine handles an XRI abort event.
13447  *
13448  * Return: true if work posted to worker thread, otherwise false.
13449  **/
13450 static bool
13451 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13452                                    struct lpfc_queue *cq,
13453                                    struct sli4_wcqe_xri_aborted *wcqe)
13454 {
13455         bool workposted = false;
13456         struct lpfc_cq_event *cq_event;
13457         unsigned long iflags;
13458
13459         switch (cq->subtype) {
13460         case LPFC_FCP:
13461                 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
13462                 workposted = false;
13463                 break;
13464         case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13465         case LPFC_ELS:
13466                 cq_event = lpfc_cq_event_setup(
13467                         phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13468                 if (!cq_event)
13469                         return false;
13470                 cq_event->hdwq = cq->hdwq;
13471                 spin_lock_irqsave(&phba->hbalock, iflags);
13472                 list_add_tail(&cq_event->list,
13473                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13474                 /* Set the els xri abort event flag */
13475                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13476                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13477                 workposted = true;
13478                 break;
13479         case LPFC_NVME:
13480                 /* Notify aborted XRI for NVME work queue */
13481                 if (phba->nvmet_support)
13482                         lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13483                 else
13484                         lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
13485
13486                 workposted = false;
13487                 break;
13488         default:
13489                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13490                                 "0603 Invalid CQ subtype %d: "
13491                                 "%08x %08x %08x %08x\n",
13492                                 cq->subtype, wcqe->word0, wcqe->parameter,
13493                                 wcqe->word2, wcqe->word3);
13494                 workposted = false;
13495                 break;
13496         }
13497         return workposted;
13498 }
13499
13500 #define FC_RCTL_MDS_DIAGS       0xF4
13501
13502 /**
13503  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13504  * @phba: Pointer to HBA context object.
13505  * @rcqe: Pointer to receive-queue completion queue entry.
13506  *
13507  * This routine processes a receive-queue completion queue entry.
13508  *
13509  * Return: true if work posted to worker thread, otherwise false.
13510  **/
13511 static bool
13512 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13513 {
13514         bool workposted = false;
13515         struct fc_frame_header *fc_hdr;
13516         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13517         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13518         struct lpfc_nvmet_tgtport *tgtp;
13519         struct hbq_dmabuf *dma_buf;
13520         uint32_t status, rq_id;
13521         unsigned long iflags;
13522
13523         /* sanity check on queue memory */
13524         if (unlikely(!hrq) || unlikely(!drq))
13525                 return workposted;
13526
13527         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13528                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13529         else
13530                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13531         if (rq_id != hrq->queue_id)
13532                 goto out;
13533
13534         status = bf_get(lpfc_rcqe_status, rcqe);
13535         switch (status) {
13536         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13537                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13538                                 "2537 Receive Frame Truncated!!\n");
13539                 /* fall through */
13540         case FC_STATUS_RQ_SUCCESS:
13541                 spin_lock_irqsave(&phba->hbalock, iflags);
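                /* The header and data RQs advance in lockstep; one entry is
                 * released from each before claiming the posted buffer.
                 */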
13542                 lpfc_sli4_rq_release(hrq, drq);
13543                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13544                 if (!dma_buf) {
13545                         hrq->RQ_no_buf_found++;
13546                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13547                         goto out;
13548                 }
13549                 hrq->RQ_rcv_buf++;
13550                 hrq->RQ_buf_posted--;
13551                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13552
13553                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13554
13555                 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13556                     fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13557                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13558                         /* Handle MDS Loopback frames */
13559                         lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13560                         break;
13561                 }
13562
13563                 /* save off the frame for the work thread to process */
13564                 list_add_tail(&dma_buf->cq_event.list,
13565                               &phba->sli4_hba.sp_queue_event);
13566                 /* Frame received */
13567                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13568                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13569                 workposted = true;
13570                 break;
13571         case FC_STATUS_INSUFF_BUF_FRM_DISC:
13572                 if (phba->nvmet_support) {
13573                         tgtp = phba->targetport->private;
13574                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13575                                         "6402 RQE Error x%x, posted %d err_cnt "
13576                                         "%d: %x %x %x\n",
13577                                         status, hrq->RQ_buf_posted,
13578                                         hrq->RQ_no_posted_buf,
13579                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
13580                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
13581                                         atomic_read(&tgtp->xmt_fcp_release));
13582                 }
13583                 /* fallthrough */
13584
13585         case FC_STATUS_INSUFF_BUF_NEED_BUF:
13586                 hrq->RQ_no_posted_buf++;
13587                 /* Post more buffers if possible */
13588                 spin_lock_irqsave(&phba->hbalock, iflags);
13589                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13590                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13591                 workposted = true;
13592                 break;
13593         }
13594 out:
13595         return workposted;
13596 }
13597
13598 /**
13599  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13600  * @phba: Pointer to HBA context object.
13601  * @cq: Pointer to the completion queue.
13602  * @cqe: Pointer to a completion queue entry.
13603  *
13604  * This routine processes a slow-path work-queue or receive-queue completion
13605  * queue entry.
13606  *
13607  * Return: true if work posted to worker thread, otherwise false.
13608  **/
13609 static bool
13610 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13611                          struct lpfc_cqe *cqe)
13612 {
13613         struct lpfc_cqe cqevt;
13614         bool workposted = false;
13615
13616         /* Copy the work queue CQE and convert endian order if needed */
13617         lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13618
13619         /* Check and process for different type of WCQE and dispatch */
13620         switch (bf_get(lpfc_cqe_code, &cqevt)) {
13621         case CQE_CODE_COMPL_WQE:
13622                 /* Process the WQ/RQ complete event */
13623                 phba->last_completion_time = jiffies;
13624                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13625                                 (struct lpfc_wcqe_complete *)&cqevt);
13626                 break;
13627         case CQE_CODE_RELEASE_WQE:
13628                 /* Process the WQ release event */
13629                 lpfc_sli4_sp_handle_rel_wcqe(phba,
13630                                 (struct lpfc_wcqe_release *)&cqevt);
13631                 break;
13632         case CQE_CODE_XRI_ABORTED:
13633                 /* Process the WQ XRI abort event */
13634                 phba->last_completion_time = jiffies;
13635                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13636                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
13637                 break;
13638         case CQE_CODE_RECEIVE:
13639         case CQE_CODE_RECEIVE_V1:
13640                 /* Process the RQ event */
13641                 phba->last_completion_time = jiffies;
13642                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13643                                 (struct lpfc_rcqe *)&cqevt);
13644                 break;
13645         default:
13646                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13647                                 "0388 Not a valid WCQE code: x%x\n",
13648                                 bf_get(lpfc_cqe_code, &cqevt));
13649                 break;
13650         }
13651         return workposted;
13652 }
13653
13654 /**
13655  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13656  * @phba: Pointer to HBA context object.
13657  * @eqe: Pointer to the slow-path event queue entry.
13658  * @speq: Pointer to the slow-path event queue.
13659  *
13660  * This routine processes an event queue entry from the slow-path event queue.
13661  * It looks up the completion queue that corresponds to the entry's resource
13662  * id; if no such queue exists, an error is logged and the routine returns.
13663  * Otherwise, work is queued to process all the entries on that completion
13664  * queue and then rearm it.
13665  *
13666  **/
13667 static void
13668 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13669         struct lpfc_queue *speq)
13670 {
13671         struct lpfc_queue *cq = NULL, *childq;
13672         uint16_t cqid;
13673
13674         /* Get the reference to the corresponding CQ */
13675         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13676
13677         list_for_each_entry(childq, &speq->child_list, list) {
13678                 if (childq->queue_id == cqid) {
13679                         cq = childq;
13680                         break;
13681                 }
13682         }
13683         if (unlikely(!cq)) {
13684                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13685                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13686                                         "0365 Slow-path CQ identifier "
13687                                         "(%d) does not exist\n", cqid);
13688                 return;
13689         }
13690
13691         /* Save EQ associated with this CQ */
13692         cq->assoc_qp = speq;
13693
13694         if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13695                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13696                                 "0390 Cannot schedule soft IRQ "
13697                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13698                                 cqid, cq->queue_id, smp_processor_id());
13699 }
13700
13701 /**
13702  * __lpfc_sli4_process_cq - Process elements of a CQ
13703  * @phba: Pointer to HBA context object.
13704  * @cq: Pointer to CQ to be processed
13705  * @handler: Routine to process each cqe
13706  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13707  *
13708  * This routine processes completion queue entries in a CQ. While a valid
13709  * queue element is found, the handler is called. During processing checks
13710  * are made for periodic doorbell writes to let the hardware know of
13711  * element consumption.
13712  *
13713  * If the max limit on cqes to process is hit, or there are no more valid
13714  * entries, the loop stops. If we processed a sufficient number of elements,
13715  * meaning there is sufficient load, rather than rearming and generating
13716  * another interrupt, a cq rescheduling delay will be set. A delay of 0
13717  * indicates no rescheduling.
13718  *
13719  * Returns True if work scheduled, False otherwise.
13720  **/
13721 static bool
13722 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13723         bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13724                         struct lpfc_cqe *), unsigned long *delay)
13725 {
13726         struct lpfc_cqe *cqe;
13727         bool workposted = false;
13728         int count = 0, consumed = 0;
13729         bool arm = true;
13730
13731         /* default - no reschedule */
13732         *delay = 0;
13733
13734         if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13735                 goto rearm_and_exit;
13736
13737         /* Process all the entries to the CQ */
13738         cqe = lpfc_sli4_cq_get(cq);
13739         while (cqe) {
13740 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME)
13741                 if (phba->ktime_on)
13742                         cq->isr_timestamp = ktime_get_ns();
13743                 else
13744                         cq->isr_timestamp = 0;
13745 #endif
13746                 workposted |= handler(phba, cq, cqe);
13747                 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13748
13749                 consumed++;
13750                 if (!(++count % cq->max_proc_limit))
13751                         break;
13752
13753                 if (!(count % cq->notify_interval)) {
13754                         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13755                                                 LPFC_QUEUE_NOARM);
13756                         consumed = 0;
13757                 }
13758
13759                 cqe = lpfc_sli4_cq_get(cq);
13760         }
13761         if (count >= phba->cfg_cq_poll_threshold) {
13762                 *delay = 1;
13763                 arm = false;
13764         }
13765
13766         /* Track the max number of CQEs processed in 1 EQ */
13767         if (count > cq->CQ_max_cqe)
13768                 cq->CQ_max_cqe = count;
13769
13770         cq->assoc_qp->EQ_cqe_cnt += count;
13771
13772         /* Catch the no cq entry condition */
13773         if (unlikely(count == 0))
13774                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13775                                 "0369 No entry from completion queue "
13776                                 "qid=%d\n", cq->queue_id);
13777
13778         cq->queue_claimed = 0;
13779
13780 rearm_and_exit:
13781         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13782                         arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13783
13784         return workposted;
13785 }
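
To make the calling contract concrete, here is a minimal sketch of a per-CQE handler and a work item built around __lpfc_sli4_process_cq. This is illustrative only: my_handle_cqe and my_cq_work are hypothetical names, and the driver's real dispatchers appear below.

static bool my_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			  struct lpfc_cqe *cqe)
{
	/* Decode one CQE here; return true only if work was handed
	 * to the worker thread.
	 */
	return false;
}

static void my_cq_work(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;

	/* Drain the CQ. A non-zero delay means "poll again later
	 * rather than rearm and take another interrupt".
	 */
	if (__lpfc_sli4_process_cq(phba, cq, my_handle_cqe, &delay))
		lpfc_worker_wake_up(phba);
	if (delay)
		queue_delayed_work_on(cq->chann, phba->wq,
				      &cq->sched_irqwork, delay);
}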
13786
13787 /**
13788  * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13789  * @cq: Pointer to CQ to process
13790  *
13791  * This routine calls the cq processing routine with a handler specific
13792  * to the type of queue bound to it.
13793  *
13794  * The CQ routine returns two values: the first is the calling status,
13795  * which indicates whether work was queued to the background discovery
13796  * thread. If true, the routine should wake up the discovery thread;
13797  * the second is the delay parameter. If non-zero, rather than rearming
13798  * the CQ and yet another interrupt, the CQ handler should be queued so
13799  * that it is processed in a subsequent polling action. The value of
13800  * the delay indicates when to reschedule it.
13801  **/
13802 static void
13803 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13804 {
13805         struct lpfc_hba *phba = cq->phba;
13806         unsigned long delay;
13807         bool workposted = false;
13808
13809         /* Process and rearm the CQ */
13810         switch (cq->type) {
13811         case LPFC_MCQ:
13812                 workposted |= __lpfc_sli4_process_cq(phba, cq,
13813                                                 lpfc_sli4_sp_handle_mcqe,
13814                                                 &delay);
13815                 break;
13816         case LPFC_WCQ:
13817                 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
13818                         workposted |= __lpfc_sli4_process_cq(phba, cq,
13819                                                 lpfc_sli4_fp_handle_cqe,
13820                                                 &delay);
13821                 else
13822                         workposted |= __lpfc_sli4_process_cq(phba, cq,
13823                                                 lpfc_sli4_sp_handle_cqe,
13824                                                 &delay);
13825                 break;
13826         default:
13827                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13828                                 "0370 Invalid completion queue type (%d)\n",
13829                                 cq->type);
13830                 return;
13831         }
13832
13833         if (delay) {
13834                 if (!queue_delayed_work_on(cq->chann, phba->wq,
13835                                            &cq->sched_spwork, delay))
13836                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13837                                 "0394 Cannot schedule soft IRQ "
13838                                 "for cqid=%d on CPU %d\n",
13839                                 cq->queue_id, cq->chann);
13840         }
13841
13842         /* wake up worker thread if there are works to be done */
13843         if (workposted)
13844                 lpfc_worker_wake_up(phba);
13845 }
13846
13847 /**
13848  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13849  *   interrupt
13850  * @work: pointer to work element
13851  *
13852  * Translates from the work element and calls the slow-path handler.
13853  **/
13854 static void
13855 lpfc_sli4_sp_process_cq(struct work_struct *work)
13856 {
13857         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13858
13859         __lpfc_sli4_sp_process_cq(cq);
13860 }
13861
13862 /**
13863  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13864  * @work: pointer to work element
13865  *
13866  * Translates from the work element and calls the slow-path handler.
13867  **/
13868 static void
13869 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13870 {
13871         struct lpfc_queue *cq = container_of(to_delayed_work(work),
13872                                         struct lpfc_queue, sched_spwork);
13873
13874         __lpfc_sli4_sp_process_cq(cq);
13875 }
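
Both trampolines rely on the kernel's container_of() idiom: the work item is embedded in struct lpfc_queue, so the work_struct pointer passed to the handler can be converted back to the queue that contains it, with to_delayed_work() bridging the delayed case. A generic, self-contained sketch of the pattern (struct my_ctx and the handlers are hypothetical):

#include <linux/workqueue.h>

struct my_ctx {
	int id;
	struct work_struct wk;		/* immediate work */
	struct delayed_work dwk;	/* timer-deferred work */
};

static void my_wk_fn(struct work_struct *work)
{
	/* work points at ctx->wk; recover the enclosing my_ctx */
	struct my_ctx *ctx = container_of(work, struct my_ctx, wk);

	(void)ctx;
}

static void my_dwk_fn(struct work_struct *work)
{
	/* delayed work embeds a work_struct; convert to the wrapper first */
	struct my_ctx *ctx = container_of(to_delayed_work(work),
					  struct my_ctx, dwk);

	(void)ctx;
}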
13876
13877 /**
13878  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13879  * @phba: Pointer to HBA context object.
13880  * @cq: Pointer to associated CQ
13881  * @wcqe: Pointer to work-queue completion queue entry.
13882  *
13883  * This routine processes a fast-path work-queue completion entry from the
13884  * fast-path event queue for FCP command response completion.
13885  **/
13886 static void
13887 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13888                              struct lpfc_wcqe_complete *wcqe)
13889 {
13890         struct lpfc_sli_ring *pring = cq->pring;
13891         struct lpfc_iocbq *cmdiocbq;
13892         struct lpfc_iocbq irspiocbq;
13893         unsigned long iflags;
13894
13895         /* Check for response status */
13896         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13897                 /* If resource errors reported from HBA, reduce queue
13898                  * depth of the SCSI device.
13899                  */
13900                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13901                      IOSTAT_LOCAL_REJECT)) &&
13902                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
13903                      IOERR_NO_RESOURCES))
13904                         phba->lpfc_rampdown_queue_depth(phba);
13905
13906                 /* Log the error status */
13907                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13908                                 "0373 FCP CQE error: status=x%x: "
13909                                 "CQE: %08x %08x %08x %08x\n",
13910                                 bf_get(lpfc_wcqe_c_status, wcqe),
13911                                 wcqe->word0, wcqe->total_data_placed,
13912                                 wcqe->parameter, wcqe->word3);
13913         }
13914
13915         /* Look up the FCP command IOCB and create pseudo response IOCB */
13916         spin_lock_irqsave(&pring->ring_lock, iflags);
13917         pring->stats.iocb_event++;
13918         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13919                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13920         spin_unlock_irqrestore(&pring->ring_lock, iflags);
13921         if (unlikely(!cmdiocbq)) {
13922                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13923                                 "0374 FCP complete with no corresponding "
13924                                 "cmdiocb: iotag (%d)\n",
13925                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13926                 return;
13927         }
13928 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13929         cmdiocbq->isr_timestamp = cq->isr_timestamp;
13930 #endif
13931         if (cmdiocbq->iocb_cmpl == NULL) {
13932                 if (cmdiocbq->wqe_cmpl) {
13933                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13934                                 spin_lock_irqsave(&phba->hbalock, iflags);
13935                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13936                                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13937                         }
13938
13939                         /* Pass the cmd_iocb and the wcqe to the upper layer */
13940                         (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13941                         return;
13942                 }
13943                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13944                                 "0375 FCP cmdiocb not callback function "
13945                                 "iotag: (%d)\n",
13946                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13947                 return;
13948         }
13949
13950         /* Fake the irspiocb and copy necessary response information */
13951         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13952
13953         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13954                 spin_lock_irqsave(&phba->hbalock, iflags);
13955                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13956                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13957         }
13958
13959         /* Pass the cmd_iocb and the rsp state to the upper layer */
13960         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13961 }
13962
13963 /**
13964  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13965  * @phba: Pointer to HBA context object.
13966  * @cq: Pointer to completion queue.
13967  * @wcqe: Pointer to work-queue completion queue entry.
13968  *
13969  * This routine handles a fast-path WQ entry consumed event by invoking the
13970  * proper WQ release routine on the matching child work queue.
13971  **/
13972 static void
13973 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13974                              struct lpfc_wcqe_release *wcqe)
13975 {
13976         struct lpfc_queue *childwq;
13977         bool wqid_matched = false;
13978         uint16_t hba_wqid;
13979
13980         /* Check for fast-path FCP work queue release */
13981         hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13982         list_for_each_entry(childwq, &cq->child_list, list) {
13983                 if (childwq->queue_id == hba_wqid) {
13984                         lpfc_sli4_wq_release(childwq,
13985                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13986                         if (childwq->q_flag & HBA_NVMET_WQFULL)
13987                                 lpfc_nvmet_wqfull_process(phba, childwq);
13988                         wqid_matched = true;
13989                         break;
13990                 }
13991         }
13992         /* Report warning log message if no match found */
13993         if (!wqid_matched)
13994                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13995                                 "2580 Fast-path wqe consume event carries "
13996                                 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13997 }
13998
13999 /**
14000  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14001  * @phba: Pointer to HBA context object.
14002  * @rcqe: Pointer to receive-queue completion queue entry.
14003  *
14004  * This routine process a receive-queue completion queue entry.
14005  *
14006  * Return: true if work posted to worker thread, otherwise false.
14007  **/
14008 static bool
14009 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14010                             struct lpfc_rcqe *rcqe)
14011 {
14012         bool workposted = false;
14013         struct lpfc_queue *hrq;
14014         struct lpfc_queue *drq;
14015         struct rqb_dmabuf *dma_buf;
14016         struct fc_frame_header *fc_hdr;
14017         struct lpfc_nvmet_tgtport *tgtp;
14018         uint32_t status, rq_id;
14019         unsigned long iflags;
14020         uint32_t fctl, idx;
14021
14022         if ((phba->nvmet_support == 0) ||
14023             (phba->sli4_hba.nvmet_cqset == NULL))
14024                 return workposted;
14025
14026         idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14027         hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14028         drq = phba->sli4_hba.nvmet_mrq_data[idx];
14029
14030         /* sanity check on queue memory */
14031         if (unlikely(!hrq) || unlikely(!drq))
14032                 return workposted;
14033
14034         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14035                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14036         else
14037                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14038
14039         if ((phba->nvmet_support == 0) ||
14040             (rq_id != hrq->queue_id))
14041                 return workposted;
14042
14043         status = bf_get(lpfc_rcqe_status, rcqe);
14044         switch (status) {
14045         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14046                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14047                                 "6126 Receive Frame Truncated!!\n");
14048                 /* fall through */
14049         case FC_STATUS_RQ_SUCCESS:
14050                 spin_lock_irqsave(&phba->hbalock, iflags);
14051                 lpfc_sli4_rq_release(hrq, drq);
14052                 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14053                 if (!dma_buf) {
14054                         hrq->RQ_no_buf_found++;
14055                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14056                         goto out;
14057                 }
14058                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14059                 hrq->RQ_rcv_buf++;
14060                 hrq->RQ_buf_posted--;
14061                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14062
14063                 /* Just some basic sanity checks on FCP Command frame */
14064                 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14065                         fc_hdr->fh_f_ctl[1] << 8 |
14066                         fc_hdr->fh_f_ctl[2]);
14067                 if (((fctl &
14068                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14069                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14070                     (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14071                         goto drop;
14072
14073                 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14074                         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length,  rcqe);
14075                         lpfc_nvmet_unsol_fcp_event(
14076                                 phba, idx, dma_buf,
14077                                 cq->isr_timestamp);
14078                         return false;
14079                 }
14080 drop:
14081                 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14082                 break;
14083         case FC_STATUS_INSUFF_BUF_FRM_DISC:
14084                 if (phba->nvmet_support) {
14085                         tgtp = phba->targetport->private;
14086                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
14087                                         "6401 RQE Error x%x, posted %d err_cnt "
14088                                         "%d: %x %x %x\n",
14089                                         status, hrq->RQ_buf_posted,
14090                                         hrq->RQ_no_posted_buf,
14091                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
14092                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
14093                                         atomic_read(&tgtp->xmt_fcp_release));
14094                 }
14095                 /* fall through */
14096
14097         case FC_STATUS_INSUFF_BUF_NEED_BUF:
14098                 hrq->RQ_no_posted_buf++;
14099                 /* Post more buffers if possible */
14100                 break;
14101         }
14102 out:
14103         return workposted;
14104 }
14105
14106 /**
14107  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14108  * @phba: Pointer to the adapter that owns this cq.
14109  * @cq: Pointer to the completion queue.
14110  * @cqe: Pointer to fast-path completion queue entry.
14111  *
14112  * This routine processes a fast-path work-queue completion entry from the
14113  * fast-path event queue for FCP command response completion.
14114  *
14115  * Return: true if work posted to worker thread, otherwise false.
14116  **/
14117 static bool
14118 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14119                          struct lpfc_cqe *cqe)
14120 {
14121         struct lpfc_wcqe_release wcqe;
14122         bool workposted = false;
14123
14124         /* Copy the work queue CQE and convert endian order if needed */
14125         lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14126
14127         /* Check and process for different type of WCQE and dispatch */
14128         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14129         case CQE_CODE_COMPL_WQE:
14130         case CQE_CODE_NVME_ERSP:
14131                 cq->CQ_wq++;
14132                 /* Process the WQ complete event */
14133                 phba->last_completion_time = jiffies;
14134                 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME ||
14135                     cq->subtype == LPFC_NVME_LS)
14136                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14137                                 (struct lpfc_wcqe_complete *)&wcqe);
14140                 break;
14141         case CQE_CODE_RELEASE_WQE:
14142                 cq->CQ_release_wqe++;
14143                 /* Process the WQ release event */
14144                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14145                                 (struct lpfc_wcqe_release *)&wcqe);
14146                 break;
14147         case CQE_CODE_XRI_ABORTED:
14148                 cq->CQ_xri_aborted++;
14149                 /* Process the WQ XRI abort event */
14150                 phba->last_completion_time = jiffies;
14151                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14152                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
14153                 break;
14154         case CQE_CODE_RECEIVE_V1:
14155         case CQE_CODE_RECEIVE:
14156                 phba->last_completion_time = jiffies;
14157                 if (cq->subtype == LPFC_NVMET) {
14158                         workposted = lpfc_sli4_nvmet_handle_rcqe(
14159                                 phba, cq, (struct lpfc_rcqe *)&wcqe);
14160                 }
14161                 break;
14162         default:
14163                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14164                                 "0144 Not a valid CQE code: x%x\n",
14165                                 bf_get(lpfc_wcqe_c_code, &wcqe));
14166                 break;
14167         }
14168         return workposted;
14169 }
14170
14171 /**
14172  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14173  * @phba: Pointer to HBA context object.
14174  * @eq: Pointer to the fast-path event queue.
14175  * @eqe: Pointer to fast-path event queue entry.
14176  *
14177  * This routine processes an event queue entry from the fast-path event queue.
14178  * It checks the MajorCode and MinorCode to determine whether this is a
14179  * completion event on a completion queue; if not, an error is logged and the
14180  * routine returns. Otherwise, it finds the corresponding completion queue and
14181  * schedules work to process all the entries on that queue and rearm it.
14182  **/
14183 static void
14184 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14185                          struct lpfc_eqe *eqe)
14186 {
14187         struct lpfc_queue *cq = NULL;
14188         uint32_t qidx = eq->hdwq;
14189         uint16_t cqid, id;
14190
14191         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14192                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14193                                 "0366 Not a valid completion "
14194                                 "event: majorcode=x%x, minorcode=x%x\n",
14195                                 bf_get_le32(lpfc_eqe_major_code, eqe),
14196                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
14197                 return;
14198         }
14199
14200         /* Get the reference to the corresponding CQ */
14201         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14202
14203         /* Use the fast lookup method first */
14204         if (cqid <= phba->sli4_hba.cq_max) {
14205                 cq = phba->sli4_hba.cq_lookup[cqid];
14206                 if (cq)
14207                         goto  work_cq;
14208         }
14209
14210         /* Next check for NVMET completion */
14211         if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14212                 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14213                 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14214                         /* Process NVMET unsol rcv */
14215                         cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14216                         goto  process_cq;
14217                 }
14218         }
14219
14220         if (phba->sli4_hba.nvmels_cq &&
14221             (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14222                 /* Process NVME unsol rcv */
14223                 cq = phba->sli4_hba.nvmels_cq;
14224         }
14225
14226         /* Otherwise this is a Slow path event */
14227         if (cq == NULL) {
14228                 lpfc_sli4_sp_handle_eqe(phba, eqe,
14229                                         phba->sli4_hba.hdwq[qidx].hba_eq);
14230                 return;
14231         }
14232
14233 process_cq:
14234         if (unlikely(cqid != cq->queue_id)) {
14235                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14236                                 "0368 Miss-matched fast-path completion "
14237                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14238                                 cqid, cq->queue_id);
14239                 return;
14240         }
14241
14242 work_cq:
14243         if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14244                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14245                                 "0363 Cannot schedule soft IRQ "
14246                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14247                                 cqid, cq->queue_id, smp_processor_id());
14248 }
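
The fast lookup above works because the driver keeps a cqid-indexed table that maps a completion queue id straight to its struct lpfc_queue, avoiding the child-list walk the slow path must do. A sketch of how such a table is populated at queue-setup time (the real population happens during initialization elsewhere in the driver, and the cq_max + 1 sizing is an assumption):

/* Illustrative only: direct-mapped cqid -> cq table for O(1) dispatch. */
static void my_register_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	/* assumes cq_lookup was allocated with cq_max + 1 slots */
	if (cq->queue_id <= phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_lookup[cq->queue_id] = cq;
}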
14249
14250 /**
14251  * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14252  * @cq: Pointer to CQ to be processed
14253  *
14254  * This routine calls the cq processing routine with the handler for
14255  * fast path CQEs.
14256  *
14257  * The CQ routine returns two values: the first is the calling status,
14258  * which indicates whether work was queued to the background discovery
14259  * thread. If true, the routine should wake up the discovery thread;
14260  * the second is the delay parameter. If non-zero, rather than rearming
14261  * the CQ and yet another interrupt, the CQ handler should be queued so
14262  * that it is processed in a subsequent polling action. The value of
14263  * the delay indicates when to reschedule it.
14264  **/
14265 static void
14266 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14267 {
14268         struct lpfc_hba *phba = cq->phba;
14269         unsigned long delay;
14270         bool workposted = false;
14271
14272         /* process and rearm the CQ */
14273         workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14274                                              &delay);
14275
14276         if (delay) {
14277                 if (!queue_delayed_work_on(cq->chann, phba->wq,
14278                                            &cq->sched_irqwork, delay))
14279                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14280                                 "0367 Cannot schedule soft IRQ "
14281                                 "for cqid=%d on CPU %d\n",
14282                                 cq->queue_id, cq->chann);
14283         }
14284
14285         /* wake up worker thread if there are works to be done */
14286         if (workposted)
14287                 lpfc_worker_wake_up(phba);
14288 }
14289
14290 /**
14291  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14292  *   interrupt
14293  * @work: pointer to work element
14294  *
14295  * Translates from the work element and calls the fast-path handler.
14296  **/
14297 static void
14298 lpfc_sli4_hba_process_cq(struct work_struct *work)
14299 {
14300         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14301
14302         __lpfc_sli4_hba_process_cq(cq);
14303 }
14304
14305 /**
14306  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14307  * @work: pointer to work element
14308  *
14309  * Translates from the work element and calls the fast-path handler.
14310  **/
14311 static void
14312 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14313 {
14314         struct lpfc_queue *cq = container_of(to_delayed_work(work),
14315                                         struct lpfc_queue, sched_irqwork);
14316
14317         __lpfc_sli4_hba_process_cq(cq);
14318 }
14319
14320 /**
14321  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14322  * @irq: Interrupt number.
14323  * @dev_id: The device context pointer.
14324  *
14325  * This function is directly called from the PCI layer as an interrupt
14326  * service routine when device with SLI-4 interface spec is enabled with
14327  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14328  * ring event in the HBA. However, when the device is enabled with either
14329  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14330  * device-level interrupt handler. When the PCI slot is in error recovery
14331  * or the HBA is undergoing initialization, the interrupt handler will not
14332  * process the interrupt. The SCSI FCP fast-path ring event are handled in
14333  * the intrrupt context. This function is called without any lock held.
14334  * It gets the hbalock to access and update SLI data structures. Note that,
14335  * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14336  * equal to that of FCP CQ index.
14337  *
14338  * The link attention and ELS ring attention events are handled
14339  * by the worker thread. The interrupt handler signals the worker thread
14340  * and returns for these events. This function is called without any lock
14341  * held. It gets the hbalock to access and update SLI data structures.
14342  *
14343  * This function returns IRQ_HANDLED when interrupt is handled else it
14344  * returns IRQ_NONE.
14345  **/
14346 irqreturn_t
14347 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14348 {
14349         struct lpfc_hba *phba;
14350         struct lpfc_hba_eq_hdl *hba_eq_hdl;
14351         struct lpfc_queue *fpeq;
14352         unsigned long iflag;
14353         int ecount = 0;
14354         int hba_eqidx;
14355         struct lpfc_eq_intr_info *eqi;
14356         uint32_t icnt;
14357
14358         /* Get the driver's phba structure from the dev_id */
14359         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14360         phba = hba_eq_hdl->phba;
14361         hba_eqidx = hba_eq_hdl->idx;
14362
14363         if (unlikely(!phba))
14364                 return IRQ_NONE;
14365         if (unlikely(!phba->sli4_hba.hdwq))
14366                 return IRQ_NONE;
14367
14368         /* Get to the EQ struct associated with this vector */
14369         fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
14370         if (unlikely(!fpeq))
14371                 return IRQ_NONE;
14372
14373         /* Check device state for handling interrupt */
14374         if (unlikely(lpfc_intr_state_check(phba))) {
14375                 /* Check again for link_state with lock held */
14376                 spin_lock_irqsave(&phba->hbalock, iflag);
14377                 if (phba->link_state < LPFC_LINK_DOWN)
14378                         /* Flush, clear interrupt, and rearm the EQ */
14379                         lpfc_sli4_eq_flush(phba, fpeq);
14380                 spin_unlock_irqrestore(&phba->hbalock, iflag);
14381                 return IRQ_NONE;
14382         }
14383
14384         eqi = phba->sli4_hba.eq_info;
14385         icnt = this_cpu_inc_return(eqi->icnt);
14386         fpeq->last_cpu = smp_processor_id();
14387
14388         if (icnt > LPFC_EQD_ISR_TRIGGER &&
14389             phba->cfg_irq_chann == 1 &&
14390             phba->cfg_auto_imax &&
14391             fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14392             phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14393                 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14394
14395         /* process and rearm the EQ */
14396         ecount = lpfc_sli4_process_eq(phba, fpeq);
14397
14398         if (unlikely(ecount == 0)) {
14399                 fpeq->EQ_no_entry++;
14400                 if (phba->intr_type == MSIX)
14401                         /* MSI-X vector is not shared; an empty EQ is unexpected */
14402                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14403                                         "0358 MSI-X interrupt with no EQE\n");
14404                 else
14405                         /* INTx/MSI may be shared; treat an empty EQ as not ours */
14406                         return IRQ_NONE;
14407         }
14408
14409         return IRQ_HANDLED;
14410 } /* lpfc_sli4_hba_intr_handler */
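
For context, the per-vector dev_id this ISR unpacks is installed when the MSI-X vectors are requested; that setup lives outside this excerpt. The sketch below only shows how a lpfc_hba_eq_hdl round-trips through request_irq() (the helper name and the "lpfc-eq" string are illustrative):

static int my_setup_vectors(struct lpfc_hba *phba)
{
	int idx, rc;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		phba->sli4_hba.hba_eq_hdl[idx].idx = idx;
		phba->sli4_hba.hba_eq_hdl[idx].phba = phba;
		/* dev_id is handed back to lpfc_sli4_hba_intr_handler() */
		rc = request_irq(pci_irq_vector(phba->pcidev, idx),
				 lpfc_sli4_hba_intr_handler, 0, "lpfc-eq",
				 &phba->sli4_hba.hba_eq_hdl[idx]);
		if (rc)
			return rc;
	}
	return 0;
}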
14411
14412 /**
14413  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14414  * @irq: Interrupt number.
14415  * @dev_id: The device context pointer.
14416  *
14417  * This function is the device-level interrupt handler to device with SLI-4
14418  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14419  * interrupt mode is enabled and there is an event in the HBA which requires
14420  * driver attention. This function invokes the slow-path interrupt attention
14421  * handling function and fast-path interrupt attention handling function in
14422  * turn to process the relevant HBA attention events. This function is called
14423  * without any lock held. It gets the hbalock to access and update SLI data
14424  * structures.
14425  *
14426  * This function returns IRQ_HANDLED when interrupt is handled, else it
14427  * returns IRQ_NONE.
14428  **/
14429 irqreturn_t
14430 lpfc_sli4_intr_handler(int irq, void *dev_id)
14431 {
14432         struct lpfc_hba  *phba;
14433         irqreturn_t hba_irq_rc;
14434         bool hba_handled = false;
14435         int qidx;
14436
14437         /* Get the driver's phba structure from the dev_id */
14438         phba = (struct lpfc_hba *)dev_id;
14439
14440         if (unlikely(!phba))
14441                 return IRQ_NONE;
14442
14443         /*
14444          * Invoke fast-path host attention interrupt handling as appropriate.
14445          */
14446         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14447                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14448                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
14449                 if (hba_irq_rc == IRQ_HANDLED)
14450                         hba_handled |= true;
14451         }
14452
14453         return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14454 } /* lpfc_sli4_intr_handler */
14455
14456 /**
14457  * lpfc_sli4_queue_free - free a queue structure and associated memory
14458  * @queue: The queue structure to free.
14459  *
14460  * This function frees a queue structure and the DMAable memory used for
14461  * the host resident queue. This function must be called after destroying the
14462  * queue on the HBA.
14463  **/
14464 void
14465 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14466 {
14467         struct lpfc_dmabuf *dmabuf;
14468
14469         if (!queue)
14470                 return;
14471
14472         if (!list_empty(&queue->wq_list))
14473                 list_del(&queue->wq_list);
14474
14475         while (!list_empty(&queue->page_list)) {
14476                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14477                                  list);
14478                 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14479                                   dmabuf->virt, dmabuf->phys);
14480                 kfree(dmabuf);
14481         }
14482         if (queue->rqbp) {
14483                 lpfc_free_rq_buffer(queue->phba, queue);
14484                 kfree(queue->rqbp);
14485         }
14486
14487         if (!list_empty(&queue->cpu_list))
14488                 list_del(&queue->cpu_list);
14489
14490         kfree(queue);
14491         return;
14492 }
14493
14494 /**
14495  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14496  * @phba: The HBA that this queue is being created on.
14497  * @page_size: The size of a queue page
14498  * @entry_size: The size of each queue entry for this queue.
14499  * @entry_count: The number of entries that this queue will handle.
14500  * @cpu: The cpu that will primarily utilize this queue.
14501  *
14502  * This function allocates a queue structure and the DMAable memory used for
14503  * the host resident queue. This function must be called before creating the
14504  * queue on the HBA.
14505  **/
14506 struct lpfc_queue *
14507 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14508                       uint32_t entry_size, uint32_t entry_count, int cpu)
14509 {
14510         struct lpfc_queue *queue;
14511         struct lpfc_dmabuf *dmabuf;
14512         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14513         uint16_t x, pgcnt;
14514
14515         if (!phba->sli4_hba.pc_sli4_params.supported)
14516                 hw_page_size = page_size;
14517
14518         pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14519
14520         /* If needed, adjust page count to match the max the adapter supports */
14521         if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14522                 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14523
14524         queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14525                              GFP_KERNEL, cpu_to_node(cpu));
14526         if (!queue)
14527                 return NULL;
14528
14529         INIT_LIST_HEAD(&queue->list);
14530         INIT_LIST_HEAD(&queue->wq_list);
14531         INIT_LIST_HEAD(&queue->wqfull_list);
14532         INIT_LIST_HEAD(&queue->page_list);
14533         INIT_LIST_HEAD(&queue->child_list);
14534         INIT_LIST_HEAD(&queue->cpu_list);
14535
14536         /* Set queue parameters now.  If the system cannot provide memory
14537          * resources, the free routine needs to know what was allocated.
14538          */
14539         queue->page_count = pgcnt;
14540         queue->q_pgs = (void **)&queue[1];
14541         queue->entry_cnt_per_pg = hw_page_size / entry_size;
14542         queue->entry_size = entry_size;
14543         queue->entry_count = entry_count;
14544         queue->page_size = hw_page_size;
14545         queue->phba = phba;
14546
14547         for (x = 0; x < queue->page_count; x++) {
14548                 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14549                                       dev_to_node(&phba->pcidev->dev));
14550                 if (!dmabuf)
14551                         goto out_fail;
14552                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14553                                                   hw_page_size, &dmabuf->phys,
14554                                                   GFP_KERNEL);
14555                 if (!dmabuf->virt) {
14556                         kfree(dmabuf);
14557                         goto out_fail;
14558                 }
14559                 dmabuf->buffer_tag = x;
14560                 list_add_tail(&dmabuf->list, &queue->page_list);
14561                 /* use lpfc_sli4_qe to index a particular entry in this page */
14562                 queue->q_pgs[x] = dmabuf->virt;
14563         }
14564         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14565         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14566         INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14567         INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14568
14569         /* notify_interval will be set during q creation */
14570
14571         return queue;
14572 out_fail:
14573         lpfc_sli4_queue_free(queue);
14574         return NULL;
14575 }
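
A typical pairing of this allocator with queue creation and teardown might look as follows. This is a sketch: the eq_esize/eq_ecount fields and the helper name are assumptions, while lpfc_sli4_queue_free() is safe to call on a fully or partially constructed queue.

static struct lpfc_queue *my_make_eq(struct lpfc_hba *phba, int cpu,
				     uint32_t imax)
{
	struct lpfc_queue *eq;

	/* Host-side allocation first, then create the queue on the port */
	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				   phba->sli4_hba.eq_esize,
				   phba->sli4_hba.eq_ecount, cpu);
	if (!eq)
		return NULL;

	if (lpfc_eq_create(phba, eq, imax)) {
		lpfc_sli4_queue_free(eq);	/* unwind host allocation */
		return NULL;
	}
	return eq;
}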
14576
14577 /**
14578  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14579  * @phba: HBA structure that indicates port to create a queue on.
14580  * @pci_barset: PCI BAR set flag.
14581  *
14582  * This function returns the host memory address to which the specified PCI
14583  * BAR set has already been iomapped. The returned host memory address can
14584  * be NULL.
14585  */
14586 static void __iomem *
14587 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14588 {
14589         if (!phba->pcidev)
14590                 return NULL;
14591
14592         switch (pci_barset) {
14593         case WQ_PCI_BAR_0_AND_1:
14594                 return phba->pci_bar0_memmap_p;
14595         case WQ_PCI_BAR_2_AND_3:
14596                 return phba->pci_bar2_memmap_p;
14597         case WQ_PCI_BAR_4_AND_5:
14598                 return phba->pci_bar4_memmap_p;
14599         default:
14600                 break;
14601         }
14602         return NULL;
14603 }
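
A caller that needs, say, a doorbell register living in BAR 2/3 resolves the mapping like this (sketch; error handling abbreviated):

	void __iomem *bar_memmap_p;

	bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, WQ_PCI_BAR_2_AND_3);
	if (!bar_memmap_p)
		return -ENODEV;	/* that BAR set was never iomapped */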
14604
14605 /**
14606  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14607  * @phba: HBA structure that EQs are on.
14608  * @startq: The starting EQ index to modify
14609  * @numq: The number of EQs (consecutive indexes) to modify
14610  * @usdelay: amount of delay, in microseconds
14611  *
14612  * This function revises the EQ delay on 1 or more EQs. The EQ delay
14613  * is set either by writing to a register (if supported by the SLI Port)
14614  * or by mailbox command. The mailbox command allows several EQs to be
14615  * updated at once.
14616  *
14617  * The @phba struct is used to send a mailbox command to HBA. The @startq
14618  * is used to get the starting EQ index to change. The @numq value is
14619  * used to specify how many consecutive EQ indexes, starting at EQ index,
14620  * are to be changed. This function is synchronous and will wait for any
14621  * mailbox commands to finish before returning.
14622  *
14623  * This function returns no value. If a mailbox buffer cannot be allocated,
14624  * or if a mailbox command fails, an error is logged and the routine returns
14625  * early; in the failure case some EQs may already have had their delay
14626  * multiplier changed.
14627  **/
14628 void
14629 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14630                          uint32_t numq, uint32_t usdelay)
14631 {
14632         struct lpfc_mbx_modify_eq_delay *eq_delay;
14633         LPFC_MBOXQ_t *mbox;
14634         struct lpfc_queue *eq;
14635         int cnt = 0, rc, length;
14636         uint32_t shdr_status, shdr_add_status;
14637         uint32_t dmult;
14638         int qidx;
14639         union lpfc_sli4_cfg_shdr *shdr;
14640
14641         if (startq >= phba->cfg_irq_chann)
14642                 return;
14643
14644         if (usdelay > 0xFFFF) {
14645                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14646                                 "6429 usdelay %d too large. Scaled down to "
14647                                 "0xFFFF.\n", usdelay);
14648                 usdelay = 0xFFFF;
14649         }
14650
14651         /* set values by EQ_DELAY register if supported */
14652         if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14653                 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14654                         eq = phba->sli4_hba.hdwq[qidx].hba_eq;
14655                         if (!eq)
14656                                 continue;
14657
14658                         lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14659
14660                         if (++cnt >= numq)
14661                                 break;
14662                 }
14663
14664                 return;
14665         }
14666
14667         /* Otherwise, set values by mailbox cmd */
14668
14669         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14670         if (!mbox) {
14671                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14672                                 "6428 Failed allocating mailbox cmd buffer."
14673                                 " EQ delay was not set.\n");
14674                 return;
14675         }
14676         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14677                   sizeof(struct lpfc_sli4_cfg_mhdr));
14678         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14679                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14680                          length, LPFC_SLI4_MBX_EMBED);
14681         eq_delay = &mbox->u.mqe.un.eq_delay;
14682
14683         /* Calculate delay multiplier from the requested delay in microseconds */
14684         dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14685         if (dmult)
14686                 dmult--;
14687         if (dmult > LPFC_DMULT_MAX)
14688                 dmult = LPFC_DMULT_MAX;
14689
14690         for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14691                 eq = phba->sli4_hba.hdwq[qidx].hba_eq;
14692                 if (!eq)
14693                         continue;
14694                 eq->q_mode = usdelay;
14695                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14696                 eq_delay->u.request.eq[cnt].phase = 0;
14697                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14698
14699                 if (++cnt >= numq)
14700                         break;
14701         }
14702         eq_delay->u.request.num_eq = cnt;
14703
14704         mbox->vport = phba->pport;
14705         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14706         mbox->ctx_buf = NULL;
14707         mbox->ctx_ndlp = NULL;
14708         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14709         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14710         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14711         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14712         if (shdr_status || shdr_add_status || rc) {
14713                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14714                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
14715                                 "status x%x add_status x%x, mbx status x%x\n",
14716                                 shdr_status, shdr_add_status, rc);
14717         }
14718         mempool_free(mbox, phba->mbox_mem_pool);
14719         return;
14720 }
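
For example, applying a uniform 16-microsecond coalescing delay to every EQ (16 being an arbitrary illustrative value) reduces to a single call:

	/* Revise all EQs, starting at index 0, to a 16us delay */
	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);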
14721
14722 /**
14723  * lpfc_eq_create - Create an Event Queue on the HBA
14724  * @phba: HBA structure that indicates port to create a queue on.
14725  * @eq: The queue structure to use to create the event queue.
14726  * @imax: The maximum interrupt per second limit.
14727  *
14728  * This function creates an event queue, as detailed in @eq, on a port,
14729  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14730  *
14731  * The @phba struct is used to send mailbox command to HBA. The @eq struct
14732  * is used to get the entry count and entry size that are necessary to
14733  * determine the number of pages to allocate and use for this queue. This
14734  * function will send the EQ_CREATE mailbox command to the HBA to setup the
14735  * event queue. This function is synchronous and will wait for the mailbox
14736  * command to finish before continuing.
14737  *
14738  * On success this function will return a zero. If unable to allocate enough
14739  * memory this function will return -ENOMEM. If the queue create mailbox command
14740  * fails this function will return -ENXIO.
14741  **/
14742 int
14743 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14744 {
14745         struct lpfc_mbx_eq_create *eq_create;
14746         LPFC_MBOXQ_t *mbox;
14747         int rc, length, status = 0;
14748         struct lpfc_dmabuf *dmabuf;
14749         uint32_t shdr_status, shdr_add_status;
14750         union lpfc_sli4_cfg_shdr *shdr;
14751         uint16_t dmult;
14752         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14753
14754         /* sanity check on queue memory */
14755         if (!eq)
14756                 return -ENODEV;
14757         if (!phba->sli4_hba.pc_sli4_params.supported)
14758                 hw_page_size = SLI4_PAGE_SIZE;
14759
14760         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14761         if (!mbox)
14762                 return -ENOMEM;
14763         length = (sizeof(struct lpfc_mbx_eq_create) -
14764                   sizeof(struct lpfc_sli4_cfg_mhdr));
14765         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14766                          LPFC_MBOX_OPCODE_EQ_CREATE,
14767                          length, LPFC_SLI4_MBX_EMBED);
14768         eq_create = &mbox->u.mqe.un.eq_create;
14769         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14770         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14771                eq->page_count);
14772         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14773                LPFC_EQE_SIZE);
14774         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14775
14776         /* Use version 2 of CREATE_EQ if eqav is set */
14777         if (phba->sli4_hba.pc_sli4_params.eqav) {
14778                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14779                        LPFC_Q_CREATE_VERSION_2);
14780                 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14781                        phba->sli4_hba.pc_sli4_params.eqav);
14782         }
14783
14784         /* don't setup delay multiplier using EQ_CREATE */
14785         dmult = 0;
14786         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14787                dmult);
14788         switch (eq->entry_count) {
14789         default:
14790                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14791                                 "0360 Unsupported EQ count. (%d)\n",
14792                                 eq->entry_count);
14793                 if (eq->entry_count < 256) {
14794                         status = -EINVAL;
14795                         goto out;
14796                 }
14795                 /* fall through - otherwise default to smallest count */
14796         case 256:
14797                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14798                        LPFC_EQ_CNT_256);
14799                 break;
14800         case 512:
14801                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14802                        LPFC_EQ_CNT_512);
14803                 break;
14804         case 1024:
14805                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14806                        LPFC_EQ_CNT_1024);
14807                 break;
14808         case 2048:
14809                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14810                        LPFC_EQ_CNT_2048);
14811                 break;
14812         case 4096:
14813                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14814                        LPFC_EQ_CNT_4096);
14815                 break;
14816         }
14817         list_for_each_entry(dmabuf, &eq->page_list, list) {
14818                 memset(dmabuf->virt, 0, hw_page_size);
14819                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14820                                         putPaddrLow(dmabuf->phys);
14821                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14822                                         putPaddrHigh(dmabuf->phys);
14823         }
14824         mbox->vport = phba->pport;
14825         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14826         mbox->ctx_buf = NULL;
14827         mbox->ctx_ndlp = NULL;
14828         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14829         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14830         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14831         if (shdr_status || shdr_add_status || rc) {
14832                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14833                                 "2500 EQ_CREATE mailbox failed with "
14834                                 "status x%x add_status x%x, mbx status x%x\n",
14835                                 shdr_status, shdr_add_status, rc);
14836                 status = -ENXIO;
14837         }
14838         eq->type = LPFC_EQ;
14839         eq->subtype = LPFC_NONE;
14840         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14841         if (eq->queue_id == 0xFFFF)
14842                 status = -ENXIO;
14843         eq->host_index = 0;
14844         eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14845         eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14846 out:
14847         mempool_free(mbox, phba->mbox_mem_pool);
14848         return status;
14849 }
14850
14851 /**
14852  * lpfc_cq_create - Create a Completion Queue on the HBA
14853  * @phba: HBA structure that indicates port to create a queue on.
14854  * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The type to assign to the completion queue.
 * @subtype: The subtype to assign to the completion queue.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba, by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
14868  * On success this function will return a zero. If unable to allocate enough
14869  * memory this function will return -ENOMEM. If the queue create mailbox command
14870  * fails this function will return -ENXIO.
14871  **/
14872 int
14873 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14874                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14875 {
14876         struct lpfc_mbx_cq_create *cq_create;
14877         struct lpfc_dmabuf *dmabuf;
14878         LPFC_MBOXQ_t *mbox;
14879         int rc, length, status = 0;
14880         uint32_t shdr_status, shdr_add_status;
14881         union lpfc_sli4_cfg_shdr *shdr;
14882
14883         /* sanity check on queue memory */
14884         if (!cq || !eq)
14885                 return -ENODEV;
14886
14887         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14888         if (!mbox)
14889                 return -ENOMEM;
14890         length = (sizeof(struct lpfc_mbx_cq_create) -
14891                   sizeof(struct lpfc_sli4_cfg_mhdr));
14892         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14893                          LPFC_MBOX_OPCODE_CQ_CREATE,
14894                          length, LPFC_SLI4_MBX_EMBED);
14895         cq_create = &mbox->u.mqe.un.cq_create;
14896         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14897         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14898                     cq->page_count);
14899         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14900         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14901         bf_set(lpfc_mbox_hdr_version, &shdr->request,
14902                phba->sli4_hba.pc_sli4_params.cqv);
14903         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14904                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14905                        (cq->page_size / SLI4_PAGE_SIZE));
14906                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14907                        eq->queue_id);
14908                 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14909                        phba->sli4_hba.pc_sli4_params.cqav);
14910         } else {
14911                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14912                        eq->queue_id);
14913         }
14914         switch (cq->entry_count) {
14915         case 2048:
14916         case 4096:
14917                 if (phba->sli4_hba.pc_sli4_params.cqv ==
14918                     LPFC_Q_CREATE_VERSION_2) {
14919                         cq_create->u.request.context.lpfc_cq_context_count =
14920                                 cq->entry_count;
14921                         bf_set(lpfc_cq_context_count,
14922                                &cq_create->u.request.context,
14923                                LPFC_CQ_CNT_WORD7);
14924                         break;
14925                 }
14926                 /* fall through */
14927         default:
14928                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14929                                 "0361 Unsupported CQ count: "
14930                                 "entry cnt %d sz %d pg cnt %d\n",
14931                                 cq->entry_count, cq->entry_size,
14932                                 cq->page_count);
14933                 if (cq->entry_count < 256) {
14934                         status = -EINVAL;
14935                         goto out;
14936                 }
14937                 /* fall through - otherwise default to smallest count */
14938         case 256:
14939                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14940                        LPFC_CQ_CNT_256);
14941                 break;
14942         case 512:
14943                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14944                        LPFC_CQ_CNT_512);
14945                 break;
14946         case 1024:
14947                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14948                        LPFC_CQ_CNT_1024);
14949                 break;
14950         }
14951         list_for_each_entry(dmabuf, &cq->page_list, list) {
14952                 memset(dmabuf->virt, 0, cq->page_size);
14953                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14954                                         putPaddrLow(dmabuf->phys);
14955                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14956                                         putPaddrHigh(dmabuf->phys);
14957         }
14958         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14959
14960         /* The IOCTL status is embedded in the mailbox subheader. */
14961         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14962         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14963         if (shdr_status || shdr_add_status || rc) {
14964                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14965                                 "2501 CQ_CREATE mailbox failed with "
14966                                 "status x%x add_status x%x, mbx status x%x\n",
14967                                 shdr_status, shdr_add_status, rc);
14968                 status = -ENXIO;
14969                 goto out;
14970         }
14971         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14972         if (cq->queue_id == 0xFFFF) {
14973                 status = -ENXIO;
14974                 goto out;
14975         }
14976         /* link the cq onto the parent eq child list */
14977         list_add_tail(&cq->list, &eq->child_list);
14978         /* Set up completion queue's type and subtype */
14979         cq->type = type;
14980         cq->subtype = subtype;
14982         cq->assoc_qid = eq->queue_id;
14983         cq->assoc_qp = eq;
14984         cq->host_index = 0;
14985         cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14986         cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14987
14988         if (cq->queue_id > phba->sli4_hba.cq_max)
14989                 phba->sli4_hba.cq_max = cq->queue_id;
14990 out:
14991         mempool_free(mbox, phba->mbox_mem_pool);
14992         return status;
14993 }
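
/*
 * Illustrative sketch, not driver code: every queue page above is handed to
 * the firmware as a low/high 32-bit pair, which is what the putPaddrLow() /
 * putPaddrHigh() calls in the page_list loops produce.  A standalone
 * equivalent using plain shifts and masks (struct dma_addr_pair is a
 * hypothetical stand-in for the mailbox's struct dma_address):
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct dma_addr_pair {
        uint32_t addr_lo;
        uint32_t addr_hi;
};

static struct dma_addr_pair split_paddr(uint64_t phys)
{
        struct dma_addr_pair p;

        p.addr_lo = (uint32_t)(phys & 0xffffffffu);     /* putPaddrLow()  */
        p.addr_hi = (uint32_t)(phys >> 32);             /* putPaddrHigh() */
        return p;
}

int main(void)
{
        struct dma_addr_pair p = split_paddr(0x12345678abcd0000ULL);

        /* prints lo=0xabcd0000 hi=0x12345678 */
        printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", p.addr_lo, p.addr_hi);
        return 0;
}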
14994
14995 /**
14996  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14997  * @phba: HBA structure that indicates port to create a queue on.
14998  * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQs to bind the completion queues to.
 * @type: The type to assign to the completion queues.
 * @subtype: The subtype to assign to the completion queues.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a CREATE_CQ_SET
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cqp
 * array is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for these queues. The EQ
 * in each @hdwq entry indicates which event queue to bind the corresponding
 * completion queue to. This function will send the CREATE_CQ_SET mailbox
 * command to the HBA to setup the completion queues. This function is
 * synchronous and will wait for the mailbox command to finish before
 * continuing.
15012  *
15013  * On success this function will return a zero. If unable to allocate enough
15014  * memory this function will return -ENOMEM. If the queue create mailbox command
15015  * fails this function will return -ENXIO.
15016  **/
15017 int
15018 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15019                    struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15020                    uint32_t subtype)
15021 {
15022         struct lpfc_queue *cq;
15023         struct lpfc_queue *eq;
15024         struct lpfc_mbx_cq_create_set *cq_set;
15025         struct lpfc_dmabuf *dmabuf;
15026         LPFC_MBOXQ_t *mbox;
15027         int rc, length, alloclen, status = 0;
15028         int cnt, idx, numcq, page_idx = 0;
15029         uint32_t shdr_status, shdr_add_status;
15030         union lpfc_sli4_cfg_shdr *shdr;
15031         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15032
15033         /* sanity check on queue memory */
15034         numcq = phba->cfg_nvmet_mrq;
15035         if (!cqp || !hdwq || !numcq)
15036                 return -ENODEV;
15037
15038         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15039         if (!mbox)
15040                 return -ENOMEM;
15041
15042         length = sizeof(struct lpfc_mbx_cq_create_set);
15043         length += ((numcq * cqp[0]->page_count) *
15044                    sizeof(struct dma_address));
15045         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15046                         LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15047                         LPFC_SLI4_MBX_NEMBED);
15048         if (alloclen < length) {
15049                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15050                                 "3098 Allocated DMA memory size (%d) is "
15051                                 "less than the requested DMA memory size "
15052                                 "(%d)\n", alloclen, length);
15053                 status = -ENOMEM;
15054                 goto out;
15055         }
15056         cq_set = mbox->sge_array->addr[0];
15057         shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15058         bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15059
15060         for (idx = 0; idx < numcq; idx++) {
15061                 cq = cqp[idx];
15062                 eq = hdwq[idx].hba_eq;
15063                 if (!cq || !eq) {
15064                         status = -ENOMEM;
15065                         goto out;
15066                 }
15067                 if (!phba->sli4_hba.pc_sli4_params.supported)
15068                         hw_page_size = cq->page_size;
15069
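                /*
                 * The CQ set request carries one eq_id field per CQ (up to
                 * 16), so each index selects its own field below; index 0
                 * also carries the set-wide parameters (page size, valid
                 * bit, CQE count encoding, number of CQs).
                 */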
15070                 switch (idx) {
15071                 case 0:
15072                         bf_set(lpfc_mbx_cq_create_set_page_size,
15073                                &cq_set->u.request,
15074                                (hw_page_size / SLI4_PAGE_SIZE));
15075                         bf_set(lpfc_mbx_cq_create_set_num_pages,
15076                                &cq_set->u.request, cq->page_count);
15077                         bf_set(lpfc_mbx_cq_create_set_evt,
15078                                &cq_set->u.request, 1);
15079                         bf_set(lpfc_mbx_cq_create_set_valid,
15080                                &cq_set->u.request, 1);
15081                         bf_set(lpfc_mbx_cq_create_set_cqe_size,
15082                                &cq_set->u.request, 0);
15083                         bf_set(lpfc_mbx_cq_create_set_num_cq,
15084                                &cq_set->u.request, numcq);
15085                         bf_set(lpfc_mbx_cq_create_set_autovalid,
15086                                &cq_set->u.request,
15087                                phba->sli4_hba.pc_sli4_params.cqav);
15088                         switch (cq->entry_count) {
15089                         case 2048:
15090                         case 4096:
15091                                 if (phba->sli4_hba.pc_sli4_params.cqv ==
15092                                     LPFC_Q_CREATE_VERSION_2) {
15093                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15094                                                &cq_set->u.request,
15095                                                 cq->entry_count);
15096                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15097                                                &cq_set->u.request,
15098                                                LPFC_CQ_CNT_WORD7);
15099                                         break;
15100                                 }
15101                                 /* fall through */
15102                         default:
15103                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15104                                                 "3118 Bad CQ count. (%d)\n",
15105                                                 cq->entry_count);
15106                                 if (cq->entry_count < 256) {
15107                                         status = -EINVAL;
15108                                         goto out;
15109                                 }
15110                                 /* fall through - otherwise default to smallest */
15111                         case 256:
15112                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15113                                        &cq_set->u.request, LPFC_CQ_CNT_256);
15114                                 break;
15115                         case 512:
15116                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15117                                        &cq_set->u.request, LPFC_CQ_CNT_512);
15118                                 break;
15119                         case 1024:
15120                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15121                                        &cq_set->u.request, LPFC_CQ_CNT_1024);
15122                                 break;
15123                         }
15124                         bf_set(lpfc_mbx_cq_create_set_eq_id0,
15125                                &cq_set->u.request, eq->queue_id);
15126                         break;
15127                 case 1:
15128                         bf_set(lpfc_mbx_cq_create_set_eq_id1,
15129                                &cq_set->u.request, eq->queue_id);
15130                         break;
15131                 case 2:
15132                         bf_set(lpfc_mbx_cq_create_set_eq_id2,
15133                                &cq_set->u.request, eq->queue_id);
15134                         break;
15135                 case 3:
15136                         bf_set(lpfc_mbx_cq_create_set_eq_id3,
15137                                &cq_set->u.request, eq->queue_id);
15138                         break;
15139                 case 4:
15140                         bf_set(lpfc_mbx_cq_create_set_eq_id4,
15141                                &cq_set->u.request, eq->queue_id);
15142                         break;
15143                 case 5:
15144                         bf_set(lpfc_mbx_cq_create_set_eq_id5,
15145                                &cq_set->u.request, eq->queue_id);
15146                         break;
15147                 case 6:
15148                         bf_set(lpfc_mbx_cq_create_set_eq_id6,
15149                                &cq_set->u.request, eq->queue_id);
15150                         break;
15151                 case 7:
15152                         bf_set(lpfc_mbx_cq_create_set_eq_id7,
15153                                &cq_set->u.request, eq->queue_id);
15154                         break;
15155                 case 8:
15156                         bf_set(lpfc_mbx_cq_create_set_eq_id8,
15157                                &cq_set->u.request, eq->queue_id);
15158                         break;
15159                 case 9:
15160                         bf_set(lpfc_mbx_cq_create_set_eq_id9,
15161                                &cq_set->u.request, eq->queue_id);
15162                         break;
15163                 case 10:
15164                         bf_set(lpfc_mbx_cq_create_set_eq_id10,
15165                                &cq_set->u.request, eq->queue_id);
15166                         break;
15167                 case 11:
15168                         bf_set(lpfc_mbx_cq_create_set_eq_id11,
15169                                &cq_set->u.request, eq->queue_id);
15170                         break;
15171                 case 12:
15172                         bf_set(lpfc_mbx_cq_create_set_eq_id12,
15173                                &cq_set->u.request, eq->queue_id);
15174                         break;
15175                 case 13:
15176                         bf_set(lpfc_mbx_cq_create_set_eq_id13,
15177                                &cq_set->u.request, eq->queue_id);
15178                         break;
15179                 case 14:
15180                         bf_set(lpfc_mbx_cq_create_set_eq_id14,
15181                                &cq_set->u.request, eq->queue_id);
15182                         break;
15183                 case 15:
15184                         bf_set(lpfc_mbx_cq_create_set_eq_id15,
15185                                &cq_set->u.request, eq->queue_id);
15186                         break;
15187                 }
15188
15189                 /* link the cq onto the parent eq child list */
15190                 list_add_tail(&cq->list, &eq->child_list);
15191                 /* Set up completion queue's type and subtype */
15192                 cq->type = type;
15193                 cq->subtype = subtype;
15194                 cq->assoc_qid = eq->queue_id;
15195                 cq->assoc_qp = eq;
15196                 cq->host_index = 0;
15197                 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15198                 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15199                                          cq->entry_count);
15200                 cq->chann = idx;
15201
15202                 rc = 0;
15203                 list_for_each_entry(dmabuf, &cq->page_list, list) {
15204                         memset(dmabuf->virt, 0, hw_page_size);
15205                         cnt = page_idx + dmabuf->buffer_tag;
15206                         cq_set->u.request.page[cnt].addr_lo =
15207                                         putPaddrLow(dmabuf->phys);
15208                         cq_set->u.request.page[cnt].addr_hi =
15209                                         putPaddrHigh(dmabuf->phys);
15210                         rc++;
15211                 }
15212                 page_idx += rc;
15213         }
15214
15215         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15216
15217         /* The IOCTL status is embedded in the mailbox subheader. */
15218         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15219         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15220         if (shdr_status || shdr_add_status || rc) {
15221                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15222                                 "3119 CQ_CREATE_SET mailbox failed with "
15223                                 "status x%x add_status x%x, mbx status x%x\n",
15224                                 shdr_status, shdr_add_status, rc);
15225                 status = -ENXIO;
15226                 goto out;
15227         }
15228         rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15229         if (rc == 0xFFFF) {
15230                 status = -ENXIO;
15231                 goto out;
15232         }
15233
15234         for (idx = 0; idx < numcq; idx++) {
15235                 cq = cqp[idx];
15236                 cq->queue_id = rc + idx;
15237                 if (cq->queue_id > phba->sli4_hba.cq_max)
15238                         phba->sli4_hba.cq_max = cq->queue_id;
15239         }
15240
15241 out:
15242         lpfc_sli4_mbox_cmd_free(phba, mbox);
15243         return status;
15244 }
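
/*
 * Illustrative sketch, not driver code: CREATE_CQ_SET is a non-embedded
 * mailbox command, so lpfc_cq_create_set() first sizes the external buffer
 * (one DMA address entry per page per CQ) and, on success, derives every
 * queue id from the single base id the firmware returns.  A standalone
 * model of those two computations; DMA_ADDRESS_SIZE is an assumed
 * sizeof(struct dma_address):
 */
#include <stddef.h>
#include <stdio.h>

#define DMA_ADDRESS_SIZE 8

static size_t cq_set_payload_len(size_t base_len, int numcq, int page_count)
{
        /* mirrors: length += (numcq * page_count) * sizeof(struct dma_address) */
        return base_len + (size_t)numcq * page_count * DMA_ADDRESS_SIZE;
}

int main(void)
{
        int numcq = 4, base_id = 100, idx;

        printf("payload = %zu bytes\n", cq_set_payload_len(64, numcq, 8));
        /* queue ids are consecutive from the returned base id */
        for (idx = 0; idx < numcq; idx++)
                printf("cq[%d].queue_id = %d\n", idx, base_id + idx);
        return 0;
}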
15245
15246 /**
15247  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15248  * @phba: HBA structure that indicates port to create a queue on.
15249  * @mq: The queue structure to use to create the mailbox queue.
15250  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations.  Its purpose is identical
 * to mq_create_ext otherwise.
15256  *
15257  * This routine cannot fail as all attributes were previously accessed and
15258  * initialized in mq_create_ext.
15259  **/
15260 static void
15261 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15262                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15263 {
15264         struct lpfc_mbx_mq_create *mq_create;
15265         struct lpfc_dmabuf *dmabuf;
15266         int length;
15267
15268         length = (sizeof(struct lpfc_mbx_mq_create) -
15269                   sizeof(struct lpfc_sli4_cfg_mhdr));
15270         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15271                          LPFC_MBOX_OPCODE_MQ_CREATE,
15272                          length, LPFC_SLI4_MBX_EMBED);
15273         mq_create = &mbox->u.mqe.un.mq_create;
15274         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15275                mq->page_count);
15276         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15277                cq->queue_id);
15278         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15279         switch (mq->entry_count) {
15280         case 16:
15281                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15282                        LPFC_MQ_RING_SIZE_16);
15283                 break;
15284         case 32:
15285                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15286                        LPFC_MQ_RING_SIZE_32);
15287                 break;
15288         case 64:
15289                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15290                        LPFC_MQ_RING_SIZE_64);
15291                 break;
15292         case 128:
15293                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15294                        LPFC_MQ_RING_SIZE_128);
15295                 break;
15296         }
15297         list_for_each_entry(dmabuf, &mq->page_list, list) {
15298                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15299                         putPaddrLow(dmabuf->phys);
15300                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15301                         putPaddrHigh(dmabuf->phys);
15302         }
15303 }
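
/*
 * Illustrative sketch, not driver code: each *_create routine in this file
 * checks both the mailbox return code and the IOCTL status embedded in the
 * config sub-header (the bf_get(lpfc_mbox_hdr_status/add_status) pairs).
 * A standalone model of pulling two packed status fields out of a response
 * word; the 8-bit field layout here is assumed for illustration only:
 */
#include <stdint.h>
#include <stdio.h>

#define SHDR_STATUS(w)          ((w) & 0xffu)
#define SHDR_ADD_STATUS(w)      (((w) >> 8) & 0xffu)

static int check_create_result(int mbx_rc, uint32_t shdr_word)
{
        uint32_t status = SHDR_STATUS(shdr_word);
        uint32_t add_status = SHDR_ADD_STATUS(shdr_word);

        /* any of the three being nonzero means the queue was not created */
        if (status || add_status || mbx_rc) {
                fprintf(stderr,
                        "create failed: status x%x add_status x%x mbx x%x\n",
                        (unsigned)status, (unsigned)add_status,
                        (unsigned)mbx_rc);
                return -1;      /* the driver returns -ENXIO here */
        }
        return 0;
}

int main(void)
{
        int ok = check_create_result(0, 0x0000);        /* success path   */
        int bad = check_create_result(0, 0x0102);       /* firmware error */

        return (ok == 0 && bad != 0) ? 0 : 1;
}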
15304
15305 /**
15306  * lpfc_mq_create - Create a mailbox Queue on the HBA
15307  * @phba: HBA structure that indicates port to create a queue on.
15308  * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba, by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
15321  *
15322  * On success this function will return a zero. If unable to allocate enough
15323  * memory this function will return -ENOMEM. If the queue create mailbox command
15324  * fails this function will return -ENXIO.
15325  **/
15326 int32_t
15327 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15328                struct lpfc_queue *cq, uint32_t subtype)
15329 {
15330         struct lpfc_mbx_mq_create *mq_create;
15331         struct lpfc_mbx_mq_create_ext *mq_create_ext;
15332         struct lpfc_dmabuf *dmabuf;
15333         LPFC_MBOXQ_t *mbox;
15334         int rc, length, status = 0;
15335         uint32_t shdr_status, shdr_add_status;
15336         union lpfc_sli4_cfg_shdr *shdr;
15337         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15338
15339         /* sanity check on queue memory */
15340         if (!mq || !cq)
15341                 return -ENODEV;
15342         if (!phba->sli4_hba.pc_sli4_params.supported)
15343                 hw_page_size = SLI4_PAGE_SIZE;
15344
15345         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15346         if (!mbox)
15347                 return -ENOMEM;
15348         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15349                   sizeof(struct lpfc_sli4_cfg_mhdr));
15350         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15351                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15352                          length, LPFC_SLI4_MBX_EMBED);
15353
15354         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15355         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15356         bf_set(lpfc_mbx_mq_create_ext_num_pages,
15357                &mq_create_ext->u.request, mq->page_count);
15358         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15359                &mq_create_ext->u.request, 1);
15360         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15361                &mq_create_ext->u.request, 1);
15362         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15363                &mq_create_ext->u.request, 1);
15364         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15365                &mq_create_ext->u.request, 1);
15366         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15367                &mq_create_ext->u.request, 1);
15368         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15369         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15370                phba->sli4_hba.pc_sli4_params.mqv);
15371         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15372                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15373                        cq->queue_id);
15374         else
15375                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15376                        cq->queue_id);
15377         switch (mq->entry_count) {
15378         default:
15379                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15380                                 "0362 Unsupported MQ count. (%d)\n",
15381                                 mq->entry_count);
15382                 if (mq->entry_count < 16) {
15383                         status = -EINVAL;
15384                         goto out;
15385                 }
15386                 /* fall through - otherwise default to smallest count */
15387         case 16:
15388                 bf_set(lpfc_mq_context_ring_size,
15389                        &mq_create_ext->u.request.context,
15390                        LPFC_MQ_RING_SIZE_16);
15391                 break;
15392         case 32:
15393                 bf_set(lpfc_mq_context_ring_size,
15394                        &mq_create_ext->u.request.context,
15395                        LPFC_MQ_RING_SIZE_32);
15396                 break;
15397         case 64:
15398                 bf_set(lpfc_mq_context_ring_size,
15399                        &mq_create_ext->u.request.context,
15400                        LPFC_MQ_RING_SIZE_64);
15401                 break;
15402         case 128:
15403                 bf_set(lpfc_mq_context_ring_size,
15404                        &mq_create_ext->u.request.context,
15405                        LPFC_MQ_RING_SIZE_128);
15406                 break;
15407         }
15408         list_for_each_entry(dmabuf, &mq->page_list, list) {
15409                 memset(dmabuf->virt, 0, hw_page_size);
15410                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15411                                         putPaddrLow(dmabuf->phys);
15412                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15413                                         putPaddrHigh(dmabuf->phys);
15414         }
15415         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15416         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15417                               &mq_create_ext->u.response);
15418         if (rc != MBX_SUCCESS) {
15419                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15420                                 "2795 MQ_CREATE_EXT failed with "
15421                                 "status x%x. Failback to MQ_CREATE.\n",
15422                                 rc);
15423                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15424                 mq_create = &mbox->u.mqe.un.mq_create;
15425                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15426                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15427                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15428                                       &mq_create->u.response);
15429         }
15430
15431         /* The IOCTL status is embedded in the mailbox subheader. */
15432         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15433         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15434         if (shdr_status || shdr_add_status || rc) {
15435                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15436                                 "2502 MQ_CREATE mailbox failed with "
15437                                 "status x%x add_status x%x, mbx status x%x\n",
15438                                 shdr_status, shdr_add_status, rc);
15439                 status = -ENXIO;
15440                 goto out;
15441         }
15442         if (mq->queue_id == 0xFFFF) {
15443                 status = -ENXIO;
15444                 goto out;
15445         }
15446         mq->type = LPFC_MQ;
15447         mq->assoc_qid = cq->queue_id;
15448         mq->subtype = subtype;
15449         mq->host_index = 0;
15450         mq->hba_index = 0;
15451
15452         /* link the mq onto the parent cq child list */
15453         list_add_tail(&mq->list, &cq->child_list);
15454 out:
15455         mempool_free(mbox, phba->mbox_mem_pool);
15456         return status;
15457 }
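
/*
 * Illustrative sketch, not driver code: lpfc_mq_create() above first tries
 * the extended MQ_CREATE_EXT command (which also registers for async
 * events) and, if the firmware rejects it, rebuilds the same mailbox as a
 * plain MQ_CREATE via lpfc_mq_create_fb_init() and retries.  The shape of
 * that try-extended-then-fall-back flow, with hypothetical issue functions:
 */
#include <stdio.h>

#define MBX_SUCCESS_SKETCH 0

static int issue_mq_create_ext(void) { return -1; }     /* old FW says no */
static int issue_mq_create(void)     { return MBX_SUCCESS_SKETCH; }

static int create_mq(void)
{
        int rc = issue_mq_create_ext();

        if (rc != MBX_SUCCESS_SKETCH) {
                /* failback: reuse the already-built mailbox for the legacy op */
                printf("MQ_CREATE_EXT failed (%d); falling back to MQ_CREATE\n",
                       rc);
                rc = issue_mq_create();
        }
        return rc;
}

int main(void)
{
        return create_mq() == MBX_SUCCESS_SKETCH ? 0 : 1;
}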
15458
15459 /**
15460  * lpfc_wq_create - Create a Work Queue on the HBA
15461  * @phba: HBA structure that indicates port to create a queue on.
15462  * @wq: The queue structure to use to create the work queue.
15463  * @cq: The completion queue to bind this work queue to.
15464  * @subtype: The subtype of the work queue indicating its functionality.
15465  *
15466  * This function creates a work queue, as detailed in @wq, on a port, described
15467  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15468  *
 * The @phba struct is used to send a mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
15476  *
15477  * On success this function will return a zero. If unable to allocate enough
15478  * memory this function will return -ENOMEM. If the queue create mailbox command
15479  * fails this function will return -ENXIO.
15480  **/
15481 int
15482 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15483                struct lpfc_queue *cq, uint32_t subtype)
15484 {
15485         struct lpfc_mbx_wq_create *wq_create;
15486         struct lpfc_dmabuf *dmabuf;
15487         LPFC_MBOXQ_t *mbox;
15488         int rc, length, status = 0;
15489         uint32_t shdr_status, shdr_add_status;
15490         union lpfc_sli4_cfg_shdr *shdr;
15491         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15492         struct dma_address *page;
15493         void __iomem *bar_memmap_p;
15494         uint32_t db_offset;
15495         uint16_t pci_barset;
15496         uint8_t dpp_barset;
15497         uint32_t dpp_offset;
15498         unsigned long pg_addr;
15499         uint8_t wq_create_version;
15500
15501         /* sanity check on queue memory */
15502         if (!wq || !cq)
15503                 return -ENODEV;
15504         if (!phba->sli4_hba.pc_sli4_params.supported)
15505                 hw_page_size = wq->page_size;
15506
15507         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15508         if (!mbox)
15509                 return -ENOMEM;
15510         length = (sizeof(struct lpfc_mbx_wq_create) -
15511                   sizeof(struct lpfc_sli4_cfg_mhdr));
15512         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15513                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15514                          length, LPFC_SLI4_MBX_EMBED);
15515         wq_create = &mbox->u.mqe.un.wq_create;
15516         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15517         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15518                     wq->page_count);
15519         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15520                     cq->queue_id);
15521
15522         /* wqv is the earliest version supported, NOT the latest */
15523         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15524                phba->sli4_hba.pc_sli4_params.wqv);
15525
15526         if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15527             (wq->page_size > SLI4_PAGE_SIZE))
15528                 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15529         else
15530                 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15537
15538         switch (wq_create_version) {
15539         case LPFC_Q_CREATE_VERSION_1:
15540                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15541                        wq->entry_count);
15542                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15543                        LPFC_Q_CREATE_VERSION_1);
15544
15545                 switch (wq->entry_size) {
15546                 default:
15547                 case 64:
15548                         bf_set(lpfc_mbx_wq_create_wqe_size,
15549                                &wq_create->u.request_1,
15550                                LPFC_WQ_WQE_SIZE_64);
15551                         break;
15552                 case 128:
15553                         bf_set(lpfc_mbx_wq_create_wqe_size,
15554                                &wq_create->u.request_1,
15555                                LPFC_WQ_WQE_SIZE_128);
15556                         break;
15557                 }
15558                 /* Request DPP by default */
15559                 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15560                 bf_set(lpfc_mbx_wq_create_page_size,
15561                        &wq_create->u.request_1,
15562                        (wq->page_size / SLI4_PAGE_SIZE));
15563                 page = wq_create->u.request_1.page;
15564                 break;
15565         default:
15566                 page = wq_create->u.request.page;
15567                 break;
15568         }
15569
15570         list_for_each_entry(dmabuf, &wq->page_list, list) {
15571                 memset(dmabuf->virt, 0, hw_page_size);
15572                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15573                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15574         }
15575
15576         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15577                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15578
15579         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15580         /* The IOCTL status is embedded in the mailbox subheader. */
15581         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15582         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15583         if (shdr_status || shdr_add_status || rc) {
15584                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15585                                 "2503 WQ_CREATE mailbox failed with "
15586                                 "status x%x add_status x%x, mbx status x%x\n",
15587                                 shdr_status, shdr_add_status, rc);
15588                 status = -ENXIO;
15589                 goto out;
15590         }
15591
15592         if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15593                 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15594                                         &wq_create->u.response);
15595         else
15596                 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15597                                         &wq_create->u.response_1);
15598
15599         if (wq->queue_id == 0xFFFF) {
15600                 status = -ENXIO;
15601                 goto out;
15602         }
15603
15604         wq->db_format = LPFC_DB_LIST_FORMAT;
15605         if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15606                 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15607                         wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15608                                                &wq_create->u.response);
15609                         if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15610                             (wq->db_format != LPFC_DB_RING_FORMAT)) {
15611                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15612                                                 "3265 WQ[%d] doorbell format "
15613                                                 "not supported: x%x\n",
15614                                                 wq->queue_id, wq->db_format);
15615                                 status = -EINVAL;
15616                                 goto out;
15617                         }
15618                         pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15619                                             &wq_create->u.response);
15620                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15621                                                                    pci_barset);
15622                         if (!bar_memmap_p) {
15623                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15624                                                 "3263 WQ[%d] failed to memmap "
15625                                                 "pci barset:x%x\n",
15626                                                 wq->queue_id, pci_barset);
15627                                 status = -ENOMEM;
15628                                 goto out;
15629                         }
15630                         db_offset = wq_create->u.response.doorbell_offset;
15631                         if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15632                             (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15633                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15634                                                 "3252 WQ[%d] doorbell offset "
15635                                                 "not supported: x%x\n",
15636                                                 wq->queue_id, db_offset);
15637                                 status = -EINVAL;
15638                                 goto out;
15639                         }
15640                         wq->db_regaddr = bar_memmap_p + db_offset;
15641                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15642                                         "3264 WQ[%d]: barset:x%x, offset:x%x, "
15643                                         "format:x%x\n", wq->queue_id,
15644                                         pci_barset, db_offset, wq->db_format);
15645                 } else
15646                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15647         } else {
15648                 /* Check if DPP was honored by the firmware */
15649                 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15650                                     &wq_create->u.response_1);
15651                 if (wq->dpp_enable) {
15652                         pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15653                                             &wq_create->u.response_1);
15654                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15655                                                                    pci_barset);
15656                         if (!bar_memmap_p) {
15657                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15658                                                 "3267 WQ[%d] failed to memmap "
15659                                                 "pci barset:x%x\n",
15660                                                 wq->queue_id, pci_barset);
15661                                 status = -ENOMEM;
15662                                 goto out;
15663                         }
15664                         db_offset = wq_create->u.response_1.doorbell_offset;
15665                         wq->db_regaddr = bar_memmap_p + db_offset;
15666                         wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15667                                             &wq_create->u.response_1);
15668                         dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15669                                             &wq_create->u.response_1);
15670                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15671                                                                    dpp_barset);
15672                         if (!bar_memmap_p) {
15673                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15674                                                 "3268 WQ[%d] failed to memmap "
15675                                                 "pci barset:x%x\n",
15676                                                 wq->queue_id, dpp_barset);
15677                                 status = -ENOMEM;
15678                                 goto out;
15679                         }
15680                         dpp_offset = wq_create->u.response_1.dpp_offset;
15681                         wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15682                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15683                                         "3271 WQ[%d]: barset:x%x, offset:x%x, "
15684                                         "dpp_id:x%x dpp_barset:x%x "
15685                                         "dpp_offset:x%x\n",
15686                                         wq->queue_id, pci_barset, db_offset,
15687                                         wq->dpp_id, dpp_barset, dpp_offset);
15688
15689                         /* Enable combined writes for DPP aperture */
15690                         pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15691 #ifdef CONFIG_X86
15692                         rc = set_memory_wc(pg_addr, 1);
15693                         if (rc) {
15694                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15695                                         "3272 Cannot setup Combined "
15696                                         "Write on WQ[%d] - disable DPP\n",
15697                                         wq->queue_id);
15698                                 phba->cfg_enable_dpp = 0;
15699                         }
15700 #else
15701                         phba->cfg_enable_dpp = 0;
15702 #endif
15703                 } else
15704                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15705         }
15706         wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15707         if (wq->pring == NULL) {
15708                 status = -ENOMEM;
15709                 goto out;
15710         }
15711         wq->type = LPFC_WQ;
15712         wq->assoc_qid = cq->queue_id;
15713         wq->subtype = subtype;
15714         wq->host_index = 0;
15715         wq->hba_index = 0;
15716         wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15717
15718         /* link the wq onto the parent cq child list */
15719         list_add_tail(&wq->list, &cq->child_list);
15720 out:
15721         mempool_free(mbox, phba->mbox_mem_pool);
15722         return status;
15723 }
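
/*
 * Illustrative sketch, not driver code: in dual-ULP mode lpfc_wq_create()
 * validates the doorbell offset reported by the firmware and then forms the
 * doorbell register address as the ioremapped BAR base plus that offset.
 * A minimal model; the two offset values are assumed stand-ins for
 * LPFC_ULP0_WQ_DOORBELL / LPFC_ULP1_WQ_DOORBELL:
 */
#include <stdint.h>
#include <stdio.h>

#define ULP0_WQ_DOORBELL_SKETCH 0x040
#define ULP1_WQ_DOORBELL_SKETCH 0x0a0

static volatile uint32_t *wq_db_regaddr(uint8_t *bar_memmap_p,
                                        uint32_t db_offset)
{
        if (db_offset != ULP0_WQ_DOORBELL_SKETCH &&
            db_offset != ULP1_WQ_DOORBELL_SKETCH)
                return NULL;    /* the driver returns -EINVAL for this */
        return (volatile uint32_t *)(bar_memmap_p + db_offset);
}

int main(void)
{
        static uint8_t fake_bar[4096];  /* stand-in for the ioremapped BAR */
        volatile uint32_t *db = wq_db_regaddr(fake_bar, ULP0_WQ_DOORBELL_SKETCH);

        printf("doorbell %s\n", db ? "mapped" : "rejected");
        return 0;
}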
15724
15725 /**
15726  * lpfc_rq_create - Create a Receive Queue on the HBA
15727  * @phba: HBA structure that indicates port to create a queue on.
15728  * @hrq: The queue structure to use to create the header receive queue.
15729  * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The subtype to assign to the receive queues.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @drq and
 * @hrq structs are used to get the entry count that is necessary to determine
 * the number of pages to use for this queue pair. The @cq is used to indicate
 * which completion queue to bind received buffers that are posted to these
 * queues to. This function will send the RQ_CREATE mailbox command to the HBA
 * to setup the receive queue pair. This function is synchronous and will wait
 * for the mailbox command to finish before continuing.
15743  *
15744  * On success this function will return a zero. If unable to allocate enough
15745  * memory this function will return -ENOMEM. If the queue create mailbox command
15746  * fails this function will return -ENXIO.
15747  **/
15748 int
15749 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15750                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15751 {
15752         struct lpfc_mbx_rq_create *rq_create;
15753         struct lpfc_dmabuf *dmabuf;
15754         LPFC_MBOXQ_t *mbox;
15755         int rc, length, status = 0;
15756         uint32_t shdr_status, shdr_add_status;
15757         union lpfc_sli4_cfg_shdr *shdr;
15758         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15759         void __iomem *bar_memmap_p;
15760         uint32_t db_offset;
15761         uint16_t pci_barset;
15762
15763         /* sanity check on queue memory */
15764         if (!hrq || !drq || !cq)
15765                 return -ENODEV;
15766         if (!phba->sli4_hba.pc_sli4_params.supported)
15767                 hw_page_size = SLI4_PAGE_SIZE;
15768
15769         if (hrq->entry_count != drq->entry_count)
15770                 return -EINVAL;
15771         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15772         if (!mbox)
15773                 return -ENOMEM;
15774         length = (sizeof(struct lpfc_mbx_rq_create) -
15775                   sizeof(struct lpfc_sli4_cfg_mhdr));
15776         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15777                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15778                          length, LPFC_SLI4_MBX_EMBED);
15779         rq_create = &mbox->u.mqe.un.rq_create;
15780         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15781         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15782                phba->sli4_hba.pc_sli4_params.rqv);
15783         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15784                 bf_set(lpfc_rq_context_rqe_count_1,
15785                        &rq_create->u.request.context,
15786                        hrq->entry_count);
15787                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15788                 bf_set(lpfc_rq_context_rqe_size,
15789                        &rq_create->u.request.context,
15790                        LPFC_RQE_SIZE_8);
15791                 bf_set(lpfc_rq_context_page_size,
15792                        &rq_create->u.request.context,
15793                        LPFC_RQ_PAGE_SIZE_4096);
15794         } else {
15795                 switch (hrq->entry_count) {
15796                 default:
15797                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15798                                         "2535 Unsupported RQ count. (%d)\n",
15799                                         hrq->entry_count);
15800                         if (hrq->entry_count < 512) {
15801                                 status = -EINVAL;
15802                                 goto out;
15803                         }
15804                         /* fall through - otherwise default to smallest count */
15805                 case 512:
15806                         bf_set(lpfc_rq_context_rqe_count,
15807                                &rq_create->u.request.context,
15808                                LPFC_RQ_RING_SIZE_512);
15809                         break;
15810                 case 1024:
15811                         bf_set(lpfc_rq_context_rqe_count,
15812                                &rq_create->u.request.context,
15813                                LPFC_RQ_RING_SIZE_1024);
15814                         break;
15815                 case 2048:
15816                         bf_set(lpfc_rq_context_rqe_count,
15817                                &rq_create->u.request.context,
15818                                LPFC_RQ_RING_SIZE_2048);
15819                         break;
15820                 case 4096:
15821                         bf_set(lpfc_rq_context_rqe_count,
15822                                &rq_create->u.request.context,
15823                                LPFC_RQ_RING_SIZE_4096);
15824                         break;
15825                 }
15826                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15827                        LPFC_HDR_BUF_SIZE);
15828         }
15829         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15830                cq->queue_id);
15831         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15832                hrq->page_count);
15833         list_for_each_entry(dmabuf, &hrq->page_list, list) {
15834                 memset(dmabuf->virt, 0, hw_page_size);
15835                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15836                                         putPaddrLow(dmabuf->phys);
15837                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15838                                         putPaddrHigh(dmabuf->phys);
15839         }
15840         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15841                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15842
15843         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15844         /* The IOCTL status is embedded in the mailbox subheader. */
15845         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15846         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15847         if (shdr_status || shdr_add_status || rc) {
15848                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15849                                 "2504 RQ_CREATE mailbox failed with "
15850                                 "status x%x add_status x%x, mbx status x%x\n",
15851                                 shdr_status, shdr_add_status, rc);
15852                 status = -ENXIO;
15853                 goto out;
15854         }
15855         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15856         if (hrq->queue_id == 0xFFFF) {
15857                 status = -ENXIO;
15858                 goto out;
15859         }
15860
15861         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15862                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15863                                         &rq_create->u.response);
15864                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15865                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15866                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15867                                         "3262 RQ [%d] doorbell format not "
15868                                         "supported: x%x\n", hrq->queue_id,
15869                                         hrq->db_format);
15870                         status = -EINVAL;
15871                         goto out;
15872                 }
15873
15874                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15875                                     &rq_create->u.response);
15876                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15877                 if (!bar_memmap_p) {
15878                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15879                                         "3269 RQ[%d] failed to memmap pci "
15880                                         "barset:x%x\n", hrq->queue_id,
15881                                         pci_barset);
15882                         status = -ENOMEM;
15883                         goto out;
15884                 }
15885
15886                 db_offset = rq_create->u.response.doorbell_offset;
15887                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15888                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15889                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15890                                         "3270 RQ[%d] doorbell offset not "
15891                                         "supported: x%x\n", hrq->queue_id,
15892                                         db_offset);
15893                         status = -EINVAL;
15894                         goto out;
15895                 }
15896                 hrq->db_regaddr = bar_memmap_p + db_offset;
15897                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15898                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15899                                 "format:x%x\n", hrq->queue_id, pci_barset,
15900                                 db_offset, hrq->db_format);
15901         } else {
15902                 hrq->db_format = LPFC_DB_RING_FORMAT;
15903                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15904         }
15905         hrq->type = LPFC_HRQ;
15906         hrq->assoc_qid = cq->queue_id;
15907         hrq->subtype = subtype;
15908         hrq->host_index = 0;
15909         hrq->hba_index = 0;
15910         hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15911
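        /*
         * The data queue below mirrors the header queue's entry count but
         * uses payload-sized buffers (LPFC_NVMET_DATA_BUF_SIZE for the
         * NVMET subtype, LPFC_DATA_BUF_SIZE otherwise), whereas the header
         * queue above used the small LPFC_HDR_BUF_SIZE buffers.
         */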
15912         /* now create the data queue */
15913         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15914                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15915                          length, LPFC_SLI4_MBX_EMBED);
15916         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15917                phba->sli4_hba.pc_sli4_params.rqv);
15918         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15919                 bf_set(lpfc_rq_context_rqe_count_1,
15920                        &rq_create->u.request.context, hrq->entry_count);
15921                 if (subtype == LPFC_NVMET)
15922                         rq_create->u.request.context.buffer_size =
15923                                 LPFC_NVMET_DATA_BUF_SIZE;
15924                 else
15925                         rq_create->u.request.context.buffer_size =
15926                                 LPFC_DATA_BUF_SIZE;
15927                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15928                        LPFC_RQE_SIZE_8);
15929                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15930                        (PAGE_SIZE/SLI4_PAGE_SIZE));
15931         } else {
15932                 switch (drq->entry_count) {
15933                 default:
15934                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15935                                         "2536 Unsupported RQ count. (%d)\n",
15936                                         drq->entry_count);
15937                         if (drq->entry_count < 512) {
15938                                 status = -EINVAL;
15939                                 goto out;
15940                         }
15941                         /* fall through - otherwise default to smallest count */
15942                 case 512:
15943                         bf_set(lpfc_rq_context_rqe_count,
15944                                &rq_create->u.request.context,
15945                                LPFC_RQ_RING_SIZE_512);
15946                         break;
15947                 case 1024:
15948                         bf_set(lpfc_rq_context_rqe_count,
15949                                &rq_create->u.request.context,
15950                                LPFC_RQ_RING_SIZE_1024);
15951                         break;
15952                 case 2048:
15953                         bf_set(lpfc_rq_context_rqe_count,
15954                                &rq_create->u.request.context,
15955                                LPFC_RQ_RING_SIZE_2048);
15956                         break;
15957                 case 4096:
15958                         bf_set(lpfc_rq_context_rqe_count,
15959                                &rq_create->u.request.context,
15960                                LPFC_RQ_RING_SIZE_4096);
15961                         break;
15962                 }
15963                 if (subtype == LPFC_NVMET)
15964                         bf_set(lpfc_rq_context_buf_size,
15965                                &rq_create->u.request.context,
15966                                LPFC_NVMET_DATA_BUF_SIZE);
15967                 else
15968                         bf_set(lpfc_rq_context_buf_size,
15969                                &rq_create->u.request.context,
15970                                LPFC_DATA_BUF_SIZE);
15971         }
15972         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15973                cq->queue_id);
15974         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15975                drq->page_count);
15976         list_for_each_entry(dmabuf, &drq->page_list, list) {
15977                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15978                                         putPaddrLow(dmabuf->phys);
15979                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15980                                         putPaddrHigh(dmabuf->phys);
15981         }
15982         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15983                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15984         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15985         /* The IOCTL status is embedded in the mailbox subheader. */
15986         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15987         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15988         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15989         if (shdr_status || shdr_add_status || rc) {
15990                 status = -ENXIO;
15991                 goto out;
15992         }
15993         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15994         if (drq->queue_id == 0xFFFF) {
15995                 status = -ENXIO;
15996                 goto out;
15997         }
15998         drq->type = LPFC_DRQ;
15999         drq->assoc_qid = cq->queue_id;
16000         drq->subtype = subtype;
16001         drq->host_index = 0;
16002         drq->hba_index = 0;
16003         drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16004
16005         /* link the header and data RQs onto the parent cq child list */
16006         list_add_tail(&hrq->list, &cq->child_list);
16007         list_add_tail(&drq->list, &cq->child_list);
16008
16009 out:
16010         mempool_free(mbox, phba->mbox_mem_pool);
16011         return status;
16012 }
16013
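/*
 * Illustrative call into the single RQ-pair create routine that ends
 * above: a hedged sketch, not a verbatim copy of the driver's init path
 * (the real caller lives in the queue-setup code and sizes the queues
 * from configuration):
 *
 *      rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *                          phba->sli4_hba.dat_rq,
 *                          phba->sli4_hba.els_cq, LPFC_USOL);
 *      if (rc)
 *              return rc;
 */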
16014 /**
16015  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16016  * @phba: HBA structure that indicates port to create a queue on.
16017  * @hrqp: The queue structure array to use to create the header receive queues.
16018  * @drqp: The queue structure array to use to create the data receive queues.
16019  * @cqp: The completion queue array to bind these receive queues to.
16020  * @subtype: Functional purpose of the queues (LPFC_NVMET, etc).
16021  *
16022  * This function creates a set of receive buffer queue pairs, as detailed
16023  * in @hrqp and @drqp, on a port described by @phba, by sending a
16024  * RQ_CREATE mailbox command to the HBA.
16025  *
16026  * The @phba struct is used to send the mailbox command to the HBA. The
16027  * @drqp and @hrqp structs are used to get the entry counts needed to
16028  * determine the number of pages to use for each queue. The @cqp array
16029  * indicates which completion queues the posted buffers are bound to.
16030  * This function sends the RQ_CREATE mailbox command to the HBA to set up
16031  * the receive queue pairs and waits for the command to complete.
16032  *
16033  * On success this function will return a zero. If unable to allocate enough
16034  * memory this function will return -ENOMEM. If the queue create mailbox command
16035  * fails this function will return -ENXIO.
16036  **/
16037 int
16038 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16039                 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16040                 uint32_t subtype)
16041 {
16042         struct lpfc_queue *hrq, *drq, *cq;
16043         struct lpfc_mbx_rq_create_v2 *rq_create;
16044         struct lpfc_dmabuf *dmabuf;
16045         LPFC_MBOXQ_t *mbox;
16046         int rc, length, alloclen, status = 0;
16047         int cnt, idx, numrq, page_idx = 0;
16048         uint32_t shdr_status, shdr_add_status;
16049         union lpfc_sli4_cfg_shdr *shdr;
16050         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16051
16052         numrq = phba->cfg_nvmet_mrq;
16053         /* sanity check on array memory */
16054         if (!hrqp || !drqp || !cqp || !numrq)
16055                 return -ENODEV;
16056         if (!phba->sli4_hba.pc_sli4_params.supported)
16057                 hw_page_size = SLI4_PAGE_SIZE;
16058
16059         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16060         if (!mbox)
16061                 return -ENOMEM;
16062
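        /*
         * Each of the numrq pairs contributes a header RQ and a data RQ
         * (hence the factor of 2), and every RQ supplies page_count page
         * addresses in the non-embedded mailbox payload.
         */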
16063         length = sizeof(struct lpfc_mbx_rq_create_v2);
16064         length += ((2 * numrq * hrqp[0]->page_count) *
16065                    sizeof(struct dma_address));
16066
16067         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16068                                     LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16069                                     LPFC_SLI4_MBX_NEMBED);
16070         if (alloclen < length) {
16071                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16072                                 "3099 Allocated DMA memory size (%d) is "
16073                                 "less than the requested DMA memory size "
16074                                 "(%d)\n", alloclen, length);
16075                 status = -ENOMEM;
16076                 goto out;
16077         }
16078
16081         rq_create = mbox->sge_array->addr[0];
16082         shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16083
16084         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16085         cnt = 0;
16086
16087         for (idx = 0; idx < numrq; idx++) {
16088                 hrq = hrqp[idx];
16089                 drq = drqp[idx];
16090                 cq  = cqp[idx];
16091
16092                 /* sanity check on queue memory */
16093                 if (!hrq || !drq || !cq) {
16094                         status = -ENODEV;
16095                         goto out;
16096                 }
16097
16098                 if (hrq->entry_count != drq->entry_count) {
16099                         status = -EINVAL;
16100                         goto out;
16101                 }
16102
16103                 if (idx == 0) {
16104                         bf_set(lpfc_mbx_rq_create_num_pages,
16105                                &rq_create->u.request,
16106                                hrq->page_count);
16107                         bf_set(lpfc_mbx_rq_create_rq_cnt,
16108                                &rq_create->u.request, (numrq * 2));
16109                         bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16110                                1);
16111                         bf_set(lpfc_rq_context_base_cq,
16112                                &rq_create->u.request.context,
16113                                cq->queue_id);
16114                         bf_set(lpfc_rq_context_data_size,
16115                                &rq_create->u.request.context,
16116                                LPFC_NVMET_DATA_BUF_SIZE);
16117                         bf_set(lpfc_rq_context_hdr_size,
16118                                &rq_create->u.request.context,
16119                                LPFC_HDR_BUF_SIZE);
16120                         bf_set(lpfc_rq_context_rqe_count_1,
16121                                &rq_create->u.request.context,
16122                                hrq->entry_count);
16123                         bf_set(lpfc_rq_context_rqe_size,
16124                                &rq_create->u.request.context,
16125                                LPFC_RQE_SIZE_8);
16126                         bf_set(lpfc_rq_context_page_size,
16127                                &rq_create->u.request.context,
16128                                (PAGE_SIZE/SLI4_PAGE_SIZE));
16129                 }
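                /*
                 * Note: rc is reused below as a per-queue page counter so
                 * that page_idx can advance past this RQ's pages before
                 * the next page_list is walked.
                 */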
16130                 rc = 0;
16131                 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16132                         memset(dmabuf->virt, 0, hw_page_size);
16133                         cnt = page_idx + dmabuf->buffer_tag;
16134                         rq_create->u.request.page[cnt].addr_lo =
16135                                         putPaddrLow(dmabuf->phys);
16136                         rq_create->u.request.page[cnt].addr_hi =
16137                                         putPaddrHigh(dmabuf->phys);
16138                         rc++;
16139                 }
16140                 page_idx += rc;
16141
16142                 rc = 0;
16143                 list_for_each_entry(dmabuf, &drq->page_list, list) {
16144                         memset(dmabuf->virt, 0, hw_page_size);
16145                         cnt = page_idx + dmabuf->buffer_tag;
16146                         rq_create->u.request.page[cnt].addr_lo =
16147                                         putPaddrLow(dmabuf->phys);
16148                         rq_create->u.request.page[cnt].addr_hi =
16149                                         putPaddrHigh(dmabuf->phys);
16150                         rc++;
16151                 }
16152                 page_idx += rc;
16153
16154                 hrq->db_format = LPFC_DB_RING_FORMAT;
16155                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16156                 hrq->type = LPFC_HRQ;
16157                 hrq->assoc_qid = cq->queue_id;
16158                 hrq->subtype = subtype;
16159                 hrq->host_index = 0;
16160                 hrq->hba_index = 0;
16161                 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16162
16163                 drq->db_format = LPFC_DB_RING_FORMAT;
16164                 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16165                 drq->type = LPFC_DRQ;
16166                 drq->assoc_qid = cq->queue_id;
16167                 drq->subtype = subtype;
16168                 drq->host_index = 0;
16169                 drq->hba_index = 0;
16170                 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16171
16172                 list_add_tail(&hrq->list, &cq->child_list);
16173                 list_add_tail(&drq->list, &cq->child_list);
16174         }
16175
16176         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16177         /* The IOCTL status is embedded in the mailbox subheader. */
16178         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16179         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16180         if (shdr_status || shdr_add_status || rc) {
16181                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16182                                 "3120 RQ_CREATE mailbox failed with "
16183                                 "status x%x add_status x%x, mbx status x%x\n",
16184                                 shdr_status, shdr_add_status, rc);
16185                 status = -ENXIO;
16186                 goto out;
16187         }
16188         rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16189         if (rc == 0xFFFF) {
16190                 status = -ENXIO;
16191                 goto out;
16192         }
16193
16194         /* Initialize all RQs with associated queue id */
16195         for (idx = 0; idx < numrq; idx++) {
16196                 hrq = hrqp[idx];
16197                 hrq->queue_id = rc + (2 * idx);
16198                 drq = drqp[idx];
16199                 drq->queue_id = rc + (2 * idx) + 1;
16200         }
16201
16202 out:
16203         lpfc_sli4_mbox_cmd_free(phba, mbox);
16204         return status;
16205 }
16206
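/*
 * Worked example of the queue-id assignment above (illustrative
 * numbers): if the RQ_CREATE response reports base id 100 with
 * numrq = 2, the pairs interleave as hrq0 = 100, drq0 = 101,
 * hrq1 = 102, drq1 = 103.
 */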
16207 /**
16208  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
16209  * @phba: HBA structure that indicates port to destroy a queue on.
16210  * @eq: The queue structure associated with the queue to destroy.
16211  *
16212  * This function destroys a queue, as detailed in @eq, by sending a
16213  * mailbox command, specific to the type of queue, to the HBA. The @eq
16214  * struct is used to get the queue ID of the queue to destroy.
16215  *
16216  * On success this function will return a zero. If the queue destroy mailbox
16217  * command fails this function will return -ENXIO.
16218  **/
16219 int
16220 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16221 {
16222         LPFC_MBOXQ_t *mbox;
16223         int rc, length, status = 0;
16224         uint32_t shdr_status, shdr_add_status;
16225         union lpfc_sli4_cfg_shdr *shdr;
16226
16227         /* sanity check on queue memory */
16228         if (!eq)
16229                 return -ENODEV;
16230
16231         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16232         if (!mbox)
16233                 return -ENOMEM;
16234         length = (sizeof(struct lpfc_mbx_eq_destroy) -
16235                   sizeof(struct lpfc_sli4_cfg_mhdr));
16236         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16237                          LPFC_MBOX_OPCODE_EQ_DESTROY,
16238                          length, LPFC_SLI4_MBX_EMBED);
16239         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16240                eq->queue_id);
16241         mbox->vport = eq->phba->pport;
16242         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16243
16244         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16245         /* The IOCTL status is embedded in the mailbox subheader. */
16246         shdr = (union lpfc_sli4_cfg_shdr *)
16247                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16248         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16249         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16250         if (shdr_status || shdr_add_status || rc) {
16251                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16252                                 "2505 EQ_DESTROY mailbox failed with "
16253                                 "status x%x add_status x%x, mbx status x%x\n",
16254                                 shdr_status, shdr_add_status, rc);
16255                 status = -ENXIO;
16256         }
16257
16258         /* Remove eq from any list */
16259         list_del_init(&eq->list);
16260         mempool_free(mbox, eq->phba->mbox_mem_pool);
16261         return status;
16262 }
16263
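/*
 * lpfc_eq_destroy and the cq/mq/wq/rq variants that follow all share
 * this shape: embed the queue id in a *_DESTROY mailbox command, issue
 * it by polling, then unlink the queue structure. A hedged teardown
 * sketch (queue names illustrative; real teardown runs in the
 * queue-unset path, in reverse of creation order):
 *
 *      lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *      lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 */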
16264 /**
16265  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16266  * @phba: HBA structure that indicates port to destroy a queue on.
16267  * @cq: The queue structure associated with the queue to destroy.
16268  *
16269  * This function destroys a queue, as detailed in @cq, by sending a
16270  * mailbox command, specific to the type of queue, to the HBA. The @cq
16271  * struct is used to get the queue ID of the queue to destroy.
16272  *
16273  * On success this function will return a zero. If the queue destroy mailbox
16274  * command fails this function will return -ENXIO.
16275  **/
16276 int
16277 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16278 {
16279         LPFC_MBOXQ_t *mbox;
16280         int rc, length, status = 0;
16281         uint32_t shdr_status, shdr_add_status;
16282         union lpfc_sli4_cfg_shdr *shdr;
16283
16284         /* sanity check on queue memory */
16285         if (!cq)
16286                 return -ENODEV;
16287         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16288         if (!mbox)
16289                 return -ENOMEM;
16290         length = (sizeof(struct lpfc_mbx_cq_destroy) -
16291                   sizeof(struct lpfc_sli4_cfg_mhdr));
16292         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16293                          LPFC_MBOX_OPCODE_CQ_DESTROY,
16294                          length, LPFC_SLI4_MBX_EMBED);
16295         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16296                cq->queue_id);
16297         mbox->vport = cq->phba->pport;
16298         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16299         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16300         /* The IOCTL status is embedded in the mailbox subheader. */
16301         shdr = (union lpfc_sli4_cfg_shdr *)
16302                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16303         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16304         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16305         if (shdr_status || shdr_add_status || rc) {
16306                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16307                                 "2506 CQ_DESTROY mailbox failed with "
16308                                 "status x%x add_status x%x, mbx status x%x\n",
16309                                 shdr_status, shdr_add_status, rc);
16310                 status = -ENXIO;
16311         }
16312         /* Remove cq from any list */
16313         list_del_init(&cq->list);
16314         mempool_free(mbox, cq->phba->mbox_mem_pool);
16315         return status;
16316 }
16317
16318 /**
16319  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16320  * @phba: HBA structure that indicates port to destroy a queue on.
16321  * @mq: The queue structure associated with the queue to destroy.
16322  *
16323  * This function destroys a queue, as detailed in @mq, by sending a
16324  * mailbox command, specific to the type of queue, to the HBA. The @mq
16325  * struct is used to get the queue ID of the queue to destroy.
16326  *
16327  * On success this function will return a zero. If the queue destroy mailbox
16328  * command fails this function will return -ENXIO.
16329  **/
16330 int
16331 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16332 {
16333         LPFC_MBOXQ_t *mbox;
16334         int rc, length, status = 0;
16335         uint32_t shdr_status, shdr_add_status;
16336         union lpfc_sli4_cfg_shdr *shdr;
16337
16338         /* sanity check on queue memory */
16339         if (!mq)
16340                 return -ENODEV;
16341         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16342         if (!mbox)
16343                 return -ENOMEM;
16344         length = (sizeof(struct lpfc_mbx_mq_destroy) -
16345                   sizeof(struct lpfc_sli4_cfg_mhdr));
16346         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16347                          LPFC_MBOX_OPCODE_MQ_DESTROY,
16348                          length, LPFC_SLI4_MBX_EMBED);
16349         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16350                mq->queue_id);
16351         mbox->vport = mq->phba->pport;
16352         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16353         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16354         /* The IOCTL status is embedded in the mailbox subheader. */
16355         shdr = (union lpfc_sli4_cfg_shdr *)
16356                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16357         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16358         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16359         if (shdr_status || shdr_add_status || rc) {
16360                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16361                                 "2507 MQ_DESTROY mailbox failed with "
16362                                 "status x%x add_status x%x, mbx status x%x\n",
16363                                 shdr_status, shdr_add_status, rc);
16364                 status = -ENXIO;
16365         }
16366         /* Remove mq from any list */
16367         list_del_init(&mq->list);
16368         mempool_free(mbox, mq->phba->mbox_mem_pool);
16369         return status;
16370 }
16371
16372 /**
16373  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16374  * @phba: HBA structure that indicates port to destroy a queue on.
16375  * @wq: The queue structure associated with the queue to destroy.
16376  *
16377  * This function destroys a queue, as detailed in @wq, by sending a
16378  * mailbox command, specific to the type of queue, to the HBA. The @wq
16379  * struct is used to get the queue ID of the queue to destroy.
16380  *
16381  * On success this function will return a zero. If the queue destroy mailbox
16382  * command fails this function will return -ENXIO.
16383  **/
16384 int
16385 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16386 {
16387         LPFC_MBOXQ_t *mbox;
16388         int rc, length, status = 0;
16389         uint32_t shdr_status, shdr_add_status;
16390         union lpfc_sli4_cfg_shdr *shdr;
16391
16392         /* sanity check on queue memory */
16393         if (!wq)
16394                 return -ENODEV;
16395         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16396         if (!mbox)
16397                 return -ENOMEM;
16398         length = (sizeof(struct lpfc_mbx_wq_destroy) -
16399                   sizeof(struct lpfc_sli4_cfg_mhdr));
16400         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16401                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16402                          length, LPFC_SLI4_MBX_EMBED);
16403         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16404                wq->queue_id);
16405         mbox->vport = wq->phba->pport;
16406         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16407         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16408         shdr = (union lpfc_sli4_cfg_shdr *)
16409                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16410         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16411         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16412         if (shdr_status || shdr_add_status || rc) {
16413                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16414                                 "2508 WQ_DESTROY mailbox failed with "
16415                                 "status x%x add_status x%x, mbx status x%x\n",
16416                                 shdr_status, shdr_add_status, rc);
16417                 status = -ENXIO;
16418         }
16419         /* Remove wq from any list */
16420         list_del_init(&wq->list);
16421         kfree(wq->pring);
16422         wq->pring = NULL;
16423         mempool_free(mbox, wq->phba->mbox_mem_pool);
16424         return status;
16425 }
16426
16427 /**
16428  * lpfc_rq_destroy - Destroy Receive Queues on the HBA
16429  * @phba: HBA structure that indicates port to destroy the queues on.
16430  * @hrq: The header receive queue to destroy.
16431  * @drq: The data receive queue to destroy.
16432  *
16433  * This function destroys the receive queue pair, as detailed in @hrq and
16434  * @drq, by sending mailbox commands to the HBA; the structs supply the queue IDs.
16435  *
16436  * On success this function will return a zero. If the queue destroy mailbox
16437  * command fails this function will return -ENXIO.
16438  **/
16439 int
16440 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16441                 struct lpfc_queue *drq)
16442 {
16443         LPFC_MBOXQ_t *mbox;
16444         int rc, length, status = 0;
16445         uint32_t shdr_status, shdr_add_status;
16446         union lpfc_sli4_cfg_shdr *shdr;
16447
16448         /* sanity check on queue memory */
16449         if (!hrq || !drq)
16450                 return -ENODEV;
16451         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16452         if (!mbox)
16453                 return -ENOMEM;
16454         length = (sizeof(struct lpfc_mbx_rq_destroy) -
16455                   sizeof(struct lpfc_sli4_cfg_mhdr));
16456         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16457                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16458                          length, LPFC_SLI4_MBX_EMBED);
16459         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16460                hrq->queue_id);
16461         mbox->vport = hrq->phba->pport;
16462         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16463         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16464         /* The IOCTL status is embedded in the mailbox subheader. */
16465         shdr = (union lpfc_sli4_cfg_shdr *)
16466                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16467         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16468         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16469         if (shdr_status || shdr_add_status || rc) {
16470                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16471                                 "2509 RQ_DESTROY mailbox failed with "
16472                                 "status x%x add_status x%x, mbx status x%x\n",
16473                                 shdr_status, shdr_add_status, rc);
16474                 if (rc != MBX_TIMEOUT)
16475                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
16476                 return -ENXIO;
16477         }
16478         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16479                drq->queue_id);
16480         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16481         shdr = (union lpfc_sli4_cfg_shdr *)
16482                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16483         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16484         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16485         if (shdr_status || shdr_add_status || rc) {
16486                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16487                                 "2510 RQ_DESTROY mailbox failed with "
16488                                 "status x%x add_status x%x, mbx status x%x\n",
16489                                 shdr_status, shdr_add_status, rc);
16490                 status = -ENXIO;
16491         }
16492         list_del_init(&hrq->list);
16493         list_del_init(&drq->list);
16494         mempool_free(mbox, hrq->phba->mbox_mem_pool);
16495         return status;
16496 }
16497
16498 /**
16499  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16500  * @phba: pointer to lpfc hba data structure.
16501  * @pdma_phys_addr0: Physical address of the 1st SGL page.
16502  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16503  * @xritag: the xritag that ties this io to the SGL pages.
16504  *
16505  * This routine will post the sgl pages for the IO that has the xritag
16506  * that is in the iocbq structure. The xritag is assigned during iocbq
16507  * creation and persists for as long as the driver is loaded.
16508  * If the caller has fewer than 256 scatter gather segments to map then
16509  * pdma_phys_addr1 should be 0.
16510  * If the caller needs to map more than 256 scatter gather segments then
16511  * pdma_phys_addr1 should be a valid physical address.
16512  * Physical addresses for SGLs must be 64 byte aligned.
16513  * If you are going to map 2 SGLs then the first one must have 256 entries
16514  * and the second sgl can have between 1 and 256 entries.
16515  *
16516  * Return codes:
16517  *      0 - Success
16518  *      -ENXIO, -ENOMEM - Failure
16519  **/
16520 int
16521 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16522                 dma_addr_t pdma_phys_addr0,
16523                 dma_addr_t pdma_phys_addr1,
16524                 uint16_t xritag)
16525 {
16526         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16527         LPFC_MBOXQ_t *mbox;
16528         int rc;
16529         uint32_t shdr_status, shdr_add_status;
16530         uint32_t mbox_tmo;
16531         union lpfc_sli4_cfg_shdr *shdr;
16532
16533         if (xritag == NO_XRI) {
16534                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16535                                 "0364 Invalid param: xritag is NO_XRI\n");
16536                 return -EINVAL;
16537         }
16538
16539         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16540         if (!mbox)
16541                 return -ENOMEM;
16542
16543         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16544                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16545                         sizeof(struct lpfc_mbx_post_sgl_pages) -
16546                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16547
16548         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16549                                 &mbox->u.mqe.un.post_sgl_pages;
16550         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16551         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16552
16553         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16554                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16555         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16556                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16557
16558         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16559                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16560         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16561                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16562         if (!phba->sli4_hba.intr_enable)
16563                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16564         else {
16565                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16566                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16567         }
16568         /* The IOCTL status is embedded in the mailbox subheader. */
16569         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16570         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16571         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16572         if (rc != MBX_TIMEOUT)
16573                 mempool_free(mbox, phba->mbox_mem_pool);
16574         if (shdr_status || shdr_add_status || rc) {
16575                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16576                                 "2511 POST_SGL mailbox failed with "
16577                                 "status x%x add_status x%x, mbx status x%x\n",
16578                                 shdr_status, shdr_add_status, rc);
16579                 rc = -ENXIO;
16580         }
16581         return rc;
16581 }
16582
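/*
 * Hedged usage sketch for lpfc_sli4_post_sgl(): posting a two-page
 * SGL for one xri (variable names illustrative):
 *
 *      dma_addr_t pg0 = sglq->phys;
 *      dma_addr_t pg1 = sglq->phys + SGL_PAGE_SIZE;
 *      rc = lpfc_sli4_post_sgl(phba, pg0, pg1, sglq->sli4_xritag);
 *
 * Pass pg1 == 0 when 256 or fewer scatter gather entries are needed.
 */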
16583 /**
16584  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16585  * @phba: pointer to lpfc hba data structure.
16586  *
16587  * This routine is invoked to allocate the next available xri from the
16588  * bitmask of xris maintained by the driver, consistent with the SLI-4
16589  * interface spec. The xri index is logical, so the search starts at
16590  * bit 0 on every call.
16591  *
16592  * Returns
16593  *      An xri defined as 0 <= xri < max_xri if successful
16594  *      NO_XRI if no xris are available.
16595  **/
16596 static uint16_t
16597 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16598 {
16599         unsigned long xri;
16600
16601         /*
16602          * Fetch the next logical xri.  Because this index is logical,
16603          * the driver starts at 0 each time.
16604          */
16605         spin_lock_irq(&phba->hbalock);
16606         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16607                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
16608         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16609                 spin_unlock_irq(&phba->hbalock);
16610                 return NO_XRI;
16611         } else {
16612                 set_bit(xri, phba->sli4_hba.xri_bmask);
16613                 phba->sli4_hba.max_cfg_param.xri_used++;
16614         }
16615         spin_unlock_irq(&phba->hbalock);
16616         return xri;
16617 }
16618
16619 /**
16620  * __lpfc_sli4_free_xri - Release an xri for reuse.
16621  * @phba: pointer to lpfc hba data structure.
16622  * @xri: the xri to release.
16623  *
16624  * This routine releases an xri to the pool of available xris maintained
16625  * by the driver. The caller is expected to hold the hbalock.
16625  **/
16626 static void
16627 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16628 {
16629         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask))
16630                 phba->sli4_hba.max_cfg_param.xri_used--;
16632 }
16633
16634 /**
16635  * lpfc_sli4_free_xri - Release an xri for reuse.
16636  * @phba: pointer to lpfc hba data structure.
16637  * @xri: the xri to release.
16638  *
16639  * This routine is invoked to release an xri to the pool of
16640  * available xris maintained by the driver.
16640  **/
16641 void
16642 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16643 {
16644         spin_lock_irq(&phba->hbalock);
16645         __lpfc_sli4_free_xri(phba, xri);
16646         spin_unlock_irq(&phba->hbalock);
16647 }
16648
16649 /**
16650  * lpfc_sli4_next_xritag - Get an xritag for the io
16651  * @phba: Pointer to HBA context object.
16652  *
16653  * This function gets an xritag for the iocb. If there is no unused
16654  * xritag it will return NO_XRI (0xffff), which is not a valid xritag.
16655  * The function returns the allocated xritag if successful.
16657  * The caller is not required to hold any lock.
16658  **/
16659 uint16_t
16660 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16661 {
16662         uint16_t xri_index;
16663
16664         xri_index = lpfc_sli4_alloc_xri(phba);
16665         if (xri_index == NO_XRI)
16666                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16667                                 "2004 Failed to allocate XRI. Last XRITAG is %d"
16668                                 " Max XRI is %d, Used XRI is %d\n",
16669                                 xri_index,
16670                                 phba->sli4_hba.max_cfg_param.max_xri,
16671                                 phba->sli4_hba.max_cfg_param.xri_used);
16672         return xri_index;
16673 }
16674
16675 /**
16676  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16677  * @phba: pointer to lpfc hba data structure.
16678  * @post_sgl_list: pointer to els sgl entry list.
16679  * @post_cnt: number of els sgl entries on the list.
16680  *
16681  * This routine is invoked to post a block of driver's sgl pages to the
16682  * HBA using non-embedded mailbox command. No Lock is held. This routine
16683  * is only called when the driver is loading and after all IO has been
16684  * stopped.
16685  **/
16686 static int
16687 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16688                             struct list_head *post_sgl_list,
16689                             int post_cnt)
16690 {
16691         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16692         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16693         struct sgl_page_pairs *sgl_pg_pairs;
16694         void *viraddr;
16695         LPFC_MBOXQ_t *mbox;
16696         uint32_t reqlen, alloclen, pg_pairs;
16697         uint32_t mbox_tmo;
16698         uint16_t xritag_start = 0;
16699         int rc = 0;
16700         uint32_t shdr_status, shdr_add_status;
16701         union lpfc_sli4_cfg_shdr *shdr;
16702
16703         reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16704                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16705         if (reqlen > SLI4_PAGE_SIZE) {
16706                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16707                                 "2559 Block sgl registration required DMA "
16708                                 "size (%d) greater than a page\n", reqlen);
16709                 return -ENOMEM;
16710         }
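        /*
         * Capacity note (illustrative arithmetic, assuming 16-byte
         * sgl_page_pairs entries and a 4KB SLI4_PAGE_SIZE): one
         * non-embedded command holds roughly (4096 - header) / 16,
         * i.e. about 250 page pairs, which bounds post_cnt here.
         */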
16711
16712         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16713         if (!mbox)
16714                 return -ENOMEM;
16715
16716         /* Allocate DMA memory and set up the non-embedded mailbox command */
16717         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16718                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16719                          LPFC_SLI4_MBX_NEMBED);
16720
16721         if (alloclen < reqlen) {
16722                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16723                                 "0285 Allocated DMA memory size (%d) is "
16724                                 "less than the requested DMA memory "
16725                                 "size (%d)\n", alloclen, reqlen);
16726                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16727                 return -ENOMEM;
16728         }
16729         /* Set up the SGL pages in the non-embedded DMA pages */
16730         viraddr = mbox->sge_array->addr[0];
16731         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16732         sgl_pg_pairs = &sgl->sgl_pg_pairs;
16733
16734         pg_pairs = 0;
16735         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16736                 /* Set up the sge entry */
16737                 sgl_pg_pairs->sgl_pg0_addr_lo =
16738                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16739                 sgl_pg_pairs->sgl_pg0_addr_hi =
16740                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16741                 sgl_pg_pairs->sgl_pg1_addr_lo =
16742                                 cpu_to_le32(putPaddrLow(0));
16743                 sgl_pg_pairs->sgl_pg1_addr_hi =
16744                                 cpu_to_le32(putPaddrHigh(0));
16745
16746                 /* Keep the first xritag on the list */
16747                 if (pg_pairs == 0)
16748                         xritag_start = sglq_entry->sli4_xritag;
16749                 sgl_pg_pairs++;
16750                 pg_pairs++;
16751         }
16752
16753         /* Complete initialization and perform endian conversion. */
16754         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16755         bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16756         sgl->word0 = cpu_to_le32(sgl->word0);
16757
16758         if (!phba->sli4_hba.intr_enable)
16759                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16760         else {
16761                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16762                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16763         }
16764         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16765         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16766         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16767         if (rc != MBX_TIMEOUT)
16768                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16769         if (shdr_status || shdr_add_status || rc) {
16770                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16771                                 "2513 POST_SGL_BLOCK mailbox command failed "
16772                                 "status x%x add_status x%x mbx status x%x\n",
16773                                 shdr_status, shdr_add_status, rc);
16774                 rc = -ENXIO;
16775         }
16776         return rc;
16777 }
16778
16779 /**
16780  * lpfc_sli4_post_io_sgl_block - post a block of nvme buffer sgls to the port
16781  * @phba: pointer to lpfc hba data structure.
16782  * @nblist: pointer to nvme buffer list.
16783  * @count: number of nvme buffers on the list.
16784  *
16785  * This routine is invoked to post a block of @count nvme sgl pages from an
16786  * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
16787  * No lock is held.
16788  *
16789  **/
16790 static int
16791 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16792                             int count)
16793 {
16794         struct lpfc_io_buf *lpfc_ncmd;
16795         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16796         struct sgl_page_pairs *sgl_pg_pairs;
16797         void *viraddr;
16798         LPFC_MBOXQ_t *mbox;
16799         uint32_t reqlen, alloclen, pg_pairs;
16800         uint32_t mbox_tmo;
16801         uint16_t xritag_start = 0;
16802         int rc = 0;
16803         uint32_t shdr_status, shdr_add_status;
16804         dma_addr_t pdma_phys_bpl1;
16805         union lpfc_sli4_cfg_shdr *shdr;
16806
16807         /* Calculate the requested length of the dma memory */
16808         reqlen = count * sizeof(struct sgl_page_pairs) +
16809                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16810         if (reqlen > SLI4_PAGE_SIZE) {
16811                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16812                                 "6118 Block sgl registration required DMA "
16813                                 "size (%d) greater than a page\n", reqlen);
16814                 return -ENOMEM;
16815         }
16816         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16817         if (!mbox) {
16818                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16819                                 "6119 Failed to allocate mbox cmd memory\n");
16820                 return -ENOMEM;
16821         }
16822
16823         /* Allocate DMA memory and set up the non-embedded mailbox command */
16824         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16825                                     LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16826                                     reqlen, LPFC_SLI4_MBX_NEMBED);
16827
16828         if (alloclen < reqlen) {
16829                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16830                                 "6120 Allocated DMA memory size (%d) is "
16831                                 "less than the requested DMA memory "
16832                                 "size (%d)\n", alloclen, reqlen);
16833                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16834                 return -ENOMEM;
16835         }
16836
16837         /* Get the first SGE entry from the non-embedded DMA memory */
16838         viraddr = mbox->sge_array->addr[0];
16839
16840         /* Set up the SGL pages in the non-embedded DMA pages */
16841         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16842         sgl_pg_pairs = &sgl->sgl_pg_pairs;
16843
16844         pg_pairs = 0;
16845         list_for_each_entry(lpfc_ncmd, nblist, list) {
16846                 /* Set up the sge entry */
16847                 sgl_pg_pairs->sgl_pg0_addr_lo =
16848                         cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16849                 sgl_pg_pairs->sgl_pg0_addr_hi =
16850                         cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16851                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16852                         pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16853                                                 SGL_PAGE_SIZE;
16854                 else
16855                         pdma_phys_bpl1 = 0;
16856                 sgl_pg_pairs->sgl_pg1_addr_lo =
16857                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16858                 sgl_pg_pairs->sgl_pg1_addr_hi =
16859                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16860                 /* Keep the first xritag on the list */
16861                 if (pg_pairs == 0)
16862                         xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16863                 sgl_pg_pairs++;
16864                 pg_pairs++;
16865         }
16866         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16867         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16868         /* Perform endian conversion if necessary */
16869         sgl->word0 = cpu_to_le32(sgl->word0);
16870
16871         if (!phba->sli4_hba.intr_enable) {
16872                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16873         } else {
16874                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16875                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16876         }
16877         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16878         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16879         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16880         if (rc != MBX_TIMEOUT)
16881                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16882         if (shdr_status || shdr_add_status || rc) {
16883                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16884                                 "6125 POST_SGL_BLOCK mailbox command failed "
16885                                 "status x%x add_status x%x mbx status x%x\n",
16886                                 shdr_status, shdr_add_status, rc);
16887                 rc = -ENXIO;
16888         }
16889         return rc;
16890 }
16891
16892 /**
16893  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
16894  * @phba: pointer to lpfc hba data structure.
16895  * @post_nblist: pointer to the nvme buffer list.
16896  * @sb_count: number of nvme buffers on the list.
16897  *
16898  * This routine walks a list of nvme buffers that was passed in. It
16899  * attempts to construct blocks of nvme buffer sgls with contiguous xris
16900  * and posts them with the non-embedded SGL block post mailbox command.
16901  * Any single sgl left with a non-contiguous xri is posted with the
16902  * embedded SGL post mailbox command. The @post_nblist passed in must be
16903  * a local list, thus no lock is needed when manipulating the list.
16903  *
16904  * Returns: 0 = failure, non-zero number of successfully posted buffers.
16905  **/
16906 int
16907 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16908                            struct list_head *post_nblist, int sb_count)
16909 {
16910         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
16911         int status, sgl_size;
16912         int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16913         dma_addr_t pdma_phys_sgl1;
16914         int last_xritag = NO_XRI;
16915         int cur_xritag;
16916         LIST_HEAD(prep_nblist);
16917         LIST_HEAD(blck_nblist);
16918         LIST_HEAD(nvme_nblist);
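        /*
         * Buffers flow through three local lists: prep_nblist gathers
         * candidates with contiguous xris, blck_nblist holds the block
         * currently being posted, and nvme_nblist collects buffers whose
         * post status has been resolved for replenishment below.
         */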
16919
16920         /* sanity check */
16921         if (sb_count <= 0)
16922                 return -EINVAL;
16923
16924         sgl_size = phba->cfg_sg_dma_buf_size;
16925         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16926                 list_del_init(&lpfc_ncmd->list);
16927                 block_cnt++;
16928                 if ((last_xritag != NO_XRI) &&
16929                     (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16930                         /* a hole in xri block, form a sgl posting block */
16931                         list_splice_init(&prep_nblist, &blck_nblist);
16932                         post_cnt = block_cnt - 1;
16933                         /* prepare list for next posting block */
16934                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16935                         block_cnt = 1;
16936                 } else {
16937                         /* prepare list for next posting block */
16938                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16939                         /* enough sgls for non-embed sgl mbox command */
16940                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16941                                 list_splice_init(&prep_nblist, &blck_nblist);
16942                                 post_cnt = block_cnt;
16943                                 block_cnt = 0;
16944                         }
16945                 }
16946                 num_posting++;
16947                 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16948
16949                 /* end of repost sgl list condition for NVME buffers */
16950                 if (num_posting == sb_count) {
16951                         if (post_cnt == 0) {
16952                                 /* last sgl posting block */
16953                                 list_splice_init(&prep_nblist, &blck_nblist);
16954                                 post_cnt = block_cnt;
16955                         } else if (block_cnt == 1) {
16956                                 /* last single sgl with non-contiguous xri */
16957                                 if (sgl_size > SGL_PAGE_SIZE)
16958                                         pdma_phys_sgl1 =
16959                                                 lpfc_ncmd->dma_phys_sgl +
16960                                                 SGL_PAGE_SIZE;
16961                                 else
16962                                         pdma_phys_sgl1 = 0;
16963                                 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16964                                 status = lpfc_sli4_post_sgl(
16965                                                 phba, lpfc_ncmd->dma_phys_sgl,
16966                                                 pdma_phys_sgl1, cur_xritag);
16967                                 if (status) {
16968                                         /* Post error.  Buffer unavailable. */
16969                                         lpfc_ncmd->flags |=
16970                                                 LPFC_SBUF_NOT_POSTED;
16971                                 } else {
16972                                         /* Post success. Buffer available. */
16973                                         lpfc_ncmd->flags &=
16974                                                 ~LPFC_SBUF_NOT_POSTED;
16975                                         lpfc_ncmd->status = IOSTAT_SUCCESS;
16976                                         num_posted++;
16977                                 }
16978                                 /* success, put on NVME buffer sgl list */
16979                                 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16980                         }
16981                 }
16982
16983                 /* continue until a nembed page worth of sgls */
16984                 if (post_cnt == 0)
16985                         continue;
16986
16987                 /* post block of NVME buffer list sgls */
16988                 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16989                                                      post_cnt);
16990
16991                 /* don't reset xritag due to hole in xri block */
16992                 if (block_cnt == 0)
16993                         last_xritag = NO_XRI;
16994
16995                 /* reset NVME buffer post count for next round of posting */
16996                 post_cnt = 0;
16997
16998                 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
16999                 while (!list_empty(&blck_nblist)) {
17000                         list_remove_head(&blck_nblist, lpfc_ncmd,
17001                                          struct lpfc_io_buf, list);
17002                         if (status) {
17003                                 /* Post error.  Mark buffer unavailable. */
17004                                 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17005                         } else {
17006                                 /* Post success, Mark buffer available. */
17007                                 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17008                                 lpfc_ncmd->status = IOSTAT_SUCCESS;
17009                                 num_posted++;
17010                         }
17011                         list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17012                 }
17013         }
17014         /* Push NVME buffers with sgl posted to the available list */
17015         lpfc_io_buf_replenish(phba, &nvme_nblist);
17016
17017         return num_posted;
17018 }
17019
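/*
 * Worked example of the blocking logic above (illustrative xris): for
 * buffers carrying xris 100, 101, 102, 200, the hole at 200 splices
 * {100, 101, 102} onto blck_nblist for one non-embedded block post,
 * while 200, left as the final single-entry block, is posted by itself
 * through lpfc_sli4_post_sgl().
 */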
17020 /**
17021  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17022  * @phba: pointer to lpfc_hba struct that the frame was received on
17023  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17024  *
17025  * This function checks the fields in the @fc_hdr to see if the FC frame is a
17026  * valid type of frame that the LPFC driver will handle. This function will
17027  * return a zero if the frame is a valid frame or a non zero value when the
17028  * frame does not pass the check.
17029  **/
17030 static int
17031 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17032 {
17034         struct fc_vft_header *fc_vft_hdr;
17035         uint32_t *header = (uint32_t *) fc_hdr;
17036
17037         switch (fc_hdr->fh_r_ctl) {
17038         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
17039         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
17040         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
17041         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
17042         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
17043         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
17044         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
17045         case FC_RCTL_DD_CMD_STATUS:     /* command status */
17046         case FC_RCTL_ELS_REQ:   /* extended link services request */
17047         case FC_RCTL_ELS_REP:   /* extended link services reply */
17048         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
17049         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
17050         case FC_RCTL_BA_NOP:    /* basic link service NOP */
17051         case FC_RCTL_BA_ABTS:   /* basic link service abort */
17052         case FC_RCTL_BA_RMC:    /* remove connection */
17053         case FC_RCTL_BA_ACC:    /* basic accept */
17054         case FC_RCTL_BA_RJT:    /* basic reject */
17055         case FC_RCTL_BA_PRMT:
17056         case FC_RCTL_ACK_1:     /* acknowledge_1 */
17057         case FC_RCTL_ACK_0:     /* acknowledge_0 */
17058         case FC_RCTL_P_RJT:     /* port reject */
17059         case FC_RCTL_F_RJT:     /* fabric reject */
17060         case FC_RCTL_P_BSY:     /* port busy */
17061         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
17062         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
17063         case FC_RCTL_LCR:       /* link credit reset */
17064         case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17065         case FC_RCTL_END:       /* end */
17066                 break;
17067         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
17068                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17069                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17070                 return lpfc_fc_frame_check(phba, fc_hdr);
17071         default:
17072                 goto drop;
17073         }
17074
17075         switch (fc_hdr->fh_type) {
17076         case FC_TYPE_BLS:
17077         case FC_TYPE_ELS:
17078         case FC_TYPE_FCP:
17079         case FC_TYPE_CT:
17080         case FC_TYPE_NVME:
17081                 break;
17082         case FC_TYPE_IP:
17083         case FC_TYPE_ILS:
17084         default:
17085                 goto drop;
17086         }
17087
17088         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17089                         "2538 Received frame rctl:x%x, type:x%x, "
17090                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17091                         fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17092                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17093                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17094                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17095                         be32_to_cpu(header[6]));
17096         return 0;
17097 drop:
17098         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17099                         "2539 Dropped frame rctl:x%x type:x%x\n",
17100                         fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17101         return 1;
17102 }
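
/*
 * Caller sketch (illustrative, mirroring the receive path further below,
 * not new driver logic): a non-zero return means the frame is dropped:
 *
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}
 *
 * Note that the VFT case above re-checks the FC header that immediately
 * follows the Virtual Fabric Tagging header, so tagged frames are
 * validated on their encapsulated r_ctl/type values.
 */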
17103
17104 /**
17105  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17106  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17107  *
17108  * This function processes the FC header to retrieve the VFI from the VF
17109  * header, if one exists. This function returns the VFI if one exists
17110  * or 0 if no VFT header exists.
17111  **/
17112 static uint32_t
17113 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17114 {
17115         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17116
17117         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17118                 return 0;
17119         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17120 }
17121
17122 /**
17123  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17124  * @phba: Pointer to the HBA structure to search for the vport on
17125  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17126  * @fcfi: The FCF index (FCFI) that the frame came from
 * @did: The Destination ID (D_ID) the frame is addressed to
17127  *
17128  * This function searches the @phba for a vport that matches the content of the
17129  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17130  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17131  * returns the matching vport pointer or NULL if unable to match frame to a
17132  * vport.
17133  **/
17134 static struct lpfc_vport *
17135 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17136                        uint16_t fcfi, uint32_t did)
17137 {
17138         struct lpfc_vport **vports;
17139         struct lpfc_vport *vport = NULL;
17140         int i;
17141
17142         if (did == Fabric_DID)
17143                 return phba->pport;
17144         if ((phba->pport->fc_flag & FC_PT2PT) &&
17145                 !(phba->link_state == LPFC_HBA_READY))
17146                 return phba->pport;
17147
17148         vports = lpfc_create_vport_work_array(phba);
17149         if (vports != NULL) {
17150                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17151                         if (phba->fcf.fcfi == fcfi &&
17152                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17153                             vports[i]->fc_myDID == did) {
17154                                 vport = vports[i];
17155                                 break;
17156                         }
17157                 }
17158         }
17159         lpfc_destroy_vport_work_array(phba, vports);
17160         return vport;
17161 }
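
/*
 * Matching sketch, for reference: a frame belongs to a vport only when
 * the FCF index, the VFI (0 when no VFT header is present) and the
 * destination ID all agree.  The receive path below drops frames that
 * match no vport:
 *
 *	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
 *	if (!vport)
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 */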
17162
17163 /**
17164  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17165  * @vport: The vport to work on.
17166  *
17167  * This function updates the receive sequence time stamp for this vport. The
17168  * receive sequence time stamp indicates the time that the last frame of the
17169  * sequence that has been idle for the longest amount of time was received.
17170  * The driver uses this time stamp to determine if any received sequences have
17171  * timed out.
17172  **/
17173 static void
17174 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17175 {
17176         struct lpfc_dmabuf *h_buf;
17177         struct hbq_dmabuf *dmabuf = NULL;
17178
17179         /* get the oldest sequence on the rcv list */
17180         h_buf = list_get_first(&vport->rcv_buffer_list,
17181                                struct lpfc_dmabuf, list);
17182         if (!h_buf)
17183                 return;
17184         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17185         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17186 }
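
/*
 * Ordering note: rcv_buffer_list is kept oldest-first.  Sequences that
 * receive a new frame are moved to the tail by lpfc_fc_frame_add(), so
 * the head entry read above always belongs to the longest-idle sequence
 * and its time stamp bounds every other sequence on the list.
 */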
17187
17188 /**
17189  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17190  * @vport: The vport that the received sequences were sent to.
17191  *
17192  * This function cleans up all outstanding received sequences. This is called
17193  * by the driver when a link event or user action invalidates all the received
17194  * sequences.
17195  **/
17196 void
17197 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17198 {
17199         struct lpfc_dmabuf *h_buf, *hnext;
17200         struct lpfc_dmabuf *d_buf, *dnext;
17201         struct hbq_dmabuf *dmabuf = NULL;
17202
17203         /* start with the oldest sequence on the rcv list */
17204         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17205                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17206                 list_del_init(&dmabuf->hbuf.list);
17207                 list_for_each_entry_safe(d_buf, dnext,
17208                                          &dmabuf->dbuf.list, list) {
17209                         list_del_init(&d_buf->list);
17210                         lpfc_in_buf_free(vport->phba, d_buf);
17211                 }
17212                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17213         }
17214 }
17215
17216 /**
17217  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17218  * @vport: The vport that the received sequences were sent to.
17219  *
17220  * This function determines whether any received sequences have timed out by
17221  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17222  * indicates that there is at least one timed out sequence this routine will
17223  * go through the received sequences one at a time from most inactive to most
17224  * active to determine which ones need to be cleaned up. Once it has determined
17225  * that a sequence needs to be cleaned up it will simply free up the resources
17226  * without sending an abort.
17227  **/
17228 void
17229 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17230 {
17231         struct lpfc_dmabuf *h_buf, *hnext;
17232         struct lpfc_dmabuf *d_buf, *dnext;
17233         struct hbq_dmabuf *dmabuf = NULL;
17234         unsigned long timeout;
17235         int abort_count = 0;
17236
17237         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17238                    vport->rcv_buffer_time_stamp);
17239         if (list_empty(&vport->rcv_buffer_list) ||
17240             time_before(jiffies, timeout))
17241                 return;
17242         /* start with the oldest sequence on the rcv list */
17243         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17244                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17245                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17246                            dmabuf->time_stamp);
17247                 if (time_before(jiffies, timeout))
17248                         break;
17249                 abort_count++;
17250                 list_del_init(&dmabuf->hbuf.list);
17251                 list_for_each_entry_safe(d_buf, dnext,
17252                                          &dmabuf->dbuf.list, list) {
17253                         list_del_init(&d_buf->list);
17254                         lpfc_in_buf_free(vport->phba, d_buf);
17255                 }
17256                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17257         }
17258         if (abort_count)
17259                 lpfc_update_rcv_time_stamp(vport);
17260 }
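
/*
 * Timeout arithmetic sketch (fc_edtov is held in milliseconds, as the
 * msecs_to_jiffies() conversion above assumes): a sequence is considered
 * timed out once
 *
 *	time_after(jiffies, dmabuf->time_stamp +
 *			    msecs_to_jiffies(vport->phba->fc_edtov))
 *
 * holds, which is the inverse of the time_before() early-out in the loop.
 */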
17261
17262 /**
17263  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport to which the frame was sent
17264  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17265  *
17266  * This function searches through the existing incomplete sequences that have
17267  * been sent to this @vport. If the frame matches one of the incomplete
17268  * sequences then the dbuf in the @dmabuf is added to the list of frames that
17269  * make up that sequence. If no sequence is found that matches this frame then
17270  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17271  * This function returns a pointer to the first dmabuf in the sequence list that
17272  * the frame was linked to.
17273  **/
17274 static struct hbq_dmabuf *
17275 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17276 {
17277         struct fc_frame_header *new_hdr;
17278         struct fc_frame_header *temp_hdr;
17279         struct lpfc_dmabuf *d_buf;
17280         struct lpfc_dmabuf *h_buf;
17281         struct hbq_dmabuf *seq_dmabuf = NULL;
17282         struct hbq_dmabuf *temp_dmabuf = NULL;
17283         uint8_t found = 0;
17284
17285         INIT_LIST_HEAD(&dmabuf->dbuf.list);
17286         dmabuf->time_stamp = jiffies;
17287         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17288
17289         /* Use the hdr_buf to find the sequence that this frame belongs to */
17290         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17291                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17292                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17293                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17294                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17295                         continue;
17296                 /* found a pending sequence that matches this frame */
17297                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17298                 break;
17299         }
17300         if (!seq_dmabuf) {
17301                 /*
17302                  * This indicates first frame received for this sequence.
17303                  * Queue the buffer on the vport's rcv_buffer_list.
17304                  */
17305                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17306                 lpfc_update_rcv_time_stamp(vport);
17307                 return dmabuf;
17308         }
17309         temp_hdr = seq_dmabuf->hbuf.virt;
17310         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17311                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17312                 list_del_init(&seq_dmabuf->hbuf.list);
17313                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17314                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17315                 lpfc_update_rcv_time_stamp(vport);
17316                 return dmabuf;
17317         }
17318         /* move this sequence to the tail to indicate a young sequence */
17319         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17320         seq_dmabuf->time_stamp = jiffies;
17321         lpfc_update_rcv_time_stamp(vport);
17322         if (list_empty(&seq_dmabuf->dbuf.list)) {
17323                 temp_hdr = dmabuf->hbuf.virt;
17324                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17325                 return seq_dmabuf;
17326         }
17327         /* find the correct place in the sequence to insert this frame */
17328         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17329         while (!found) {
17330                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17331                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17332                 /*
17333                  * If the frame's sequence count is greater than the frame on
17334                  * the list then insert the frame right after this frame
17335                  */
17336                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17337                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17338                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17339                         found = 1;
17340                         break;
17341                 }
17342
17343                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17344                         break;
17345                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17346         }
17347
17348         if (found)
17349                 return seq_dmabuf;
17350         return NULL;
17351 }
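
/*
 * Sequence key sketch: two frames belong to the same sequence when their
 * SEQ_ID, OX_ID and 3-byte S_ID all match (a and b below are hypothetical
 * fc_frame_header pointers, not driver variables):
 *
 *	same_seq = a->fh_seq_id == b->fh_seq_id &&
 *		   a->fh_ox_id == b->fh_ox_id &&
 *		   !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
 *
 * Frames within a matched sequence are then kept ordered by fh_seq_cnt.
 */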
17352
17353 /**
17354  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17355  * @vport: pointer to a virtual port
17356  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17357  *
17358  * This function tries to abort the partially assembled sequence described
17359  * by the information from the basic abort @dmabuf. It checks to see whether
17360  * such a partially assembled sequence is held by the driver. If so, it frees
17361  * up all the frames from the partially assembled sequence.
17362  *
17363  * Return
17364  * true  -- if a matching partially assembled sequence is present and all
17365  *          of its frames have been freed;
17366  * false -- if no matching partially assembled sequence is present, so
17367  *          nothing was aborted in the lower layer driver
17368  **/
17369 static bool
17370 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17371                             struct hbq_dmabuf *dmabuf)
17372 {
17373         struct fc_frame_header *new_hdr;
17374         struct fc_frame_header *temp_hdr;
17375         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17376         struct hbq_dmabuf *seq_dmabuf = NULL;
17377
17378         /* Use the hdr_buf to find the sequence that matches this frame */
17379         INIT_LIST_HEAD(&dmabuf->dbuf.list);
17380         INIT_LIST_HEAD(&dmabuf->hbuf.list);
17381         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17382         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17383                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17384                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17385                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17386                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17387                         continue;
17388                 /* found a pending sequence that matches this frame */
17389                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17390                 break;
17391         }
17392
17393         /* Free up all the frames from the partially assembled sequence */
17394         if (seq_dmabuf) {
17395                 list_for_each_entry_safe(d_buf, n_buf,
17396                                          &seq_dmabuf->dbuf.list, list) {
17397                         list_del_init(&d_buf->list);
17398                         lpfc_in_buf_free(vport->phba, d_buf);
17399                 }
17400                 return true;
17401         }
17402         return false;
17403 }
17404
17405 /**
17406  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17407  * @vport: pointer to a virtual port
17408  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17409  *
17410  * This function tries to abort an assembled sequence at the upper level
17411  * protocol, described by the information from the basic abort @dmabuf. It
17412  * checks to see whether such a pending context exists at the upper level
17413  * protocol. If so, it cleans up the pending context.
17414  *
17415  * Return
17416  * true  -- if a matching pending context of the sequence was found and
17417  *          cleaned up at the ulp;
17418  * false -- if no matching pending context of the sequence is present
17419  *          at the ulp.
17420  **/
17421 static bool
17422 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17423 {
17424         struct lpfc_hba *phba = vport->phba;
17425         int handled;
17426
17427         /* Accepting abort at ulp with SLI4 only */
17428         if (phba->sli_rev < LPFC_SLI_REV4)
17429                 return false;
17430
17431         /* Let interested upper level protocols handle the abort */
17432         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17433         if (handled)
17434                 return true;
17435
17436         return false;
17437 }
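
/*
 * Resolution order, for reference: lpfc_sli4_handle_unsol_abort() first
 * tries lpfc_sli4_abort_partial_seq(); only when no partially assembled
 * sequence is found does it offer the abort to the ULP through this
 * routine (as coded above, only the CT handler is consulted).
 */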
17438
17439 /**
17440  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17441  * @phba: Pointer to HBA context object.
17442  * @cmd_iocbq: pointer to the command iocbq structure.
17443  * @rsp_iocbq: pointer to the response iocbq structure.
17444  *
17445  * This function handles the sequence abort response iocb command complete
17446  * event. It properly releases the memory allocated to the sequence abort
17447  * accept iocb.
17448  **/
17449 static void
17450 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17451                              struct lpfc_iocbq *cmd_iocbq,
17452                              struct lpfc_iocbq *rsp_iocbq)
17453 {
17454         struct lpfc_nodelist *ndlp;
17455
17456         if (cmd_iocbq) {
17457                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17458                 lpfc_nlp_put(ndlp);
17459                 lpfc_nlp_not_used(ndlp);
17460                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17461         }
17462
17463         /* Failure means BLS ABORT RSP did not get delivered to remote node */
17464         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17465                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17466                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
17467                         rsp_iocbq->iocb.ulpStatus,
17468                         rsp_iocbq->iocb.un.ulpWord[4]);
17469 }
17470
17471 /**
17472  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17473  * @phba: Pointer to HBA context object.
17474  * @xri: xri id in transaction.
17475  *
17476  * This function validates that the xri maps to the known range of XRIs
17477  * allocated and used by the driver.
17478  **/
17479 uint16_t
17480 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17481                       uint16_t xri)
17482 {
17483         uint16_t i;
17484
17485         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17486                 if (xri == phba->sli4_hba.xri_ids[i])
17487                         return i;
17488         }
17489         return NO_XRI;
17490 }
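
/*
 * Usage sketch: callers translate an on-the-wire XRI into the driver's
 * logical index and treat NO_XRI as "not owned by this port", as done in
 * lpfc_sli4_seq_abort_rsp() below:
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri,
 *				    (xri == oxid) ? rxid : oxid, 0);
 */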
17491
17492 /**
17493  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17494  * @vport: Pointer to the vport on which the abort was received.
17495  * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled sequence was successfully aborted.
17496  *
17497  * This function sends a basic response to a previous unsol sequence abort
17498  * event after aborting the sequence handling.
17499  **/
17500 void
17501 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17502                         struct fc_frame_header *fc_hdr, bool aborted)
17503 {
17504         struct lpfc_hba *phba = vport->phba;
17505         struct lpfc_iocbq *ctiocb = NULL;
17506         struct lpfc_nodelist *ndlp;
17507         uint16_t oxid, rxid, xri, lxri;
17508         uint32_t sid, fctl;
17509         IOCB_t *icmd;
17510         int rc;
17511
17512         if (!lpfc_is_link_up(phba))
17513                 return;
17514
17515         sid = sli4_sid_from_fc_hdr(fc_hdr);
17516         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17517         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17518
17519         ndlp = lpfc_findnode_did(vport, sid);
17520         if (!ndlp) {
17521                 ndlp = lpfc_nlp_init(vport, sid);
17522                 if (!ndlp) {
17523                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17524                                          "1268 Failed to allocate ndlp for "
17525                                          "oxid:x%x SID:x%x\n", oxid, sid);
17526                         return;
17527                 }
17528                 /* Put ndlp onto pport node list */
17529                 lpfc_enqueue_node(vport, ndlp);
17530         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17531                 /* re-setup ndlp without removing from node list */
17532                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17533                 if (!ndlp) {
17534                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17535                                          "3275 Failed to activate ndlp found "
17536                                          "for oxid:x%x SID:x%x\n", oxid, sid);
17537                         return;
17538                 }
17539         }
17540
17541         /* Allocate buffer for rsp iocb */
17542         ctiocb = lpfc_sli_get_iocbq(phba);
17543         if (!ctiocb)
17544                 return;
17545
17546         /* Extract the F_CTL field from FC_HDR */
17547         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17548
17549         icmd = &ctiocb->iocb;
17550         icmd->un.xseq64.bdl.bdeSize = 0;
17551         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17552         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17553         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17554         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17555
17556         /* Fill in the rest of iocb fields */
17557         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17558         icmd->ulpBdeCount = 0;
17559         icmd->ulpLe = 1;
17560         icmd->ulpClass = CLASS3;
17561         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17562         ctiocb->context1 = lpfc_nlp_get(ndlp);
17563
17565         ctiocb->vport = phba->pport;
17566         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17567         ctiocb->sli4_lxritag = NO_XRI;
17568         ctiocb->sli4_xritag = NO_XRI;
17569
17570         if (fctl & FC_FC_EX_CTX)
17571                 /* Exchange responder sent the abort so we
17572                  * own the oxid.
17573                  */
17574                 xri = oxid;
17575         else
17576                 xri = rxid;
17577         lxri = lpfc_sli4_xri_inrange(phba, xri);
17578         if (lxri != NO_XRI)
17579                 lpfc_set_rrq_active(phba, ndlp, lxri,
17580                         (xri == oxid) ? rxid : oxid, 0);
17581         /* For BA_ABTS from exchange responder, if the logical xri with
17582          * the oxid maps to the FCP XRI range, the port no longer has
17583          * that exchange context, send a BLS_RJT. Override the IOCB for
17584          * a BA_RJT.
17585          */
17586         if ((fctl & FC_FC_EX_CTX) &&
17587             (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17588                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17589                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17590                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17591                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17592         }
17593
17594         /* If BA_ABTS failed to abort a partially assembled receive sequence,
17595          * the driver no longer has that exchange, send a BLS_RJT. Override
17596          * the IOCB for a BA_RJT.
17597          */
17598         if (aborted == false) {
17599                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17600                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17601                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17602                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17603         }
17604
17605         if (fctl & FC_FC_EX_CTX) {
17606                 /* ABTS sent by responder to CT exchange, construction
17607                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17608                  * field and RX_ID from ABTS for RX_ID field.
17609                  */
17610                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17611         } else {
17612                 /* ABTS sent by initiator to CT exchange, construction
17613                  * of BA_ACC will need to allocate a new XRI as for the
17614                  * of BA_ACC will need to allocate a new XRI for the
17615                  */
17616                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17617         }
17618         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17619         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17620
17621         /* Xmit CT abts response on exchange <xid> */
17622         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17623                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17624                          icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17625
17626         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17627         if (rc == IOCB_ERROR) {
17628                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17629                                  "2925 Failed to issue CT ABTS RSP x%x on "
17630                                  "xri x%x, Data x%x\n",
17631                                  icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17632                                  phba->link_state);
17633                 lpfc_nlp_put(ndlp);
17634                 ctiocb->context1 = NULL;
17635                 lpfc_sli_release_iocbq(phba, ctiocb);
17636         }
17637 }
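
/*
 * Response selection, summarized: the IOCB defaults to BA_ACC and is
 * overridden to BA_RJT (reason FC_BA_RJT_UNABLE, explanation
 * FC_BA_RJT_INV_XID) when either the responder-owned xri maps outside
 * the driver's IOCB range or the partial-sequence abort did not succeed
 * (aborted == false).
 */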
17638
17639 /**
17640  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17641  * @vport: Pointer to the vport on which this sequence was received
17642  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17643  *
17644  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17645  * receive sequence is only partially assembled by the driver, it shall abort
17646  * the partially assembled frames for the sequence. Otherwise, if the
17647  * unsolicited receive sequence has been completely assembled and passed to
17648  * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
17649  * unsolicited sequence as aborted. After that, it issues a basic accept or
17650  * reject in response to the abort.
17651  **/
17652 static void
17653 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17654                              struct hbq_dmabuf *dmabuf)
17655 {
17656         struct lpfc_hba *phba = vport->phba;
17657         struct fc_frame_header fc_hdr;
17658         uint32_t fctl;
17659         bool aborted;
17660
17661         /* Make a copy of fc_hdr before the dmabuf being released */
17662         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17663         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17664
17665         if (fctl & FC_FC_EX_CTX) {
17666                 /* ABTS by responder to exchange, no cleanup needed */
17667                 aborted = true;
17668         } else {
17669                 /* ABTS by initiator to exchange, need to do cleanup */
17670                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17671                 if (aborted == false)
17672                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17673         }
17674         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17675
17676         if (phba->nvmet_support) {
17677                 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17678                 return;
17679         }
17680
17681         /* Respond with BA_ACC or BA_RJT accordingly */
17682         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17683 }
17684
17685 /**
17686  * lpfc_seq_complete - Indicates if a sequence is complete
17687  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17688  *
17689  * This function checks the sequence, starting with the frame described by
17690  * @dmabuf, to see if all the frames associated with this sequence are present.
17691  * The frames associated with this sequence are linked to the @dmabuf using the
17692  * dbuf list. This function looks for three things: 1) that the first frame
17693  * has a sequence count of zero; 2) that there is a frame with the last frame
17694  * of sequence bit set; 3) that there are no holes in the sequence count. The
17695  * function returns 1 when the sequence is complete, otherwise it returns 0.
17696  **/
17697 static int
17698 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17699 {
17700         struct fc_frame_header *hdr;
17701         struct lpfc_dmabuf *d_buf;
17702         struct hbq_dmabuf *seq_dmabuf;
17703         uint32_t fctl;
17704         int seq_count = 0;
17705
17706         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17707         /* make sure first frame of sequence has a sequence count of zero */
17708         if (hdr->fh_seq_cnt != seq_count)
17709                 return 0;
17710         fctl = (hdr->fh_f_ctl[0] << 16 |
17711                 hdr->fh_f_ctl[1] << 8 |
17712                 hdr->fh_f_ctl[2]);
17713         /* If last frame of sequence we can return success. */
17714         if (fctl & FC_FC_END_SEQ)
17715                 return 1;
17716         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17717                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17718                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17719                 /* If there is a hole in the sequence count then fail. */
17720                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17721                         return 0;
17722                 fctl = (hdr->fh_f_ctl[0] << 16 |
17723                         hdr->fh_f_ctl[1] << 8 |
17724                         hdr->fh_f_ctl[2]);
17725                 /* If last frame of sequence we can return success. */
17726                 if (fctl & FC_FC_END_SEQ)
17727                         return 1;
17728         }
17729         return 0;
17730 }
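
/*
 * F_CTL sketch: the field is a 24-bit value carried as three bytes in
 * the FC header, hence the byte-shift reassembly above before testing
 * the end-of-sequence bit:
 *
 *	fctl = (hdr->fh_f_ctl[0] << 16) |
 *	       (hdr->fh_f_ctl[1] << 8) |
 *		hdr->fh_f_ctl[2];
 *	complete = !!(fctl & FC_FC_END_SEQ);
 */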
17731
17732 /**
17733  * lpfc_prep_seq - Prep sequence for ULP processing
17734  * @vport: Pointer to the vport on which this sequence was received
17735  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17736  *
17737  * This function takes a sequence, described by a list of frames, and creates
17738  * a list of iocbq structures to describe the sequence. This iocbq list will be
17739  * issued to the generic unsolicited sequence handler. This routine
17740  * returns a pointer to the first iocbq in the list. If the function is unable
17741  * to allocate an iocbq then it throws out the received frames that could not
17742  * be described and returns a pointer to the first iocbq. If unable to
17743  * allocate any iocbqs (including the first) this function will return NULL.
17744  **/
17745 static struct lpfc_iocbq *
17746 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17747 {
17748         struct hbq_dmabuf *hbq_buf;
17749         struct lpfc_dmabuf *d_buf, *n_buf;
17750         struct lpfc_iocbq *first_iocbq, *iocbq;
17751         struct fc_frame_header *fc_hdr;
17752         uint32_t sid;
17753         uint32_t len, tot_len;
17754         struct ulp_bde64 *pbde;
17755
17756         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17757         /* remove from receive buffer list */
17758         list_del_init(&seq_dmabuf->hbuf.list);
17759         lpfc_update_rcv_time_stamp(vport);
17760         /* get the Remote Port's SID */
17761         sid = sli4_sid_from_fc_hdr(fc_hdr);
17762         tot_len = 0;
17763         /* Get an iocbq struct to fill in. */
17764         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17765         if (first_iocbq) {
17766                 /* Initialize the first IOCB. */
17767                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17768                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17769                 first_iocbq->vport = vport;
17770
17771                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17772                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17773                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17774                         first_iocbq->iocb.un.rcvels.parmRo =
17775                                 sli4_did_from_fc_hdr(fc_hdr);
17776                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17777                 } else
17778                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17779                 first_iocbq->iocb.ulpContext = NO_XRI;
17780                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17781                         be16_to_cpu(fc_hdr->fh_ox_id);
17782                 /* iocbq is prepped for internal consumption.  Physical vpi. */
17783                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17784                         vport->phba->vpi_ids[vport->vpi];
17785                 /* put the first buffer into the first IOCBq */
17786                 tot_len = bf_get(lpfc_rcqe_length,
17787                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17788
17789                 first_iocbq->context2 = &seq_dmabuf->dbuf;
17790                 first_iocbq->context3 = NULL;
17791                 first_iocbq->iocb.ulpBdeCount = 1;
17792                 if (tot_len > LPFC_DATA_BUF_SIZE)
17793                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17794                                                         LPFC_DATA_BUF_SIZE;
17795                 else
17796                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17797
17798                 first_iocbq->iocb.un.rcvels.remoteID = sid;
17799
17800                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17801         }
17802         iocbq = first_iocbq;
17803         /*
17804          * Each IOCBq can have two Buffers assigned, so go through the list
17805          * of buffers for this sequence and save two buffers in each IOCBq
17806          */
17807         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17808                 if (!iocbq) {
17809                         lpfc_in_buf_free(vport->phba, d_buf);
17810                         continue;
17811                 }
17812                 if (!iocbq->context3) {
17813                         iocbq->context3 = d_buf;
17814                         iocbq->iocb.ulpBdeCount++;
17815                         /* We need to get the size out of the right CQE */
17816                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17817                         len = bf_get(lpfc_rcqe_length,
17818                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
17819                         pbde = (struct ulp_bde64 *)
17820                                         &iocbq->iocb.unsli3.sli3Words[4];
17821                         if (len > LPFC_DATA_BUF_SIZE)
17822                                 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17823                         else
17824                                 pbde->tus.f.bdeSize = len;
17825
17826                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17827                         tot_len += len;
17828                 } else {
17829                         iocbq = lpfc_sli_get_iocbq(vport->phba);
17830                         if (!iocbq) {
17831                                 if (first_iocbq) {
17832                                         first_iocbq->iocb.ulpStatus =
17833                                                         IOSTAT_FCP_RSP_ERROR;
17834                                         first_iocbq->iocb.un.ulpWord[4] =
17835                                                         IOERR_NO_RESOURCES;
17836                                 }
17837                                 lpfc_in_buf_free(vport->phba, d_buf);
17838                                 continue;
17839                         }
17840                         /* We need to get the size out of the right CQE */
17841                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17842                         len = bf_get(lpfc_rcqe_length,
17843                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
17844                         iocbq->context2 = d_buf;
17845                         iocbq->context3 = NULL;
17846                         iocbq->iocb.ulpBdeCount = 1;
17847                         if (len > LPFC_DATA_BUF_SIZE)
17848                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17849                                                         LPFC_DATA_BUF_SIZE;
17850                         else
17851                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17852
17853                         tot_len += len;
17854                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17855
17856                         iocbq->iocb.un.rcvels.remoteID = sid;
17857                         list_add_tail(&iocbq->list, &first_iocbq->list);
17858                 }
17859         }
17860         return first_iocbq;
17861 }
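
/*
 * Packing note: each iocbq describes at most two buffers (context2 and
 * context3), so an N-frame sequence consumes roughly DIV_ROUND_UP(N, 2)
 * iocbqs.  When an allocation fails mid-sequence, the remaining frames
 * are freed and the first iocb is marked IOSTAT_FCP_RSP_ERROR /
 * IOERR_NO_RESOURCES so the ULP can tell the sequence is truncated.
 */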
17862
17863 static void
17864 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17865                           struct hbq_dmabuf *seq_dmabuf)
17866 {
17867         struct fc_frame_header *fc_hdr;
17868         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17869         struct lpfc_hba *phba = vport->phba;
17870
17871         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17872         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17873         if (!iocbq) {
17874                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17875                                 "2707 Ring %d handler: Failed to allocate "
17876                                 "iocb Rctl x%x Type x%x received\n",
17877                                 LPFC_ELS_RING,
17878                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17879                 return;
17880         }
17881         if (!lpfc_complete_unsol_iocb(phba,
17882                                       phba->sli4_hba.els_wq->pring,
17883                                       iocbq, fc_hdr->fh_r_ctl,
17884                                       fc_hdr->fh_type))
17885                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17886                                 "2540 Ring %d handler: unexpected Rctl "
17887                                 "x%x Type x%x received\n",
17888                                 LPFC_ELS_RING,
17889                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17890
17891         /* Free iocb created in lpfc_prep_seq */
17892         list_for_each_entry_safe(curr_iocb, next_iocb,
17893                 &iocbq->list, list) {
17894                 list_del_init(&curr_iocb->list);
17895                 lpfc_sli_release_iocbq(phba, curr_iocb);
17896         }
17897         lpfc_sli_release_iocbq(phba, iocbq);
17898 }
17899
17900 static void
17901 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17902                             struct lpfc_iocbq *rspiocb)
17903 {
17904         struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17905
17906         if (pcmd && pcmd->virt)
17907                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17908         kfree(pcmd);
17909         lpfc_sli_release_iocbq(phba, cmdiocb);
17910         lpfc_drain_txq(phba);
17911 }
17912
17913 static void
17914 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17915                               struct hbq_dmabuf *dmabuf)
17916 {
17917         struct fc_frame_header *fc_hdr;
17918         struct lpfc_hba *phba = vport->phba;
17919         struct lpfc_iocbq *iocbq = NULL;
17920         union  lpfc_wqe *wqe;
17921         struct lpfc_dmabuf *pcmd = NULL;
17922         uint32_t frame_len;
17923         int rc;
17924         unsigned long iflags;
17925
17926         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17927         frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17928
17929         /* Send the received frame back */
17930         iocbq = lpfc_sli_get_iocbq(phba);
17931         if (!iocbq) {
17932                 /* Queue cq event and wakeup worker thread to process it */
17933                 spin_lock_irqsave(&phba->hbalock, iflags);
17934                 list_add_tail(&dmabuf->cq_event.list,
17935                               &phba->sli4_hba.sp_queue_event);
17936                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17937                 spin_unlock_irqrestore(&phba->hbalock, iflags);
17938                 lpfc_worker_wake_up(phba);
17939                 return;
17940         }
17941
17942         /* Allocate buffer for command payload */
17943         pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17944         if (pcmd)
17945                 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17946                                             &pcmd->phys);
17947         if (!pcmd || !pcmd->virt)
17948                 goto exit;
17949
17950         INIT_LIST_HEAD(&pcmd->list);
17951
17952         /* copy in the payload */
17953         memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17954
17955         /* fill in BDE's for command */
17956         iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17957         iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17958         iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17959         iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17960
17961         iocbq->context2 = pcmd;
17962         iocbq->vport = vport;
17963         iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17964         iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17965
17966         /*
17967          * Setup rest of the iocb as though it were a WQE
17968          * Build the SEND_FRAME WQE
17969          */
17970         wqe = (union lpfc_wqe *)&iocbq->iocb;
17971
17972         wqe->send_frame.frame_len = frame_len;
17973         wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17974         wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17975         wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17976         wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17977         wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17978         wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17979
17980         iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17981         iocbq->iocb.ulpLe = 1;
17982         iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17983         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17984         if (rc == IOCB_ERROR)
17985                 goto exit;
17986
17987         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17988         return;
17989
17990 exit:
17991         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17992                         "2023 Unable to process MDS loopback frame\n");
17993         if (pcmd && pcmd->virt)
17994                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17995         kfree(pcmd);
17996         if (iocbq)
17997                 lpfc_sli_release_iocbq(phba, iocbq);
17998         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17999 }
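
/*
 * MDS loopback sketch: the received frame is echoed back by copying its
 * payload into a fresh DMA buffer and rebuilding the six FC header words
 * into a SEND_FRAME WQE, e.g.:
 *
 *	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(((uint32_t *)fc_hdr)[0]);
 *	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
 *
 * When no iocbq is available, the cq event is queued for the worker
 * thread rather than dropped.
 */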
18000
18001 /**
18002  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18003  * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the hbq_dmabuf that describes the received frame.
18004  *
18005  * This function is called with no lock held. This function processes all
18006  * the received buffers and gives them to the upper layers when a received
18007  * buffer indicates that it is the final frame in the sequence. The interrupt
18008  * service routine processes received buffers in interrupt context.
18009  * The worker thread calls lpfc_sli4_handle_received_buffer, which calls the
18010  * appropriate receive function when the final frame in a sequence is received.
18011  **/
18012 void
18013 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18014                                  struct hbq_dmabuf *dmabuf)
18015 {
18016         struct hbq_dmabuf *seq_dmabuf;
18017         struct fc_frame_header *fc_hdr;
18018         struct lpfc_vport *vport;
18019         uint32_t fcfi;
18020         uint32_t did;
18021
18022         /* Process each received buffer */
18023         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18024
18025         if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18026             fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18027                 vport = phba->pport;
18028                 /* Handle MDS Loopback frames */
18029                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18030                 return;
18031         }
18032
18033         /* check to see if this is a valid type of frame */
18034         if (lpfc_fc_frame_check(phba, fc_hdr)) {
18035                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18036                 return;
18037         }
18038
18039         if ((bf_get(lpfc_cqe_code,
18040                     &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18041                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18042                               &dmabuf->cq_event.cqe.rcqe_cmpl);
18043         else
18044                 fcfi = bf_get(lpfc_rcqe_fcf_id,
18045                               &dmabuf->cq_event.cqe.rcqe_cmpl);
18046
18047         /* d_id this frame is directed to */
18048         did = sli4_did_from_fc_hdr(fc_hdr);
18049
18050         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18051         if (!vport) {
18052                 /* throw out the frame */
18053                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18054                 return;
18055         }
18056
18057         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18058         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18059                 (did != Fabric_DID)) {
18060                 /*
18061                  * Throw out the frame if we are not pt2pt.
18062                  * The pt2pt protocol allows for discovery frames
18063                  * to be received without a registered VPI.
18064                  */
18065                 if (!(vport->fc_flag & FC_PT2PT) ||
18066                         (phba->link_state == LPFC_HBA_READY)) {
18067                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
18068                         return;
18069                 }
18070         }
18071
18072         /* Handle the basic abort sequence (BA_ABTS) event */
18073         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18074                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18075                 return;
18076         }
18077
18078         /* Link this frame */
18079         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18080         if (!seq_dmabuf) {
18081                 /* unable to add frame to vport - throw it out */
18082                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18083                 return;
18084         }
18085         /* If not last frame in sequence continue processing frames. */
18086         if (!lpfc_seq_complete(seq_dmabuf))
18087                 return;
18088
18089         /* Send the complete sequence to the upper layer protocol */
18090         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18091 }
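
/*
 * Receive pipeline, in short: MDS loopback special case -> frame check
 * -> FCFI/DID-to-vport lookup -> VPI registration check -> BA_ABTS
 * handling -> lpfc_fc_frame_add() reassembly -> lpfc_seq_complete() ->
 * lpfc_sli4_send_seq_to_ulp().  Every failure along the way frees the
 * buffer through lpfc_in_buf_free().
 */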
18092
18093 /**
18094  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18095  * @phba: pointer to lpfc hba data structure.
18096  *
18097  * This routine is invoked to post rpi header templates to the
18098  * HBA consistent with the SLI-4 interface spec.  This routine
18099  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18100  * 64 rpi context headers.
18101  *
18102  * This routine does not require any locks.  Its usage is expected
18103  * to be driver load or reset recovery when driver execution is
18104  * sequential.
18105  *
18106  * Return codes
18107  *      0 - successful
18108  *      -EIO - The mailbox failed to complete successfully.
18109  *      When this error occurs, the driver is not guaranteed
18110  *      to have any rpi regions posted to the device and
18111  *      must either attempt to repost the regions or take a
18112  *      fatal error.
18113  **/
18114 int
18115 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18116 {
18117         struct lpfc_rpi_hdr *rpi_page;
18118         uint32_t rc = 0;
18119         uint16_t lrpi = 0;
18120
18121         /* SLI4 ports that support extents do not require RPI headers. */
18122         if (!phba->sli4_hba.rpi_hdrs_in_use)
18123                 goto exit;
18124         if (phba->sli4_hba.extents_in_use)
18125                 return -EIO;
18126
18127         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18128                 /*
18129                  * Assign the rpi headers a physical rpi only if the driver
18130                  * has not initialized those resources.  A port reset only
18131                  * needs the headers posted.
18132                  */
18133                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18134                     LPFC_RPI_RSRC_RDY)
18135                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18136
18137                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18138                 if (rc != MBX_SUCCESS) {
18139                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18140                                         "2008 Error %d posting all rpi "
18141                                         "headers\n", rc);
18142                         rc = -EIO;
18143                         break;
18144                 }
18145         }
18146
18147  exit:
18148         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18149                LPFC_RPI_RSRC_RDY);
18150         return rc;
18151 }
18152
18153 /**
18154  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18155  * @phba: pointer to lpfc hba data structure.
18156  * @rpi_page:  pointer to the rpi memory region.
18157  *
18158  * This routine is invoked to post a single rpi header to the
18159  * HBA consistent with the SLI-4 interface spec.  This memory region
18160  * maps up to 64 rpi context regions.
18161  *
18162  * Return codes
18163  *      0 - successful
18164  *      -ENOMEM - No available memory
18165  *      -EIO - The mailbox failed to complete successfully.
18166  **/
18167 int
18168 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18169 {
18170         LPFC_MBOXQ_t *mboxq;
18171         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18172         uint32_t rc = 0;
18173         uint32_t shdr_status, shdr_add_status;
18174         union lpfc_sli4_cfg_shdr *shdr;
18175
18176         /* SLI4 ports that support extents do not require RPI headers. */
18177         if (!phba->sli4_hba.rpi_hdrs_in_use)
18178                 return rc;
18179         if (phba->sli4_hba.extents_in_use)
18180                 return -EIO;
18181
18182         /* The port is notified of the header region via a mailbox command. */
18183         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18184         if (!mboxq) {
18185                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18186                                 "2001 Unable to allocate memory for issuing "
18187                                 "SLI_CONFIG_SPECIAL mailbox command\n");
18188                 return -ENOMEM;
18189         }
18190
18191         /* Post the rpi header region to the port. */
18192         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18193         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18194                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18195                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18196                          sizeof(struct lpfc_sli4_cfg_mhdr),
18197                          LPFC_SLI4_MBX_EMBED);
18198
18199
18200         /* Post the physical rpi to the port for this rpi header. */
18201         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18202                rpi_page->start_rpi);
18203         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18204                hdr_tmpl, rpi_page->page_count);
18205
18206         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18207         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18208         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18209         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18210         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18211         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18212         if (rc != MBX_TIMEOUT)
18213                 mempool_free(mboxq, phba->mbox_mem_pool);
18214         if (shdr_status || shdr_add_status || rc) {
18215                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18216                                 "2514 POST_RPI_HDR mailbox failed with "
18217                                 "status x%x add_status x%x, mbx status x%x\n",
18218                                 shdr_status, shdr_add_status, rc);
18219                 rc = -ENXIO;
18220         } else {
18221                 /*
18222                  * The next_rpi stores the next logical modulo-64 rpi value used
18223                  * to post physical rpis in subsequent rpi postings.
18224                  */
18225                 spin_lock_irq(&phba->hbalock);
18226                 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18227                 spin_unlock_irq(&phba->hbalock);
18228         }
18229         return rc;
18230 }
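
/*
 * Mailbox status sketch: SLI4 config commands report failure through
 * both the generic mailbox return code and the embedded cfg_shdr status
 * words, so all three values are checked, as above:
 *
 *	if (shdr_status || shdr_add_status || rc)
 *		rc = -ENXIO;
 */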
18231
18232 /**
18233  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18234  * @phba: pointer to lpfc hba data structure.
18235  *
18236  * This routine is invoked to allocate the next available rpi from the
18237  * driver's rpi bitmask.  If rpi headers are in use and the pool of
18238  * remaining rpis runs low, it also posts another rpi header page to
18239  * the port.
18240  *
18241  * Returns
18242  *      A valid rpi defined as rpi_base <= rpi < max_rpi if successful, or
18243  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
18244  **/
18245 int
18246 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18247 {
18248         unsigned long rpi;
18249         uint16_t max_rpi, rpi_limit;
18250         uint16_t rpi_remaining, lrpi = 0;
18251         struct lpfc_rpi_hdr *rpi_hdr;
18252         unsigned long iflag;
18253
18254         /*
18255          * Fetch the next logical rpi.  Because this index is logical,
18256          * the  driver starts at 0 each time.
18257          * the driver starts at 0 each time.
18258         spin_lock_irqsave(&phba->hbalock, iflag);
18259         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18260         rpi_limit = phba->sli4_hba.next_rpi;
18261
18262         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18263         if (rpi >= rpi_limit)
18264                 rpi = LPFC_RPI_ALLOC_ERROR;
18265         else {
18266                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18267                 phba->sli4_hba.max_cfg_param.rpi_used++;
18268                 phba->sli4_hba.rpi_count++;
18269         }
18270         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18271                         "0001 rpi:%x max:%x lim:%x\n",
18272                         (int) rpi, max_rpi, rpi_limit);
18273
18274         /*
18275          * Don't try to allocate more rpi header regions if the device limit
18276          * has been exhausted.
18277          */
18278         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18279             (phba->sli4_hba.rpi_count >= max_rpi)) {
18280                 spin_unlock_irqrestore(&phba->hbalock, iflag);
18281                 return rpi;
18282         }
18283
18284         /*
18285          * RPI header postings are not required for SLI4 ports capable of
18286          * extents.
18287          */
18288         if (!phba->sli4_hba.rpi_hdrs_in_use) {
18289                 spin_unlock_irqrestore(&phba->hbalock, iflag);
18290                 return rpi;
18291         }
18292
18293         /*
18294          * If the driver is running low on rpi resources, allocate another
18295          * page now.  Note that the next_rpi value is used because
18296          * it represents how many are actually in use whereas max_rpi notes
18297          * how many are supported max by the device.
18298          */
18299         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18300         spin_unlock_irqrestore(&phba->hbalock, iflag);
18301         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18302                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18303                 if (!rpi_hdr) {
18304                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18305                                         "2002 Error Could not grow rpi "
18306                                         "count\n");
18307                 } else {
18308                         lrpi = rpi_hdr->start_rpi;
18309                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18310                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18311                 }
18312         }
18313
18314         return rpi;
18315 }
18316
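/*
 * Editor's illustrative sketch (not driver code): a registration path
 * would typically pair lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi(),
 * checking for LPFC_RPI_ALLOC_ERROR before using the rpi.  The helper
 * name below is hypothetical.
 */
static int lpfc_example_with_rpi(struct lpfc_hba *phba)
{
        int rpi = lpfc_sli4_alloc_rpi(phba);

        if (rpi == LPFC_RPI_ALLOC_ERROR)
                return -ENOSPC;         /* device rpi range exhausted */

        /* ... use the rpi, e.g. in a REG_LOGIN mailbox command ... */

        lpfc_sli4_free_rpi(phba, rpi); /* return it to the pool when done */
        return 0;
}
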
18317 /**
18318  * __lpfc_sli4_free_rpi - Release an rpi for reuse (hbalock held).
18319  * @phba: pointer to lpfc hba data structure.
18320  * @rpi: the rpi to release.
18321  *
18322  * Releases an rpi to the pool of available rpis. Caller holds hbalock.
18323  **/
18324 static void
18325 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18326 {
18327         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18328                 phba->sli4_hba.rpi_count--;
18329                 phba->sli4_hba.max_cfg_param.rpi_used--;
18330         }
18331 }
18332
18333 /**
18334  * lpfc_sli4_free_rpi - Release an rpi for reuse.
18335  * @phba: pointer to lpfc hba data structure.
18336  * @rpi: the rpi to release.
18337  *
18338  * Releases an rpi to the pool of available rpis maintained by the driver.
18339  **/
18340 void
18341 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18342 {
18343         spin_lock_irq(&phba->hbalock);
18344         __lpfc_sli4_free_rpi(phba, rpi);
18345         spin_unlock_irq(&phba->hbalock);
18346 }
18347
18348 /**
18349  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18350  * @phba: pointer to lpfc hba data structure.
18351  *
18352  * This routine is invoked to free the rpi bitmask and rpi id
18353  * bookkeeping memory maintained by the driver.
18354  **/
18355 void
18356 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18357 {
18358         kfree(phba->sli4_hba.rpi_bmask);
18359         kfree(phba->sli4_hba.rpi_ids);
18360         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18361 }
18362
18363 /**
18364  * lpfc_sli4_resume_rpi - Resume an rpi with the port
18365  * @ndlp: pointer to the node whose rpi is being resumed.
18366  * @cmpl: optional mailbox completion handler (@arg is its context).
18367  *
18368  * This routine issues a RESUME_RPI mailbox command for @ndlp's rpi.
18369  **/
18370 int
18371 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18372         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18373 {
18374         LPFC_MBOXQ_t *mboxq;
18375         struct lpfc_hba *phba = ndlp->phba;
18376         int rc;
18377
18378         /* The port is notified of the rpi resume via a mailbox command. */
18379         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18380         if (!mboxq)
18381                 return -ENOMEM;
18382
18383         /* Construct the RESUME_RPI mailbox command. */
18384         lpfc_resume_rpi(mboxq, ndlp);
18385         if (cmpl) {
18386                 mboxq->mbox_cmpl = cmpl;
18387                 mboxq->ctx_buf = arg;
18388                 mboxq->ctx_ndlp = ndlp;
18389         } else
18390                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18391         mboxq->vport = ndlp->vport;
18392         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18393         if (rc == MBX_NOT_FINISHED) {
18394                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18395                                 "2010 Resume RPI Mailbox failed "
18396                                 "status %d, mbxStatus x%x\n", rc,
18397                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18398                 mempool_free(mboxq, phba->mbox_mem_pool);
18399                 return -EIO;
18400         }
18401         return 0;
18402 }
18403
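/*
 * Editor's illustrative sketch (not driver code): passing a NULL
 * completion handler to lpfc_sli4_resume_rpi() selects the default
 * lpfc_sli_def_mbox_cmpl, as the routine above shows.  The helper name
 * is hypothetical.
 */
static int lpfc_example_resume_rpi(struct lpfc_nodelist *ndlp)
{
        /* Fire-and-forget resume; the default completion frees the mailbox */
        return lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
}
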
18404 /**
18405  * lpfc_sli4_init_vpi - Initialize a vpi with the port
18406  * @vport: Pointer to the vport for which the vpi is being initialized
18407  *
18408  * This routine is invoked to activate a vpi with the port.
18409  *
18410  * Returns:
18411  *    0 success
18412  *    negative errno value otherwise
18413  **/
18414 int
18415 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18416 {
18417         LPFC_MBOXQ_t *mboxq;
18418         int rc = 0;
18419         int retval = MBX_SUCCESS;
18420         uint32_t mbox_tmo;
18421         struct lpfc_hba *phba = vport->phba;
18422         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18423         if (!mboxq)
18424                 return -ENOMEM;
18425         lpfc_init_vpi(phba, mboxq, vport->vpi);
18426         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18427         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18428         if (rc != MBX_SUCCESS) {
18429                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18430                                 "2022 INIT VPI Mailbox failed "
18431                                 "status %d, mbxStatus x%x\n", rc,
18432                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18433                 retval = -EIO;
18434         }
18435         if (rc != MBX_TIMEOUT)
18436                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18437
18438         return retval;
18439 }
18440
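/*
 * Editor's note with an illustrative sketch (not driver code): the
 * routine above follows the driver's synchronous mailbox convention --
 * on MBX_TIMEOUT the mailbox is not freed (the SLI layer is assumed to
 * still own it and frees it on completion).  A caller only needs to
 * check the errno result; the helper name below is hypothetical.
 */
static int lpfc_example_init_vpi(struct lpfc_vport *vport)
{
        int rc = lpfc_sli4_init_vpi(vport);     /* 0, -ENOMEM or -EIO */

        if (rc)
                return rc;
        /* vpi is now initialized with the port; discovery may proceed */
        return 0;
}
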
18441 /**
18442  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18443  * @phba: pointer to lpfc hba data structure.
18444  * @mboxq: Pointer to mailbox object.
18445  *
18446  * This routine is the completion handler for the ADD_FCF_RECORD
18447  * nonembedded mailbox command. It checks the mailbox subheader
18448  * status and then frees the nonembedded mailbox resources.
18449  **/
18450 static void
18451 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18452 {
18453         void *virt_addr;
18454         union lpfc_sli4_cfg_shdr *shdr;
18455         uint32_t shdr_status, shdr_add_status;
18456
18457         virt_addr = mboxq->sge_array->addr[0];
18458         /* The IOCTL status is embedded in the mailbox subheader. */
18459         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18460         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18461         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18462
18463         if ((shdr_status || shdr_add_status) &&
18464                 (shdr_status != STATUS_FCF_IN_USE))
18465                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18466                         "2558 ADD_FCF_RECORD mailbox failed with "
18467                         "status x%x add_status x%x\n",
18468                         shdr_status, shdr_add_status);
18469
18470         lpfc_sli4_mbox_cmd_free(phba, mboxq);
18471 }
18472
18473 /**
18474  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18475  * @phba: pointer to lpfc hba data structure.
18476  * @fcf_record:  pointer to the initialized fcf record to add.
18477  *
18478  * This routine is invoked to manually add a single FCF record. The caller
18479  * must pass a completely initialized FCF_Record.  This routine takes
18480  * care of the nonembedded mailbox operations.
18481  **/
18482 int
18483 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18484 {
18485         int rc = 0;
18486         LPFC_MBOXQ_t *mboxq;
18487         uint8_t *bytep;
18488         void *virt_addr;
18489         struct lpfc_mbx_sge sge;
18490         uint32_t alloc_len, req_len;
18491         uint32_t fcfindex;
18492
18493         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18494         if (!mboxq) {
18495                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18496                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18497                 return -ENOMEM;
18498         }
18499
18500         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18501                   sizeof(uint32_t);
18502
18503         /* Allocate DMA memory and set up the non-embedded mailbox command */
18504         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18505                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18506                                      req_len, LPFC_SLI4_MBX_NEMBED);
18507         if (alloc_len < req_len) {
18508                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18509                         "2523 Allocated DMA memory size (x%x) is "
18510                         "less than the requested DMA memory "
18511                         "size (x%x)\n", alloc_len, req_len);
18512                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18513                 return -ENOMEM;
18514         }
18515
18516         /*
18517          * Get the first SGE entry from the non-embedded DMA memory.  This
18518          * routine only uses a single SGE.
18519          */
18520         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18521         virt_addr = mboxq->sge_array->addr[0];
18522         /*
18523          * Configure the FCF record for FCFI 0.  This is the driver's
18524          * hardcoded default and gets used in non-FIP mode.
18525          */
18526         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18527         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18528         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18529
18530         /*
18531          * Copy the fcf_index and the FCF Record Data. The data starts after
18532          * the FCoE header plus word10. The data copy needs to be endian
18533          * correct.
18534          */
18535         bytep += sizeof(uint32_t);
18536         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18537         mboxq->vport = phba->pport;
18538         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18539         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18540         if (rc == MBX_NOT_FINISHED) {
18541                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18542                         "2515 ADD_FCF_RECORD mailbox failed with "
18543                         "status 0x%x\n", rc);
18544                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18545                 rc = -EIO;
18546         } else
18547                 rc = 0;
18548
18549         return rc;
18550 }
18551
18552 /**
18553  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18554  * @phba: pointer to lpfc hba data structure.
18555  * @fcf_record:  pointer to the fcf record to write the default data.
18556  * @fcf_index: FCF table entry index.
18557  *
18558  * This routine is invoked to build the driver's default FCF record.  The
18559  * values used are hardcoded.  This routine handles memory initialization.
18560  *
18561  **/
18562 void
18563 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18564                                 struct fcf_record *fcf_record,
18565                                 uint16_t fcf_index)
18566 {
18567         memset(fcf_record, 0, sizeof(struct fcf_record));
18568         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18569         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18570         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18571         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18572         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18573         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18574         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18575         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18576         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18577         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18578         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18579         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18580         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18581         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18582         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18583         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18584                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18585         /* Set the VLAN bit map */
18586         if (phba->valid_vlan) {
18587                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18588                         = 1 << (phba->vlan_id % 8);
18589         }
18590 }
18591
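/*
 * Editor's illustrative pairing of the two routines above (not driver
 * code): build the hardcoded default FCF record and post it manually,
 * as a non-FIP FCoE bring-up would.  The index 0 and the helper name
 * are assumptions for the sketch.
 */
static int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
        struct fcf_record fcf_record;

        lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
        return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}
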
18592 /**
18593  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18594  * @phba: pointer to lpfc hba data structure.
18595  * @fcf_index: FCF table entry offset.
18596  *
18597  * This routine is invoked to scan the entire FCF table by reading FCF
18598  * record and processing it one at a time starting from the @fcf_index
18599  * for initial FCF discovery or fast FCF failover rediscovery.
18600  *
18601  * Return 0 if the mailbox command is submitted successfully, nonzero
18602  * otherwise.
18603  **/
18604 int
18605 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18606 {
18607         int rc = 0, error;
18608         LPFC_MBOXQ_t *mboxq;
18609
18610         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18611         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18612         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18613         if (!mboxq) {
18614                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18615                                 "2000 Failed to allocate mbox for "
18616                                 "READ_FCF cmd\n");
18617                 error = -ENOMEM;
18618                 goto fail_fcf_scan;
18619         }
18620         /* Construct the read FCF record mailbox command */
18621         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18622         if (rc) {
18623                 error = -EINVAL;
18624                 goto fail_fcf_scan;
18625         }
18626         /* Issue the mailbox command asynchronously */
18627         mboxq->vport = phba->pport;
18628         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18629
18630         spin_lock_irq(&phba->hbalock);
18631         phba->hba_flag |= FCF_TS_INPROG;
18632         spin_unlock_irq(&phba->hbalock);
18633
18634         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18635         if (rc == MBX_NOT_FINISHED)
18636                 error = -EIO;
18637         else {
18638                 /* Reset eligible FCF count for new scan */
18639                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18640                         phba->fcf.eligible_fcf_cnt = 0;
18641                 error = 0;
18642         }
18643 fail_fcf_scan:
18644         if (error) {
18645                 if (mboxq)
18646                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
18647                 /* FCF scan failed, clear FCF_TS_INPROG flag */
18648                 spin_lock_irq(&phba->hbalock);
18649                 phba->hba_flag &= ~FCF_TS_INPROG;
18650                 spin_unlock_irq(&phba->hbalock);
18651         }
18652         return error;
18653 }
18654
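/*
 * Editor's illustrative sketch (not driver code): initial FCF discovery
 * starts the table scan at LPFC_FCOE_FCF_GET_FIRST, which also resets
 * the eligible-FCF count inside the routine above.  The helper name is
 * hypothetical.
 */
static int lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
        return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}
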
18655 /**
18656  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18657  * @phba: pointer to lpfc hba data structure.
18658  * @fcf_index: FCF table entry offset.
18659  *
18660  * This routine is invoked to read an FCF record indicated by @fcf_index
18661  * and to use it for FLOGI roundrobin FCF failover.
18662  *
18663  * Return 0 if the mailbox command is submitted successfully, nonzero
18664  * otherwise.
18665  **/
18666 int
18667 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18668 {
18669         int rc = 0, error;
18670         LPFC_MBOXQ_t *mboxq;
18671
18672         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18673         if (!mboxq) {
18674                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18675                                 "2763 Failed to allocate mbox for "
18676                                 "READ_FCF cmd\n");
18677                 error = -ENOMEM;
18678                 goto fail_fcf_read;
18679         }
18680         /* Construct the read FCF record mailbox command */
18681         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18682         if (rc) {
18683                 error = -EINVAL;
18684                 goto fail_fcf_read;
18685         }
18686         /* Issue the mailbox command asynchronously */
18687         mboxq->vport = phba->pport;
18688         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18689         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18690         if (rc == MBX_NOT_FINISHED)
18691                 error = -EIO;
18692         else
18693                 error = 0;
18694
18695 fail_fcf_read:
18696         if (error && mboxq)
18697                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18698         return error;
18699 }
18700
18701 /**
18702  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18703  * @phba: pointer to lpfc hba data structure.
18704  * @fcf_index: FCF table entry offset.
18705  *
18706  * This routine is invoked to read an FCF record indicated by @fcf_index to
18707  * determine whether it's eligible for FLOGI roundrobin failover list.
18708  *
18709  * Return 0 if the mailbox command is submitted successfully, nonzero
18710  * otherwise.
18711  **/
18712 int
18713 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18714 {
18715         int rc = 0, error;
18716         LPFC_MBOXQ_t *mboxq;
18717
18718         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18719         if (!mboxq) {
18720                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18721                                 "2758 Failed to allocate mbox for "
18722                                 "READ_FCF cmd\n");
18723                 error = -ENOMEM;
18724                 goto fail_fcf_read;
18725         }
18726         /* Construct the read FCF record mailbox command */
18727         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18728         if (rc) {
18729                 error = -EINVAL;
18730                 goto fail_fcf_read;
18731         }
18732         /* Issue the mailbox command asynchronously */
18733         mboxq->vport = phba->pport;
18734         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18735         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18736         if (rc == MBX_NOT_FINISHED)
18737                 error = -EIO;
18738         else
18739                 error = 0;
18740
18741 fail_fcf_read:
18742         if (error && mboxq)
18743                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18744         return error;
18745 }
18746
18747 /**
18748  * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at next priority
18749  * @phba: pointer to the lpfc_hba struct for this port.
18750  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18751  * routine when the rr_bmask is empty. The FCF indices are put into the
18752  * rr_bmask based on their priority level, starting from the highest
18753  * priority down to the lowest. The most likely FCF candidate will be in
18754  * the highest priority group. When this routine is called it searches
18755  * the fcf_pri list for the next lowest priority group and repopulates
18756  * the rr_bmask with only those fcf_indexes.
18757  * Returns:
18758  * 1=success 0=failure
18759  **/
18760 static int
18761 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18762 {
18763         uint16_t next_fcf_pri;
18764         uint16_t last_index;
18765         struct lpfc_fcf_pri *fcf_pri;
18766         int rc;
18767         int ret = 0;
18768
18769         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18770                         LPFC_SLI4_FCF_TBL_INDX_MAX);
18771         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18772                         "3060 Last IDX %d\n", last_index);
18773
18774         /* Verify the priority list has 2 or more entries */
18775         spin_lock_irq(&phba->hbalock);
18776         if (list_empty(&phba->fcf.fcf_pri_list) ||
18777             list_is_singular(&phba->fcf.fcf_pri_list)) {
18778                 spin_unlock_irq(&phba->hbalock);
18779                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18780                         "3061 Last IDX %d\n", last_index);
18781                 return 0; /* Empty rr list */
18782         }
18783         spin_unlock_irq(&phba->hbalock);
18784
18785         next_fcf_pri = 0;
18786         /*
18787          * Clear the rr_bmask and set all of the bits that are at this
18788          * priority.
18789          */
18790         memset(phba->fcf.fcf_rr_bmask, 0,
18791                         sizeof(*phba->fcf.fcf_rr_bmask));
18792         spin_lock_irq(&phba->hbalock);
18793         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18794                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18795                         continue;
18796                 /*
18797                  * The first priority that has not failed FLOGI
18798                  * will be the highest.
18799                  */
18800                 if (!next_fcf_pri)
18801                         next_fcf_pri = fcf_pri->fcf_rec.priority;
18802                 spin_unlock_irq(&phba->hbalock);
18803                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18804                         rc = lpfc_sli4_fcf_rr_index_set(phba,
18805                                                 fcf_pri->fcf_rec.fcf_index);
18806                         if (rc)
18807                                 return 0;
18808                 }
18809                 spin_lock_irq(&phba->hbalock);
18810         }
18811         /*
18812          * If next_fcf_pri was not set above and the list is not empty,
18813          * then FLOGI has failed on all of them. So reset the FLOGI-failed
18814          * flags and start at the beginning.
18815          */
18816         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18817                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18818                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18819                         /*
18820                          * The first priority that has not failed FLOGI
18821                          * will be the highest.
18822                          */
18823                         if (!next_fcf_pri)
18824                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
18825                         spin_unlock_irq(&phba->hbalock);
18826                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18827                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
18828                                                 fcf_pri->fcf_rec.fcf_index);
18829                                 if (rc)
18830                                         return 0;
18831                         }
18832                         spin_lock_irq(&phba->hbalock);
18833                 }
18834         } else
18835                 ret = 1;
18836         spin_unlock_irq(&phba->hbalock);
18837
18838         return ret;
18839 }
18840 /**
18841  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18842  * @phba: pointer to lpfc hba data structure.
18843  *
18844  * This routine is to get the next eligible FCF record index in a round
18845  * robin fashion. If the next eligible FCF record index equals to the
18846  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18847  * shall be returned, otherwise, the next eligible FCF record's index
18848  * shall be returned.
18849  **/
18850 uint16_t
18851 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18852 {
18853         uint16_t next_fcf_index;
18854
18855 initial_priority:
18856         /* Search start from next bit of currently registered FCF index */
18857         next_fcf_index = phba->fcf.current_rec.fcf_indx;
18858
18859 next_priority:
18860         /* Determine the next fcf index to check */
18861         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18862         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18863                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
18864                                        next_fcf_index);
18865
18866         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18867         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18868                 /*
18869                  * If we have wrapped then we need to clear the bits that
18870                  * have been tested so that we can detect when we should
18871                  * change the priority level.
18872                  */
18873                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18874                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18875         }
18876
18877
18878         /* Check roundrobin failover list empty condition */
18879         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18880                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18881                 /*
18882                  * If the next fcf index is not found, check if there are
18883                  * lower priority level fcf's in the fcf_priority list.
18884                  * Set up the rr_bmask with all of the available fcf bits
18885                  * at that level and continue the selection process.
18886                  */
18887                 if (lpfc_check_next_fcf_pri_level(phba))
18888                         goto initial_priority;
18889                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18890                                 "2844 No roundrobin failover FCF available\n");
18891
18892                 return LPFC_FCOE_FCF_NEXT_NONE;
18893         }
18894
18895         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18896                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18897                 LPFC_FCF_FLOGI_FAILED) {
18898                 if (list_is_singular(&phba->fcf.fcf_pri_list))
18899                         return LPFC_FCOE_FCF_NEXT_NONE;
18900
18901                 goto next_priority;
18902         }
18903
18904         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18905                         "2845 Get next roundrobin failover FCF (x%x)\n",
18906                         next_fcf_index);
18907
18908         return next_fcf_index;
18909 }
18910
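/*
 * Editor's illustrative sketch (not driver code): one roundrobin
 * failover step fetches the next eligible index and reads its record,
 * stopping when the rotation is exhausted.  The helper name is
 * hypothetical.
 */
static int lpfc_example_fcf_failover_step(struct lpfc_hba *phba)
{
        uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);

        if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
                return -ENOENT;         /* no eligible FCF left */
        return lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
}
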
18911 /**
18912  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18913  * @phba: pointer to lpfc hba data structure.
18914  * @fcf_index: index into the FCF table to set in the bmask.
18915  * This routine sets the FCF record index in to the eligible bmask for
18916  * roundrobin failover search. It checks to make sure that the index
18917  * does not go beyond the range of the driver allocated bmask dimension
18918  * before setting the bit.
18919  *
18920  * Returns 0 if the index bit successfully set, otherwise, it returns
18921  * -EINVAL.
18922  **/
18923 int
18924 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18925 {
18926         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18927                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18928                                 "2610 FCF (x%x) reached driver's book "
18929                                 "keeping dimension:x%x\n",
18930                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18931                 return -EINVAL;
18932         }
18933         /* Set the eligible FCF record index bmask */
18934         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18935
18936         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18937                         "2790 Set FCF (x%x) to roundrobin FCF failover "
18938                         "bmask\n", fcf_index);
18939
18940         return 0;
18941 }
18942
18943 /**
18944  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18945  * @phba: pointer to lpfc hba data structure.
18946  * @fcf_index: index into the FCF table to clear from the bmask.
18947  * This routine clears the FCF record index from the eligible bmask for
18948  * roundrobin failover search. It checks to make sure that the index
18949  * does not go beyond the range of the driver allocated bmask dimension
18950  * before clearing the bit.
18951  **/
18952 void
18953 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18954 {
18955         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18956         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18957                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18958                                 "2762 FCF (x%x) reached driver's book "
18959                                 "keeping dimension:x%x\n",
18960                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18961                 return;
18962         }
18963         /* Clear the eligible FCF record index bmask */
18964         spin_lock_irq(&phba->hbalock);
18965         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18966                                  list) {
18967                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18968                         list_del_init(&fcf_pri->list);
18969                         break;
18970                 }
18971         }
18972         spin_unlock_irq(&phba->hbalock);
18973         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18974
18975         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18976                         "2791 Clear FCF (x%x) from roundrobin failover "
18977                         "bmask\n", fcf_index);
18978 }
18979
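/*
 * Editor's illustrative sketch (not driver code): an FCF joins the
 * roundrobin rotation via the set routine above and is dropped again
 * via the clear routine, e.g. once FLOGI through it has failed.  The
 * helper name is hypothetical.
 */
static void lpfc_example_fcf_flogi_result(struct lpfc_hba *phba,
                                          uint16_t fcf_index, bool flogi_ok)
{
        if (flogi_ok)
                lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
        else
                lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
}
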
18980 /**
18981  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18982  * @phba: pointer to lpfc hba data structure.
18983  * @mbox: pointer to the completed rediscover FCF table mailbox command.
18984  * This routine is the completion routine for the rediscover FCF table mailbox
18985  * command. If the mailbox command returned failure, it will try to stop the
18986  * FCF rediscover wait timer.
18987  **/
18988 static void
18989 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18990 {
18991         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18992         uint32_t shdr_status, shdr_add_status;
18993
18994         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18995
18996         shdr_status = bf_get(lpfc_mbox_hdr_status,
18997                              &redisc_fcf->header.cfg_shdr.response);
18998         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18999                              &redisc_fcf->header.cfg_shdr.response);
19000         if (shdr_status || shdr_add_status) {
19001                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19002                                 "2746 Requesting for FCF rediscovery failed "
19003                                 "status x%x add_status x%x\n",
19004                                 shdr_status, shdr_add_status);
19005                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19006                         spin_lock_irq(&phba->hbalock);
19007                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19008                         spin_unlock_irq(&phba->hbalock);
19009                         /*
19010                          * CVL event triggered FCF rediscover request failed,
19011                          * last resort to re-try current registered FCF entry.
19012                          */
19013                         lpfc_retry_pport_discovery(phba);
19014                 } else {
19015                         spin_lock_irq(&phba->hbalock);
19016                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19017                         spin_unlock_irq(&phba->hbalock);
19018                         /*
19019                          * DEAD FCF event triggered FCF rediscover request
19020                          * failed, last resort to fail over as a link down
19021                          * to FCF registration.
19022                          */
19023                         lpfc_sli4_fcf_dead_failthrough(phba);
19024                 }
19025         } else {
19026                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19027                                 "2775 Start FCF rediscover quiescent timer\n");
19028                 /*
19029                  * Start the FCF rediscovery wait timer for pending FCF
19030                  * before rescanning the FCF record table.
19031                  */
19032                 lpfc_fcf_redisc_wait_start_timer(phba);
19033         }
19034
19035         mempool_free(mbox, phba->mbox_mem_pool);
19036 }
19037
19038 /**
19039  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19040  * @phba: pointer to lpfc hba data structure.
19041  *
19042  * This routine is invoked to request for rediscovery of the entire FCF table
19043  * by the port.
19044  **/
19045 int
19046 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19047 {
19048         LPFC_MBOXQ_t *mbox;
19049         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19050         int rc, length;
19051
19052         /* Cancel retry delay timers to all vports before FCF rediscover */
19053         lpfc_cancel_all_vport_retry_delay_timer(phba);
19054
19055         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19056         if (!mbox) {
19057                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19058                                 "2745 Failed to allocate mbox for "
19059                                 "requesting FCF rediscover.\n");
19060                 return -ENOMEM;
19061         }
19062
19063         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19064                   sizeof(struct lpfc_sli4_cfg_mhdr));
19065         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19066                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19067                          length, LPFC_SLI4_MBX_EMBED);
19068
19069         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19070         /* Set count to 0 for invalidating the entire FCF database */
19071         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19072
19073         /* Issue the mailbox command asynchronously */
19074         mbox->vport = phba->pport;
19075         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19076         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19077
19078         if (rc == MBX_NOT_FINISHED) {
19079                 mempool_free(mbox, phba->mbox_mem_pool);
19080                 return -EIO;
19081         }
19082         return 0;
19083 }
19084
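/*
 * Editor's illustrative sketch (not driver code): a CVL handler would
 * record the rediscovery reason in fcf_flag before requesting the
 * rescan, so that the completion handler above can choose the right
 * recovery path if the request fails.  The helper name is hypothetical.
 */
static int lpfc_example_cvl_redisc(struct lpfc_hba *phba)
{
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= FCF_ACVL_DISC;
        spin_unlock_irq(&phba->hbalock);
        return lpfc_sli4_redisc_fcf_table(phba);
}
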
19085 /**
19086  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19087  * @phba: pointer to lpfc hba data structure.
19088  *
19089  * This function is the failover routine as a last resort to the FCF DEAD
19090  * event when driver failed to perform fast FCF failover.
19091  **/
19092 void
19093 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19094 {
19095         uint32_t link_state;
19096
19097         /*
19098          * Last resort as FCF DEAD event failover will treat this as
19099          * a link down, but save the link state because we don't want
19100          * it to be changed to Link Down unless it is already down.
19101          */
19102         link_state = phba->link_state;
19103         lpfc_linkdown(phba);
19104         phba->link_state = link_state;
19105
19106         /* Unregister FCF if no devices connected to it */
19107         lpfc_unregister_unused_fcf(phba);
19108 }
19109
19110 /**
19111  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19112  * @phba: pointer to lpfc hba data structure.
19113  * @rgn23_data: buffer that receives the region 23 data.
19114  *
19115  * This function gets SLI3 port configuration region 23 data through memory dump
19116  * mailbox command. When it successfully retrieves data, the size of the data
19117  * will be returned, otherwise, 0 will be returned.
19118  **/
19119 static uint32_t
19120 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19121 {
19122         LPFC_MBOXQ_t *pmb = NULL;
19123         MAILBOX_t *mb;
19124         uint32_t offset = 0;
19125         int rc;
19126
19127         if (!rgn23_data)
19128                 return 0;
19129
19130         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19131         if (!pmb) {
19132                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19133                                 "2600 failed to allocate mailbox memory\n");
19134                 return 0;
19135         }
19136         mb = &pmb->u.mb;
19137
19138         do {
19139                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19140                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19141
19142                 if (rc != MBX_SUCCESS) {
19143                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19144                                         "2601 failed to read config "
19145                                         "region 23, rc 0x%x Status 0x%x\n",
19146                                         rc, mb->mbxStatus);
19147                         mb->un.varDmp.word_cnt = 0;
19148                 }
19149                 /*
19150                  * Dump mem may return zero when finished or when there was
19151                  * a mailbox error; either way we are done.
19152                  */
19153                 if (mb->un.varDmp.word_cnt == 0)
19154                         break;
19155                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19156                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19157
19158                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19159                                        rgn23_data + offset,
19160                                        mb->un.varDmp.word_cnt);
19161                 offset += mb->un.varDmp.word_cnt;
19162         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19163
19164         mempool_free(pmb, phba->mbox_mem_pool);
19165         return offset;
19166 }
19167
19168 /**
19169  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19170  * @phba: pointer to lpfc hba data structure.
19171  * @rgn23_data: buffer that receives the region 23 data.
19172  *
19173  * This function gets SLI4 port configuration region 23 data through memory dump
19174  * mailbox command. When it successfully retrieves data, the size of the data
19175  * will be returned, otherwise, 0 will be returned.
19176  **/
19177 static uint32_t
19178 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19179 {
19180         LPFC_MBOXQ_t *mboxq = NULL;
19181         struct lpfc_dmabuf *mp = NULL;
19182         struct lpfc_mqe *mqe;
19183         uint32_t data_length = 0;
19184         int rc;
19185
19186         if (!rgn23_data)
19187                 return 0;
19188
19189         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19190         if (!mboxq) {
19191                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19192                                 "3105 failed to allocate mailbox memory\n");
19193                 return 0;
19194         }
19195
19196         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19197                 goto out;
19198         mqe = &mboxq->u.mqe;
19199         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19200         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19201         if (rc)
19202                 goto out;
19203         data_length = mqe->un.mb_words[5];
19204         if (data_length == 0)
19205                 goto out;
19206         if (data_length > DMP_RGN23_SIZE) {
19207                 data_length = 0;
19208                 goto out;
19209         }
19210         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19211 out:
19212         mempool_free(mboxq, phba->mbox_mem_pool);
19213         if (mp) {
19214                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19215                 kfree(mp);
19216         }
19217         return data_length;
19218 }
19219
19220 /**
19221  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19222  * @phba: pointer to lpfc hba data structure.
19223  *
19224  * This function reads region 23 and parses the TLVs for port status to
19225  * decide if the user disabled the port. If the TLV indicates the
19226  * port is disabled, the hba_flag is set accordingly.
19227  **/
19228 void
19229 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19230 {
19231         uint8_t *rgn23_data = NULL;
19232         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19233         uint32_t offset = 0;
19234
19235         /* Get adapter Region 23 data */
19236         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19237         if (!rgn23_data)
19238                 goto out;
19239
19240         if (phba->sli_rev < LPFC_SLI_REV4)
19241                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19242         else {
19243                 if_type = bf_get(lpfc_sli_intf_if_type,
19244                                  &phba->sli4_hba.sli_intf);
19245                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19246                         goto out;
19247                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19248         }
19249
19250         if (!data_size)
19251                 goto out;
19252
19253         /* Check the region signature first */
19254         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19255                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19256                         "2619 Config region 23 has bad signature\n");
19257                 goto out;
19258         }
19259         offset += 4;
19260
19261         /* Check the data structure version */
19262         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19263                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19264                         "2620 Config region 23 has bad version\n");
19265                 goto out;
19266         }
19267         offset += 4;
19268
19269         /* Parse TLV entries in the region */
19270         while (offset < data_size) {
19271                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19272                         break;
19273                 /*
19274                  * If the TLV is not driver specific TLV or driver id is
19275                  * not linux driver id, skip the record.
19276                  */
19277                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19278                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19279                     (rgn23_data[offset + 3] != 0)) {
19280                         offset += rgn23_data[offset + 1] * 4 + 4;
19281                         continue;
19282                 }
19283
19284                 /* Driver found a driver specific TLV in the config region */
19285                 sub_tlv_len = rgn23_data[offset + 1] * 4;
19286                 offset += 4;
19287                 tlv_offset = 0;
19288
19289                 /*
19290                  * Search for configured port state sub-TLV.
19291                  */
19292                 while ((offset < data_size) &&
19293                         (tlv_offset < sub_tlv_len)) {
19294                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19295                                 offset += 4;
19296                                 tlv_offset += 4;
19297                                 break;
19298                         }
19299                         if (rgn23_data[offset] != PORT_STE_TYPE) {
19300                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19301                                 offset += rgn23_data[offset + 1] * 4 + 4;
19302                                 continue;
19303                         }
19304
19305                         /* This HBA contains PORT_STE configured */
19306                         if (!rgn23_data[offset + 2])
19307                                 phba->hba_flag |= LINK_DISABLED;
19308
19309                         goto out;
19310                 }
19311         }
19312
19313 out:
19314         kfree(rgn23_data);
19315         return;
19316 }
19317
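/*
 * Editor's simplified sketch of the region 23 TLV walk above (not
 * driver code): each record is a type byte, a length byte counted in
 * words, then length * 4 bytes of payload, so a record occupies
 * length * 4 + 4 bytes in the buffer.
 */
static const uint8_t *lpfc_example_find_tlv(const uint8_t *buf, uint32_t size,
                                            uint8_t want_type)
{
        uint32_t off = 8;       /* skip 4-byte signature and 4-byte version */

        while (off + 4 <= size && buf[off] != LPFC_REGION23_LAST_REC) {
                if (buf[off] == want_type)
                        return &buf[off];
                off += buf[off + 1] * 4 + 4;
        }
        return NULL;
}
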
19318 /**
19319  * lpfc_wr_object - write an object to the firmware
19320  * @phba: HBA structure that indicates port to create a queue on.
19321  * @dmabuf_list: list of dmabufs to write to the port.
19322  * @size: the total byte value of the objects to write to the port.
19323  * @offset: the current offset to be used to start the transfer.
19324  *
19325  * This routine will create a wr_object mailbox command to send to the port.
19326  * The mailbox command will be constructed using the dma buffers described in
19327  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19328  * BDEs as the embedded mailbox can support. The @offset variable will be
19329  * used to indicate the starting offset of the transfer and will also return
19330  * the offset after the write object mailbox has completed. @size is used to
19331  * determine the end of the object and whether the eof bit should be set.
19332  *
19333  * Return 0 if successful; @offset will then contain the new offset to use
19334  * for the next write.
19335  * Return negative value for error cases.
19336  **/
19337 int
19338 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19339                uint32_t size, uint32_t *offset)
19340 {
19341         struct lpfc_mbx_wr_object *wr_object;
19342         LPFC_MBOXQ_t *mbox;
19343         int rc = 0, i = 0;
19344         uint32_t shdr_status, shdr_add_status, shdr_change_status;
19345         uint32_t mbox_tmo;
19346         struct lpfc_dmabuf *dmabuf;
19347         uint32_t written = 0;
19348         bool check_change_status = false;
19349
19350         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19351         if (!mbox)
19352                 return -ENOMEM;
19353
19354         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19355                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
19356                         sizeof(struct lpfc_mbx_wr_object) -
19357                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19358
19359         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19360         wr_object->u.request.write_offset = *offset;
19361         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19362         wr_object->u.request.object_name[0] =
19363                 cpu_to_le32(wr_object->u.request.object_name[0]);
19364         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19365         list_for_each_entry(dmabuf, dmabuf_list, list) {
19366                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19367                         break;
19368                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19369                 wr_object->u.request.bde[i].addrHigh =
19370                         putPaddrHigh(dmabuf->phys);
19371                 if (written + SLI4_PAGE_SIZE >= size) {
19372                         wr_object->u.request.bde[i].tus.f.bdeSize =
19373                                 (size - written);
19374                         written += (size - written);
19375                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19376                         bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19377                         check_change_status = true;
19378                 } else {
19379                         wr_object->u.request.bde[i].tus.f.bdeSize =
19380                                 SLI4_PAGE_SIZE;
19381                         written += SLI4_PAGE_SIZE;
19382                 }
19383                 i++;
19384         }
19385         wr_object->u.request.bde_count = i;
19386         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19387         if (!phba->sli4_hba.intr_enable)
19388                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19389         else {
19390                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19391                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19392         }
19393         /* The IOCTL status is embedded in the mailbox subheader. */
19394         shdr_status = bf_get(lpfc_mbox_hdr_status,
19395                              &wr_object->header.cfg_shdr.response);
19396         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19397                                  &wr_object->header.cfg_shdr.response);
19398         if (check_change_status) {
19399                 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19400                                             &wr_object->u.response);
19401                 switch (shdr_change_status) {
19402                 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19403                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19404                                         "3198 Firmware write complete: System "
19405                                         "reboot required to instantiate\n");
19406                         break;
19407                 case (LPFC_CHANGE_STATUS_FW_RESET):
19408                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19409                                         "3199 Firmware write complete: Firmware"
19410                                         " reset required to instantiate\n");
19411                         break;
19412                 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19413                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19414                                         "3200 Firmware write complete: Port "
19415                                         "Migration or PCI Reset required to "
19416                                         "instantiate\n");
19417                         break;
19418                 case (LPFC_CHANGE_STATUS_PCI_RESET):
19419                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19420                                         "3201 Firmware write complete: PCI "
19421                                         "Reset required to instantiate\n");
19422                         break;
19423                 default:
19424                         break;
19425                 }
19426         }
19427         if (rc != MBX_TIMEOUT)
19428                 mempool_free(mbox, phba->mbox_mem_pool);
19429         if (shdr_status || shdr_add_status || rc) {
19430                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19431                                 "3025 Write Object mailbox failed with "
19432                                 "status x%x add_status x%x, mbx status x%x\n",
19433                                 shdr_status, shdr_add_status, rc);
19434                 rc = -ENXIO;
19435                 *offset = shdr_add_status;
19436         } else
19437                 *offset += wr_object->u.response.actual_write_length;
19438         return rc;
19439 }
19440
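/*
 * Editor's illustrative sketch of the call pattern (not driver code):
 * a firmware download loop carries @offset forward between calls and
 * refills the dmabuf list with the next chunk each iteration (refill
 * omitted here).  The helper name is hypothetical.
 */
static int lpfc_example_write_fw(struct lpfc_hba *phba,
                                 struct list_head *dmabuf_list,
                                 uint32_t fw_size)
{
        uint32_t offset = 0;
        int rc = 0;

        while (offset < fw_size) {
                /* ... refill dmabuf_list with the next chunk here ... */
                rc = lpfc_wr_object(phba, dmabuf_list, fw_size - offset,
                                    &offset);
                if (rc)
                        break;  /* on failure, offset holds add_status */
        }
        return rc;
}
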
19441 /**
19442  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19443  * @vport: pointer to vport data structure.
19444  *
19445  * This function iterates through the mailboxq and cleans up all REG_LOGIN
19446  * and REG_VPI mailbox commands associated with the vport. This function
19447  * is called when the driver wants to restart discovery of the vport due to
19448  * a Clear Virtual Link event.
19449  **/
19450 void
19451 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19452 {
19453         struct lpfc_hba *phba = vport->phba;
19454         LPFC_MBOXQ_t *mb, *nextmb;
19455         struct lpfc_dmabuf *mp;
19456         struct lpfc_nodelist *ndlp;
19457         struct lpfc_nodelist *act_mbx_ndlp = NULL;
19458         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
19459         LIST_HEAD(mbox_cmd_list);
19460         uint8_t restart_loop;
19461
19462         /* Clean up internally queued mailbox commands with the vport */
19463         spin_lock_irq(&phba->hbalock);
19464         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19465                 if (mb->vport != vport)
19466                         continue;
19467
19468                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19469                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
19470                         continue;
19471
19472                 list_del(&mb->list);
19473                 list_add_tail(&mb->list, &mbox_cmd_list);
19474         }
19475         /* Clean up active mailbox command with the vport */
19476         mb = phba->sli.mbox_active;
19477         if (mb && (mb->vport == vport)) {
19478                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19479                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
19480                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19481                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19482                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19483                         /* Put reference count for delayed processing */
19484                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19485                         /* Unregister the RPI when mailbox complete */
19486                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19487                 }
19488         }
19489         /* Cleanup any mailbox completions which are not yet processed */
19490         do {
19491                 restart_loop = 0;
19492                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19493                         /*
19494                          * If this mailbox is already processed or it is
19495                          * for another vport, ignore it.
19496                          */
19497                         if ((mb->vport != vport) ||
19498                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19499                                 continue;
19500
19501                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19502                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19503                                 continue;
19504
19505                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19506                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19507                                 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19508                                 /* Unregister the RPI when mailbox complete */
19509                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19510                                 restart_loop = 1;
19511                                 spin_unlock_irq(&phba->hbalock);
19512                                 spin_lock(shost->host_lock);
19513                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19514                                 spin_unlock(shost->host_lock);
19515                                 spin_lock_irq(&phba->hbalock);
19516                                 break;
19517                         }
19518                 }
19519         } while (restart_loop);
19520
19521         spin_unlock_irq(&phba->hbalock);
19522
19523         /* Release the cleaned-up mailbox commands */
19524         while (!list_empty(&mbox_cmd_list)) {
19525                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19526                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19527                         mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19528                         if (mp) {
19529                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19530                                 kfree(mp);
19531                         }
19532                         mb->ctx_buf = NULL;
19533                         ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19534                         mb->ctx_ndlp = NULL;
19535                         if (ndlp) {
19536                                 spin_lock(shost->host_lock);
19537                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19538                                 spin_unlock(shost->host_lock);
19539                                 lpfc_nlp_put(ndlp);
19540                         }
19541                 }
19542                 mempool_free(mb, phba->mbox_mem_pool);
19543         }
19544
19545         /* Release the ndlp with the cleaned-up active mailbox command */
19546         if (act_mbx_ndlp) {
19547                 spin_lock(shost->host_lock);
19548                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19549                 spin_unlock(shost->host_lock);
19550                 lpfc_nlp_put(act_mbx_ndlp);
19551         }
19552 }
19553
19554 /**
19555  * lpfc_drain_txq - Drain the txq
19556  * @phba: Pointer to HBA context object.
19557  *
19558  * This function attempts to submit IOCBs on the txq
19559  * to the adapter.  For SLI4 adapters, the txq contains
19560  * ELS IOCBs that have been deferred because there
19561  * are no SGLs.  This congestion can occur with large
19562  * vport counts during node discovery.
19563  **/
19564
19565 uint32_t
19566 lpfc_drain_txq(struct lpfc_hba *phba)
19567 {
19568         LIST_HEAD(completions);
19569         struct lpfc_sli_ring *pring;
19570         struct lpfc_iocbq *piocbq = NULL;
19571         unsigned long iflags = 0;
19572         char *fail_msg = NULL;
19573         struct lpfc_sglq *sglq;
19574         union lpfc_wqe128 wqe;
19575         uint32_t txq_cnt = 0;
19576         struct lpfc_queue *wq;
19577
19578         if (phba->link_flag & LS_MDS_LOOPBACK) {
19579                 /* MDS WQEs are posted only to the first WQ */
19580                 wq = phba->sli4_hba.hdwq[0].fcp_wq;
19581                 if (unlikely(!wq))
19582                         return 0;
19583                 pring = wq->pring;
19584         } else {
19585                 wq = phba->sli4_hba.els_wq;
19586                 if (unlikely(!wq))
19587                         return 0;
19588                 pring = lpfc_phba_elsring(phba);
19589         }
19590
19591         if (unlikely(!pring) || list_empty(&pring->txq))
19592                 return 0;
19593
19594         spin_lock_irqsave(&pring->ring_lock, iflags);
19595         list_for_each_entry(piocbq, &pring->txq, list) {
19596                 txq_cnt++;
19597         }
19598
19599         if (txq_cnt > pring->txq_max)
19600                 pring->txq_max = txq_cnt;
19601
19602         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19603
19604         while (!list_empty(&pring->txq)) {
19605                 spin_lock_irqsave(&pring->ring_lock, iflags);
19606
19607                 piocbq = lpfc_sli_ringtx_get(phba, pring);
19608                 if (!piocbq) {
19609                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19610                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19611                                 "2823 txq empty and txq_cnt is %d\n",
19612                                 txq_cnt);
19613                         break;
19614                 }
19615                 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19616                 if (!sglq) {
19617                         __lpfc_sli_ringtx_put(phba, pring, piocbq);
19618                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19619                         break;
19620                 }
19621                 txq_cnt--;
19622
19623                 /* The xri and iocb resources are secured,
19624                  * attempt to issue the request.
19625                  */
19626                 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19627                 piocbq->sli4_xritag = sglq->sli4_xritag;
19628                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19629                         fail_msg = "to convert bpl to sgl";
19630                 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19631                         fail_msg = "to convert iocb to wqe";
19632                 else if (lpfc_sli4_wq_put(wq, &wqe))
19633                         fail_msg = " - Wq is full";
19634                 else
19635                         lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19636
19637                 if (fail_msg) {
19638                         /* Failed means we can't issue and need to cancel */
19639                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19640                                         "2822 IOCB failed %s iotag 0x%x "
19641                                         "xri 0x%x\n",
19642                                         fail_msg,
19643                                         piocbq->iotag, piocbq->sli4_xritag);
19644                         list_add_tail(&piocbq->list, &completions);
                              /* Reset so a stale failure cannot cancel the
                               * next IOCB that is issued successfully.
                               */
                              fail_msg = NULL;
19645                 }
19646                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19647         }
19648
19649         /* Cancel all the IOCBs that cannot be issued */
19650         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19651                                 IOERR_SLI_ABORTED);
19652
19653         return txq_cnt;
19654 }
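
/*
 * Illustrative caller sketch (assumed, not taken from the driver): once
 * ELS SGLs have been freed back to the pool, a caller could retry the
 * deferred IOCBs and inspect how many remain queued:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4) {
 *		uint32_t remaining = lpfc_drain_txq(phba);
 *		if (remaining)
 *			;	// txq still congested; SGLs exhausted again
 *	}
 */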
19655
19656 /**
19657  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19658  * @phba: Pointer to HBA context object.
19659  * @pwqeq: Pointer to command WQE.
19660  * @sglq: Pointer to the scatter gather queue object.
19661  *
19662  * This routine converts the bpl or bde that is in the WQE
19663  * to a sgl list for the sli4 hardware. The physical address
19664  * of the bpl/bde is converted back to a virtual address.
19665  * If the WQE contains a BPL then the list of BDEs is
19666  * converted to sli4_sge's. If the WQE contains a single
19667  * BDE then it is converted to a single sli4_sge.
19668  * The WQE is still in CPU endianness so the contents of
19669  * the bpl can be used without byte swapping.
19670  *
19671  * Returns valid XRI = Success, NO_XRI = Failure.
19672  */
19673 static uint16_t
19674 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19675                  struct lpfc_sglq *sglq)
19676 {
19677         uint16_t xritag = NO_XRI;
19678         struct ulp_bde64 *bpl = NULL;
19679         struct ulp_bde64 bde;
19680         struct sli4_sge *sgl  = NULL;
19681         struct lpfc_dmabuf *dmabuf;
19682         union lpfc_wqe128 *wqe;
19683         int numBdes = 0;
19684         int i = 0;
19685         uint32_t offset = 0; /* accumulated offset in the sg request list */
19686         int inbound = 0; /* number of sg reply entries inbound from firmware */
19687         uint32_t cmd;
19688
19689         if (!pwqeq || !sglq)
19690                 return xritag;
19691
19692         sgl  = (struct sli4_sge *)sglq->sgl;
19693         wqe = &pwqeq->wqe;
19694         pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19695
19696         cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19697         if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19698                 return sglq->sli4_xritag;
19699         numBdes = pwqeq->rsvd2;
19700         if (numBdes) {
19701                 /* The addrHigh and addrLow fields within the WQE
19702                  * have not been byteswapped yet so there is no
19703                  * need to swap them back.
19704                  */
19705                 if (pwqeq->context3)
19706                         dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19707                 else
19708                         return xritag;
19709
19710                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
19711                 if (!bpl)
19712                         return xritag;
19713
19714                 for (i = 0; i < numBdes; i++) {
19715                         /* Should already be byte swapped. */
19716                         sgl->addr_hi = bpl->addrHigh;
19717                         sgl->addr_lo = bpl->addrLow;
19718
19719                         sgl->word2 = le32_to_cpu(sgl->word2);
19720                         if ((i+1) == numBdes)
19721                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
19722                         else
19723                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
19724                         /* swap the size field back to the cpu so we
19725                          * can assign it to the sgl.
19726                          */
19727                         bde.tus.w = le32_to_cpu(bpl->tus.w);
19728                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19729                         /* The offsets in the sgl need to be accumulated
19730                          * separately for the request and reply lists.
19731                          * The request is always first, the reply follows.
19732                          */
19733                         switch (cmd) {
19734                         case CMD_GEN_REQUEST64_WQE:
19735                                 /* add up the reply sg entries */
19736                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19737                                         inbound++;
19738                                 /* first inbound? reset the offset */
19739                                 if (inbound == 1)
19740                                         offset = 0;
19741                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19742                                 bf_set(lpfc_sli4_sge_type, sgl,
19743                                         LPFC_SGE_TYPE_DATA);
19744                                 offset += bde.tus.f.bdeSize;
19745                                 break;
19746                         case CMD_FCP_TRSP64_WQE:
19747                                 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19748                                 bf_set(lpfc_sli4_sge_type, sgl,
19749                                         LPFC_SGE_TYPE_DATA);
19750                                 break;
19751                         case CMD_FCP_TSEND64_WQE:
19752                         case CMD_FCP_TRECEIVE64_WQE:
19753                                 bf_set(lpfc_sli4_sge_type, sgl,
19754                                         bpl->tus.f.bdeFlags);
19755                                 if (i < 3)
19756                                         offset = 0;
19757                                 else
19758                                         offset += bde.tus.f.bdeSize;
19759                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19760                                 break;
19761                         }
19762                         sgl->word2 = cpu_to_le32(sgl->word2);
19763                         bpl++;
19764                         sgl++;
19765                 }
19766         } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19767                 /* The addrHigh and addrLow fields of the BDE have not
19768                  * been byteswapped yet so they need to be swapped
19769                  * before putting them in the sgl.
19770                  */
19771                 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19772                 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19773                 sgl->word2 = le32_to_cpu(sgl->word2);
19774                 bf_set(lpfc_sli4_sge_last, sgl, 1);
19775                 sgl->word2 = cpu_to_le32(sgl->word2);
19776                 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19777         }
19778         return sglq->sli4_xritag;
19779 }
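
/*
 * For reference, the word2 handling above is the usual pattern for editing
 * a bit field inside a little-endian descriptor: swap to CPU order, modify,
 * swap back. A minimal standalone sketch using the same accessors:
 *
 *	sgl->word2 = le32_to_cpu(sgl->word2);	// LE -> CPU order
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);	// edit while in CPU order
 *	sgl->word2 = cpu_to_le32(sgl->word2);	// back to LE for hardware
 */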
19780
19781 /**
19782  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19783  * @phba: Pointer to HBA context object.
19784  * @qp: Pointer to the SLI4 hardware queue the WQE is issued on.
19785  * @pwqe: Pointer to command WQE.
19786  **/
19787 int
19788 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19789                     struct lpfc_iocbq *pwqe)
19790 {
19791         union lpfc_wqe128 *wqe = &pwqe->wqe;
19792         struct lpfc_nvmet_rcv_ctx *ctxp;
19793         struct lpfc_queue *wq;
19794         struct lpfc_sglq *sglq;
19795         struct lpfc_sli_ring *pring;
19796         unsigned long iflags;
19797         uint32_t ret = 0;
19798
19799         /* NVME_LS and NVME_LS ABTS requests. */
19800         if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19801                 pring =  phba->sli4_hba.nvmels_wq->pring;
19802                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19803                                           qp, wq_access);
19804                 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19805                 if (!sglq) {
19806                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19807                         return WQE_BUSY;
19808                 }
19809                 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19810                 pwqe->sli4_xritag = sglq->sli4_xritag;
19811                 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19812                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19813                         return WQE_ERROR;
19814                 }
19815                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19816                        pwqe->sli4_xritag);
19817                 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19818                 if (ret) {
19819                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19820                         return ret;
19821                 }
19822
19823                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19824                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19825                 return 0;
19826         }
19827
19828         /* NVME_FCREQ and NVME_ABTS requests */
19829         if (pwqe->iocb_flag & LPFC_IO_NVME) {
19830                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19831                 wq = qp->nvme_wq;
19832                 pring = wq->pring;
19833
19834                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19835
19836                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19837                                           qp, wq_access);
19838                 ret = lpfc_sli4_wq_put(wq, wqe);
19839                 if (ret) {
19840                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19841                         return ret;
19842                 }
19843                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19844                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19845                 return 0;
19846         }
19847
19848         /* NVMET requests */
19849         if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19850                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19851                 wq = qp->nvme_wq;
19852                 pring = wq->pring;
19853
19854                 ctxp = pwqe->context2;
19855                 sglq = ctxp->ctxbuf->sglq;
19856                 if (pwqe->sli4_xritag ==  NO_XRI) {
19857                         pwqe->sli4_lxritag = sglq->sli4_lxritag;
19858                         pwqe->sli4_xritag = sglq->sli4_xritag;
19859                 }
19860                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19861                        pwqe->sli4_xritag);
19862                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19863
19864                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19865                                           qp, wq_access);
19866                 ret = lpfc_sli4_wq_put(wq, wqe);
19867                 if (ret) {
19868                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19869                         return ret;
19870                 }
19871                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19872                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19873                 return 0;
19874         }
19875         return WQE_ERROR;
19876 }
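
/*
 * Illustrative submission sketch (assumed caller, not lifted from the NVME
 * path): a transport routine that has built a WQE in pwqe would submit it
 * and map the WQE_* status into its own error handling:
 *
 *	ret = lpfc_sli4_issue_wqe(phba, qp, pwqe);
 *	if (ret == WQE_BUSY)
 *		;	// no SGL available: back off and retry later
 *	else if (ret)
 *		;	// WQE_ERROR or a wq_put failure: fail the request
 */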
19877
19878 #ifdef LPFC_MXP_STAT
19879 /**
19880  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19881  * @phba: pointer to lpfc hba data structure.
19882  * @hwqid: index of the HWQ this snapshot belongs to.
19883  *
19884  * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
19885  * counts 15 seconds after a test case starts running.
19886  *
19887  * The user should call lpfc_debugfs_multixripools_write before running a test
19888  * case to clear stat_snapshot_taken, then start the test case. While the test
19889  * case is running, stat_snapshot_taken is incremented by 1 each time this
19890  * routine is called from the heartbeat timer. When stat_snapshot_taken equals
19891  * LPFC_MXP_SNAPSHOT_TAKEN, the snapshot is taken.
19892  **/
19893 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19894 {
19895         struct lpfc_sli4_hdw_queue *qp;
19896         struct lpfc_multixri_pool *multixri_pool;
19897         struct lpfc_pvt_pool *pvt_pool;
19898         struct lpfc_pbl_pool *pbl_pool;
19899         u32 txcmplq_cnt;
19900
19901         qp = &phba->sli4_hba.hdwq[hwqid];
19902         multixri_pool = qp->p_multixri_pool;
19903         if (!multixri_pool)
19904                 return;
19905
19906         if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19907                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19908                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19909                 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19910                 if (qp->nvme_wq)
19911                         txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19912
19913                 multixri_pool->stat_pbl_count = pbl_pool->count;
19914                 multixri_pool->stat_pvt_count = pvt_pool->count;
19915                 multixri_pool->stat_busy_count = txcmplq_cnt;
19916         }
19917
19918         multixri_pool->stat_snapshot_taken++;
19919 }
19920 #endif
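
/*
 * Measurement flow implied by the comment above (a sketch, not a verbatim
 * procedure): clear the counter through the multixripools debugfs write,
 * start the test case, and let the heartbeat timer drive the snapshot.
 * On the LPFC_MXP_SNAPSHOT_TAKEN-th heartbeat the pbl/pvt/busy counts are
 * latched into stat_pbl_count, stat_pvt_count and stat_busy_count for
 * later inspection via debugfs.
 */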
19921
19922 /**
19923  * lpfc_adjust_pvt_pool_count - Adjust private pool count
19924  * @phba: pointer to lpfc hba data structure.
19925  * @hwqid: index of the HWQ this pool belongs to.
19926  *
19927  * This routine moves some XRIs from the private to the public pool when the
19928  * private pool is not busy.
19929  **/
19930 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19931 {
19932         struct lpfc_multixri_pool *multixri_pool;
19933         u32 io_req_count;
19934         u32 prev_io_req_count;
19935
19936         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19937         if (!multixri_pool)
19938                 return;
19939         io_req_count = multixri_pool->io_req_count;
19940         prev_io_req_count = multixri_pool->prev_io_req_count;
19941
19942         if (prev_io_req_count != io_req_count) {
19943                 /* Private pool is busy */
19944                 multixri_pool->prev_io_req_count = io_req_count;
19945         } else {
19946                 /* Private pool is not busy.
19947                  * Move XRIs from private to public pool.
19948                  */
19949                 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19950         }
19951 }
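
/*
 * Worked example of the idle test above (hypothetical values): if the last
 * heartbeat recorded prev_io_req_count == 1000 and io_req_count is still
 * 1000 now, no IO arrived in the interval, so the private pool is deemed
 * idle and its free XRIs are handed back to the public pool. If
 * io_req_count advanced to, say, 1500, only prev_io_req_count is updated
 * and the private pool is left alone.
 */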
19952
19953 /**
19954  * lpfc_adjust_high_watermark - Adjust high watermark
19955  * @phba: pointer to lpfc hba data structure.
19956  * @hwqid: index of the HWQ this pool belongs to.
19957  *
19958  * This routine sets the high watermark to the number of outstanding XRIs,
19959  * but makes sure the new value stays between xri_limit/2 and xri_limit.
19960  **/
19961 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19962 {
19963         u32 new_watermark;
19964         u32 watermark_max;
19965         u32 watermark_min;
19966         u32 xri_limit;
19967         u32 txcmplq_cnt;
19968         u32 abts_io_bufs;
19969         struct lpfc_multixri_pool *multixri_pool;
19970         struct lpfc_sli4_hdw_queue *qp;
19971
19972         qp = &phba->sli4_hba.hdwq[hwqid];
19973         multixri_pool = qp->p_multixri_pool;
19974         if (!multixri_pool)
19975                 return;
19976         xri_limit = multixri_pool->xri_limit;
19977
19978         watermark_max = xri_limit;
19979         watermark_min = xri_limit / 2;
19980
19981         txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19982         abts_io_bufs = qp->abts_scsi_io_bufs;
19983         if (qp->nvme_wq) {
19984                 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19985                 abts_io_bufs += qp->abts_nvme_io_bufs;
19986         }
19987
19988         new_watermark = txcmplq_cnt + abts_io_bufs;
19989         new_watermark = min(watermark_max, new_watermark);
19990         new_watermark = max(watermark_min, new_watermark);
19991         multixri_pool->pvt_pool.high_watermark = new_watermark;
19992
19993 #ifdef LPFC_MXP_STAT
19994         multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19995                                           new_watermark);
19996 #endif
19997 }
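
/*
 * Worked example of the clamping above (hypothetical numbers): with
 * xri_limit = 512 the watermark is kept within [256, 512]. For
 * txcmplq_cnt + abts_io_bufs = 100 the min()/max() pair yields
 * max(256, min(512, 100)) = 256; for 700 outstanding it yields
 * max(256, min(512, 700)) = 512.
 */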
19998
19999 /**
20000  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20001  * @phba: pointer to lpfc hba data structure.
20002  * @hwqid: index of the HWQ this pool belongs to.
20003  *
20004  * This routine is called from the heartbeat timer when pvt_pool is idle.
20005  * All free XRIs are moved from the private to the public pool on hwqid in
20006  * two steps: the first step moves (all - low_watermark) XRIs, and the
20007  * second step moves the rest.
20008  **/
20009 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20010 {
20011         struct lpfc_pbl_pool *pbl_pool;
20012         struct lpfc_pvt_pool *pvt_pool;
20013         struct lpfc_sli4_hdw_queue *qp;
20014         struct lpfc_io_buf *lpfc_ncmd;
20015         struct lpfc_io_buf *lpfc_ncmd_next;
20016         unsigned long iflag;
20017         struct list_head tmp_list;
20018         u32 tmp_count;
20019
20020         qp = &phba->sli4_hba.hdwq[hwqid];
20021         pbl_pool = &qp->p_multixri_pool->pbl_pool;
20022         pvt_pool = &qp->p_multixri_pool->pvt_pool;
20023         tmp_count = 0;
20024
20025         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20026         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20027
20028         if (pvt_pool->count > pvt_pool->low_watermark) {
20029                 /* Step 1: move (all - low_watermark) from pvt_pool
20030                  * to pbl_pool
20031                  */
20032
20033                 /* Move low watermark of bufs from pvt_pool to tmp_list */
20034                 INIT_LIST_HEAD(&tmp_list);
20035                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20036                                          &pvt_pool->list, list) {
20037                         list_move_tail(&lpfc_ncmd->list, &tmp_list);
20038                         tmp_count++;
20039                         if (tmp_count >= pvt_pool->low_watermark)
20040                                 break;
20041                 }
20042
20043                 /* Move all bufs from pvt_pool to pbl_pool */
20044                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20045
20046                 /* Move all bufs from tmp_list to pvt_pool */
20047                 list_splice(&tmp_list, &pvt_pool->list);
20048
20049                 pbl_pool->count += (pvt_pool->count - tmp_count);
20050                 pvt_pool->count = tmp_count;
20051         } else {
20052                 /* Step 2: move the rest from pvt_pool to pbl_pool */
20053                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20054                 pbl_pool->count += pvt_pool->count;
20055                 pvt_pool->count = 0;
20056         }
20057
20058         spin_unlock(&pvt_pool->lock);
20059         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20060 }
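
/*
 * Worked example of the two steps above (hypothetical counts): with
 * pvt_pool->count = 50 and low_watermark = 20, step 1 parks the first 20
 * bufs on tmp_list, splices the remaining 30 onto pbl_pool, then splices
 * tmp_list back, leaving pvt_pool->count = 20. A later idle pass takes
 * the step 2 branch and drains those final 20 as well.
 */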
20061
20062 /**
20063  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20064  * @phba: pointer to lpfc hba data structure
       * @qp: pointer to the HWQ that owns the pools
20065  * @pbl_pool: specified public free XRI pool
20066  * @pvt_pool: specified private free XRI pool
20067  * @count: number of XRIs to move
20068  *
20069  * This routine tries to move some free common bufs from the specified
20070  * pbl_pool to the specified pvt_pool. It might move fewer than count
20071  * XRIs if there are not enough in the public pool.
20072  *
20073  * Return:
20074  *   true - if XRIs are successfully moved from the specified pbl_pool to the
20075  *          specified pvt_pool
20076  *   false - if the specified pbl_pool is empty or locked by someone else
20077  **/
20078 static bool
20079 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20080                           struct lpfc_pbl_pool *pbl_pool,
20081                           struct lpfc_pvt_pool *pvt_pool, u32 count)
20082 {
20083         struct lpfc_io_buf *lpfc_ncmd;
20084         struct lpfc_io_buf *lpfc_ncmd_next;
20085         unsigned long iflag;
20086         int ret;
20087
20088         ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20089         if (ret) {
20090                 if (pbl_pool->count) {
20091                         /* Move a batch of XRIs from public to private pool */
20092                         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20093                         list_for_each_entry_safe(lpfc_ncmd,
20094                                                  lpfc_ncmd_next,
20095                                                  &pbl_pool->list,
20096                                                  list) {
20097                                 list_move_tail(&lpfc_ncmd->list,
20098                                                &pvt_pool->list);
20099                                 pvt_pool->count++;
20100                                 pbl_pool->count--;
20101                                 count--;
20102                                 if (count == 0)
20103                                         break;
20104                         }
20105
20106                         spin_unlock(&pvt_pool->lock);
20107                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20108                         return true;
20109                 }
20110                 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20111         }
20112
20113         return false;
20114 }
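
/*
 * Design note with a small sketch: spin_trylock_irqsave() is used instead
 * of a blocking lock so an allocating CPU never spins on a pbl_pool that
 * another CPU is already working on; the round-robin caller below treats
 * "locked by someone else" exactly like "empty" and moves on:
 *
 *	if (!_lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count))
 *		;	// try the next HWQ's public pool instead of waiting
 */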
20115
20116 /**
20117  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20118  * @phba: pointer to lpfc hba data structure.
20119  * @hwqid: index of the HWQ this pool belongs to.
20120  * @count: number of XRIs to move
20121  *
20122  * This routine tries to find some free common bufs in one of the public
20123  * pools using a round-robin search, starting from the local hwqid and then
20124  * from the HWQ that was found last time (rrb_next_hwqid). Once a public
20125  * pool is found, a batch of free common bufs is moved to the private pool
20126  * on hwqid; fewer than count XRIs may move if the public pools run short.
20127  **/
20128 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20129 {
20130         struct lpfc_multixri_pool *multixri_pool;
20131         struct lpfc_multixri_pool *next_multixri_pool;
20132         struct lpfc_pvt_pool *pvt_pool;
20133         struct lpfc_pbl_pool *pbl_pool;
20134         struct lpfc_sli4_hdw_queue *qp;
20135         u32 next_hwqid;
20136         u32 hwq_count;
20137         int ret;
20138
20139         qp = &phba->sli4_hba.hdwq[hwqid];
20140         multixri_pool = qp->p_multixri_pool;
20141         pvt_pool = &multixri_pool->pvt_pool;
20142         pbl_pool = &multixri_pool->pbl_pool;
20143
20144         /* Check if local pbl_pool is available */
20145         ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20146         if (ret) {
20147 #ifdef LPFC_MXP_STAT
20148                 multixri_pool->local_pbl_hit_count++;
20149 #endif
20150                 return;
20151         }
20152
20153         hwq_count = phba->cfg_hdw_queue;
20154
20155         /* Resume from the hwqid that was found last time */
20156         next_hwqid = multixri_pool->rrb_next_hwqid;
20157
20158         do {
20159                 /* Go to next hwq */
20160                 next_hwqid = (next_hwqid + 1) % hwq_count;
20161
20162                 next_multixri_pool =
20163                         phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20164                 pbl_pool = &next_multixri_pool->pbl_pool;
20165
20166                 /* Check if the public free xri pool is available */
20167                 ret = _lpfc_move_xri_pbl_to_pvt(
20168                         phba, qp, pbl_pool, pvt_pool, count);
20169
20170                 /* Exit while-loop on success or when all hwqids are checked */
20171         } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20172
20173         /* Starting point for the next time */
20174         multixri_pool->rrb_next_hwqid = next_hwqid;
20175
20176         if (!ret) {
20177                 /* stats: all public pools are empty */
20178                 multixri_pool->pbl_empty_count++;
20179         }
20180
20181 #ifdef LPFC_MXP_STAT
20182         if (ret) {
20183                 if (next_hwqid == hwqid)
20184                         multixri_pool->local_pbl_hit_count++;
20185                 else
20186                         multixri_pool->other_pbl_hit_count++;
20187         }
20188 #endif
20189 }
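
/*
 * Worked example of the round-robin walk above (hypothetical values): with
 * cfg_hdw_queue = 4, hwqid = 1 and rrb_next_hwqid = 1, the local pool is
 * tried first; on failure the loop probes HWQs 2, 3, 0 and finally 1,
 * stopping as soon as a pbl_pool yields bufs or next_hwqid wraps back to
 * the saved rrb_next_hwqid.
 */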
20190
20191 /**
20192  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20193  * @phba: pointer to lpfc hba data structure.
20194  * @hwqid: index of the HWQ this pool belongs to.
20195  *
20196  * This routine gets a batch of XRIs from the pbl_pool when the pvt_pool
20197  * falls below its low watermark.
20198  **/
20199 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20200 {
20201         struct lpfc_multixri_pool *multixri_pool;
20202         struct lpfc_pvt_pool *pvt_pool;
20203
20204         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20205         pvt_pool = &multixri_pool->pvt_pool;
20206
20207         if (pvt_pool->count < pvt_pool->low_watermark)
20208                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20209 }
20210
20211 /**
20212  * lpfc_release_io_buf - Return one IO buf back to free pool
20213  * @phba: pointer to lpfc hba data structure.
20214  * @lpfc_ncmd: IO buf to be returned.
20215  * @qp: pointer to the HWQ this IO buf is returned to.
20216  *
20217  * This routine returns one IO buf back to the free pool. If this is an
20218  * urgent IO, the IO buf is returned to the expedite pool. If
20219  * cfg_xri_rebalancing==1, the IO buf is returned to pbl_pool or pvt_pool
20220  * based on watermark and xri_limit.  If cfg_xri_rebalancing==0, the IO buf
20221  * is returned to lpfc_io_buf_list_put.
20222  **/
20223 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20224                          struct lpfc_sli4_hdw_queue *qp)
20225 {
20226         unsigned long iflag;
20227         struct lpfc_pbl_pool *pbl_pool;
20228         struct lpfc_pvt_pool *pvt_pool;
20229         struct lpfc_epd_pool *epd_pool;
20230         u32 txcmplq_cnt;
20231         u32 xri_owned;
20232         u32 xri_limit;
20233         u32 abts_io_bufs;
20234
20235         /* MUST zero fields if buffer is reused by another protocol */
20236         lpfc_ncmd->nvmeCmd = NULL;
20237         lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20238         lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20239
20240         if (phba->cfg_xri_rebalancing) {
20241                 if (lpfc_ncmd->expedite) {
20242                         /* Return to expedite pool */
20243                         epd_pool = &phba->epd_pool;
20244                         spin_lock_irqsave(&epd_pool->lock, iflag);
20245                         list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20246                         epd_pool->count++;
20247                         spin_unlock_irqrestore(&epd_pool->lock, iflag);
20248                         return;
20249                 }
20250
20251                 /* Avoid invalid access if an IO sneaks in and is being rejected
20252                  * just _after_ xri pools are destroyed in lpfc_offline.
20253                  * Nothing much can be done at this point.
20254                  */
20255                 if (!qp->p_multixri_pool)
20256                         return;
20257
20258                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20259                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20260
20261                 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
20262                 abts_io_bufs = qp->abts_scsi_io_bufs;
20263                 if (qp->nvme_wq) {
20264                         txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
20265                         abts_io_bufs += qp->abts_nvme_io_bufs;
20266                 }
20267
20268                 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20269                 xri_limit = qp->p_multixri_pool->xri_limit;
20270
20271 #ifdef LPFC_MXP_STAT
20272                 if (xri_owned <= xri_limit)
20273                         qp->p_multixri_pool->below_limit_count++;
20274                 else
20275                         qp->p_multixri_pool->above_limit_count++;
20276 #endif
20277
20278                 /* XRI goes to either public or private free xri pool
20279                  *     based on watermark and xri_limit
20280                  */
20281                 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20282                     (xri_owned < xri_limit &&
20283                      pvt_pool->count < pvt_pool->high_watermark)) {
20284                         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20285                                                   qp, free_pvt_pool);
20286                         list_add_tail(&lpfc_ncmd->list,
20287                                       &pvt_pool->list);
20288                         pvt_pool->count++;
20289                         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20290                 } else {
20291                         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20292                                                   qp, free_pub_pool);
20293                         list_add_tail(&lpfc_ncmd->list,
20294                                       &pbl_pool->list);
20295                         pbl_pool->count++;
20296                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20297                 }
20298         } else {
20299                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20300                                           qp, free_xri);
20301                 list_add_tail(&lpfc_ncmd->list,
20302                               &qp->lpfc_io_buf_list_put);
20303                 qp->put_io_bufs++;
20304                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20305                                        iflag);
20306         }
20307 }
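
/*
 * Worked example of the routing above (hypothetical watermarks): with
 * low_watermark = 10, high_watermark = 40 and xri_limit = 64, a released
 * buf goes to pvt_pool unconditionally while pvt_pool->count < 10. At
 * pvt_pool->count = 25 it still goes to pvt_pool, but only if the HWQ
 * owns fewer than 64 XRIs; once pvt_pool->count reaches 40, or the XRI
 * ownership hits the limit, the buf is routed to pbl_pool instead.
 */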
20308
20309 /**
20310  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20311  * @phba: pointer to lpfc hba data structure.
       * @qp: pointer to the HWQ that owns the private pool.
20312  * @pvt_pool: pointer to private pool data structure.
20313  * @ndlp: pointer to lpfc nodelist data structure.
20314  *
20315  * This routine tries to get one free IO buf from private pool.
20316  *
20317  * Return:
20318  *   pointer to one free IO buf - if private pool is not empty
20319  *   NULL - if private pool is empty
20320  **/
20321 static struct lpfc_io_buf *
20322 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20323                                   struct lpfc_sli4_hdw_queue *qp,
20324                                   struct lpfc_pvt_pool *pvt_pool,
20325                                   struct lpfc_nodelist *ndlp)
20326 {
20327         struct lpfc_io_buf *lpfc_ncmd;
20328         struct lpfc_io_buf *lpfc_ncmd_next;
20329         unsigned long iflag;
20330
20331         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20332         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20333                                  &pvt_pool->list, list) {
20334                 if (lpfc_test_rrq_active(
20335                         phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20336                         continue;
20337                 list_del(&lpfc_ncmd->list);
20338                 pvt_pool->count--;
20339                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20340                 return lpfc_ncmd;
20341         }
20342         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20343
20344         return NULL;
20345 }
20346
20347 /**
20348  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20349  * @phba: pointer to lpfc hba data structure.
20350  *
20351  * This routine tries to get one free IO buf from expedite pool.
20352  *
20353  * Return:
20354  *   pointer to one free IO buf - if expedite pool is not empty
20355  *   NULL - if expedite pool is empty
20356  **/
20357 static struct lpfc_io_buf *
20358 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20359 {
20360         struct lpfc_io_buf *lpfc_ncmd;
20361         struct lpfc_io_buf *lpfc_ncmd_next;
20362         unsigned long iflag;
20363         struct lpfc_epd_pool *epd_pool;
20364
20365         epd_pool = &phba->epd_pool;
20366         lpfc_ncmd = NULL;
20367
20368         spin_lock_irqsave(&epd_pool->lock, iflag);
20369         if (epd_pool->count > 0) {
20370                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20371                                          &epd_pool->list, list) {
20372                         list_del(&lpfc_ncmd->list);
20373                         epd_pool->count--;
20374                         break;
20375                 }
20376         }
20377         spin_unlock_irqrestore(&epd_pool->lock, iflag);
20378
20379         return lpfc_ncmd;
20380 }
20381
20382 /**
20383  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20384  * @phba: pointer to lpfc hba data structure.
20385  * @ndlp: pointer to lpfc nodelist data structure.
20386  * @hwqid: index of the HWQ to allocate from
20387  * @expedite: 1 means this request is urgent.
20388  *
20389  * This routine will do the following actions and then return a pointer to
20390  * one free IO buf.
20391  *
20392  * 1. If the private free xri pool is empty, move some XRIs from the public
20393  *    to the private pool.
20394  * 2. Get one XRI from private free xri pool.
20395  * 3. If we fail to get one from pvt_pool and this is an expedite request,
20396  *    get one free xri from expedite pool.
20397  *
20398  * Note: ndlp is only used on SCSI side for RRQ testing.
20399  *       The caller should pass NULL for ndlp on NVME side.
20400  *
20401  * Return:
20402  *   pointer to one free IO buf - if private pool is not empty
20403  *   NULL - if private pool is empty
20404  **/
20405 static struct lpfc_io_buf *
20406 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20407                                     struct lpfc_nodelist *ndlp,
20408                                     int hwqid, int expedite)
20409 {
20410         struct lpfc_sli4_hdw_queue *qp;
20411         struct lpfc_multixri_pool *multixri_pool;
20412         struct lpfc_pvt_pool *pvt_pool;
20413         struct lpfc_io_buf *lpfc_ncmd;
20414
20415         qp = &phba->sli4_hba.hdwq[hwqid];
20416         lpfc_ncmd = NULL;
20417         multixri_pool = qp->p_multixri_pool;
20418         pvt_pool = &multixri_pool->pvt_pool;
20419         multixri_pool->io_req_count++;
20420
20421         /* If pvt_pool is empty, move some XRIs from public to private pool */
20422         if (pvt_pool->count == 0)
20423                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20424
20425         /* Get one XRI from private free xri pool */
20426         lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20427
20428         if (lpfc_ncmd) {
20429                 lpfc_ncmd->hdwq = qp;
20430                 lpfc_ncmd->hdwq_no = hwqid;
20431         } else if (expedite) {
20432                 /* If we fail to get one from pvt_pool and this is an expedite
20433                  * request, get one free xri from expedite pool.
20434                  */
20435                 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20436         }
20437
20438         return lpfc_ncmd;
20439 }
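
/*
 * Trace of the three steps above for a hypothetical empty pvt_pool:
 * io_req_count is bumped, lpfc_move_xri_pbl_to_pvt() pulls up to XRI_BATCH
 * bufs over from a public pool, the private pool is then searched (skipping
 * any XRI with an active RRQ), and only an expedite request falls back to
 * the expedite pool if the private pool is still empty.
 */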
20440
20441 static inline struct lpfc_io_buf *
20442 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20443 {
20444         struct lpfc_sli4_hdw_queue *qp;
20445         struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20446
20447         qp = &phba->sli4_hba.hdwq[idx];
20448         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20449                                  &qp->lpfc_io_buf_list_get, list) {
20450                 if (lpfc_test_rrq_active(phba, ndlp,
20451                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
20452                         continue;
20453
20454                 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20455                         continue;
20456
20457                 list_del_init(&lpfc_cmd->list);
20458                 qp->get_io_bufs--;
20459                 lpfc_cmd->hdwq = qp;
20460                 lpfc_cmd->hdwq_no = idx;
20461                 return lpfc_cmd;
20462         }
20463         return NULL;
20464 }
20465
20466 /**
20467  * lpfc_get_io_buf - Get one IO buffer from free pool
20468  * @phba: The HBA for which this call is being executed.
20469  * @ndlp: pointer to lpfc nodelist data structure.
20470  * @hwqid: index of the HWQ to allocate from
20471  * @expedite: 1 means this request is urgent.
20472  *
20473  * This routine gets one IO buffer from the free pool. When
20474  * cfg_xri_rebalancing==1, it removes an IO buffer from the multiXRI pools;
20475  * otherwise it takes one from the head of the HWQ's io_buf_list.
20476  *
20477  * Note: ndlp is only used on SCSI side for RRQ testing.
20478  *       The caller should pass NULL for ndlp on NVME side.
20479  *
20480  * Return codes:
20481  *   NULL - Error
20482  *   Pointer to lpfc_io_buf - Success
20483  **/
20484 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20485                                     struct lpfc_nodelist *ndlp,
20486                                     u32 hwqid, int expedite)
20487 {
20488         struct lpfc_sli4_hdw_queue *qp;
20489         unsigned long iflag;
20490         struct lpfc_io_buf *lpfc_cmd;
20491
20492         qp = &phba->sli4_hba.hdwq[hwqid];
20493         lpfc_cmd = NULL;
20494
20495         if (phba->cfg_xri_rebalancing)
20496                 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20497                         phba, ndlp, hwqid, expedite);
20498         else {
20499                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20500                                           qp, alloc_xri_get);
20501                 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20502                         lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20503                 if (!lpfc_cmd) {
20504                         lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20505                                           qp, alloc_xri_put);
20506                         list_splice(&qp->lpfc_io_buf_list_put,
20507                                     &qp->lpfc_io_buf_list_get);
20508                         qp->get_io_bufs += qp->put_io_bufs;
20509                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20510                         qp->put_io_bufs = 0;
20511                         spin_unlock(&qp->io_buf_list_put_lock);
20512                         if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20513                             expedite)
20514                                 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20515                 }
20516                 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20517         }
20518
20519         return lpfc_cmd;
20520 }
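
/*
 * Illustrative allocation sketch (assumed caller, not lifted from the SCSI
 * or NVME paths): a submission routine picks its HWQ index, requests a buf,
 * and treats NULL as a busy condition:
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);  // ndlp: SCSI only
 *	if (!lpfc_cmd)
 *		return -EBUSY;	// pools exhausted, not an expedite request
 *	...
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */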