/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <asm/set_memory.h>

#include <linux/nvme-fc-driver.h>
#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
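
/*
 * Illustrative sketch, not part of the driver: a minimal caller of
 * lpfc_sli4_pcimem_bcopy() showing the sizing contract.  @cnt is a byte
 * count (the loop above steps in sizeof(uint64_t) increments), so a whole
 * queue entry is copied by passing the entry size.  The helper name is
 * hypothetical and marked __maybe_unused so it compiles without a caller.
 */
static __maybe_unused void
lpfc_example_copy_entry(union lpfc_wqe128 *wqe, void *qe_slot,
			uint32_t entry_size)
{
	/* entry_size comes from the owning queue, e.g. q->entry_size */
	lpfc_sli4_pcimem_bcopy(wqe, qe_slot, entry_size);
}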
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
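
/*
 * Illustrative sketch, not part of the driver: the locking and error
 * contract of lpfc_sli4_wq_put().  A caller serializes against other
 * posters (hbalock here, or the ring lock) and treats -EBUSY as "ring
 * full, retry later".  The lpfc_example_ name is hypothetical.
 */
static __maybe_unused int
lpfc_example_post_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
		      union lpfc_wqe128 *wqe)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);	/* 0, -EBUSY, -ENOMEM or -EINVAL */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}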
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}
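
/*
 * Illustrative sketch, not part of the driver: the intended call pattern
 * for lpfc_sli4_process_eq().  A per-EQ interrupt handler (hypothetical
 * here; the real handlers do more bookkeeping) just runs the loop above.
 * The cmpxchg() claim inside the routine makes a concurrent call from
 * another context fall through to the re-arm path instead of processing
 * the same queue twice.
 */
static __maybe_unused irqreturn_t
lpfc_example_eq_intr(int irq, void *dev_id)
{
	struct lpfc_queue *eq = dev_id;

	lpfc_sli4_process_eq(eq->phba, eq);
	return IRQ_HANDLED;
}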
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header Receive queue.
 * @drqe: The data Receive Queue Entry to put on the data Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return an error.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
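
/*
 * Illustrative sketch, not part of the driver: receive queues come in
 * header/data pairs, so a buffer post supplies one RQE for each half.
 * This hypothetical helper assumes the hbq_dmabuf layout and the
 * putPaddrLow()/putPaddrHigh() macros from the lpfc headers, and must be
 * called with the hbalock held, per the contract above.
 */
static __maybe_unused int
lpfc_example_post_rq_buf(struct lpfc_queue *hrq, struct lpfc_queue *drq,
			 struct hbq_dmabuf *buf)
{
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	/* header RQE addresses the header buffer, data RQE the payload */
	hrqe.address_lo = putPaddrLow(buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(buf->dbuf.phys);
	return lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
}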
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did){
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
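
/*
 * Illustrative sketch, not part of the driver: before reusing an XRI for
 * a new exchange to the same node, a caller consults the RRQ bitmap and
 * holds the I/O back while the RRQ window is still open.  The helper
 * name is hypothetical.
 */
static __maybe_unused bool
lpfc_example_xri_usable(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	/* non-zero means an RRQ is still active: do not reuse this XRI yet */
	return lpfc_test_rrq_active(phba, ndlp, xritag) == 0;
}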
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *          < 0  No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * field.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
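
/*
 * Illustrative sketch, not part of the driver: how a ring event handler
 * typically dispatches on the type returned above.  The comments mark
 * where the real handlers would act; this helper and its behavior are
 * placeholders, not the driver's actual routines.
 */
static __maybe_unused void
lpfc_example_dispatch_iocb(struct lpfc_hba *phba, IOCB_t *entry)
{
	switch (lpfc_sli_iocb_cmd_type(entry->ulpCommand)) {
	case LPFC_SOL_IOCB:	/* completion for a command we issued */
	case LPFC_ABORT_IOCB:	/* completion for an abort we issued */
	case LPFC_UNSOL_IOCB:	/* unsolicited receive from the wire */
		break;
	case LPFC_UNKNOWN_IOCB:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"Unexpected IOCB x%x\n", entry->ulpCommand);
		break;
	}
}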
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1916 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1917 * @phba: Pointer to HBA context object.
1918 * @hbqno: HBQ number.
1920 * This function is called with hbalock held to get the next
1921 * available slot for the given HBQ. If a free slot is
1922 * available for the HBQ, it returns a pointer to the next available
1923 * HBQ entry; otherwise it returns NULL.
1925 static struct lpfc_hbq_entry *
1926 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1928 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1930 lockdep_assert_held(&phba->hbalock);
1932 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1933 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1934 hbqp->next_hbqPutIdx = 0;
1936 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1937 uint32_t raw_index = phba->hbq_get[hbqno];
1938 uint32_t getidx = le32_to_cpu(raw_index);
1940 hbqp->local_hbqGetIdx = getidx;
1942 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1943 lpfc_printf_log(phba, KERN_ERR,
1944 LOG_SLI | LOG_VPORT,
1945 "1802 HBQ %d: local_hbqGetIdx "
1946 "%u is > than hbqp->entry_count %u\n",
1947 hbqno, hbqp->local_hbqGetIdx,
1948 hbqp->entry_count);
1950 phba->link_state = LPFC_HBA_ERROR;
1954 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1955 return NULL;
1958 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1959 hbqp->hbqPutIdx;
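/*
 * Illustrative sketch (not driver code): the HBQ is a single-producer
 * ring, so "full" is detected by reserving the slot after the current
 * put index and comparing it against the adapter's get index. A minimal
 * version of the index arithmetic used above:
 *
 *	uint32_t next = hbqp->hbqPutIdx + 1;
 *
 *	if (next >= hbqp->entry_count)
 *		next = 0;
 *	if (next == hbqp->local_hbqGetIdx)
 *		ring_is_full = true;	(re-read hbq_get[] before giving up)
 */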
1963 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1964 * @phba: Pointer to HBA context object.
1966 * This function is called with no lock held to free all the
1967 * hbq buffers while uninitializing the SLI interface. It also
1968 * frees the HBQ buffers returned by the firmware but not yet
1969 * processed by the upper layers.
1972 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1974 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1975 struct hbq_dmabuf *hbq_buf;
1976 unsigned long flags;
1979 hbq_count = lpfc_sli_hbq_count();
1980 /* Return all memory used by all HBQs */
1981 spin_lock_irqsave(&phba->hbalock, flags);
1982 for (i = 0; i < hbq_count; ++i) {
1983 list_for_each_entry_safe(dmabuf, next_dmabuf,
1984 &phba->hbqs[i].hbq_buffer_list, list) {
1985 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1986 list_del(&hbq_buf->dbuf.list);
1987 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1989 phba->hbqs[i].buffer_count = 0;
1992 /* Mark the HBQs not in use */
1993 phba->hbq_in_use = 0;
1994 spin_unlock_irqrestore(&phba->hbalock, flags);
1998 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1999 * @phba: Pointer to HBA context object.
2000 * @hbqno: HBQ number.
2001 * @hbq_buf: Pointer to HBQ buffer.
2003 * This function is called with the hbalock held to post an
2004 * hbq buffer to the firmware. If the function finds an empty
2005 * slot in the HBQ, it will post the buffer. The function returns
2006 * zero if it successfully posts the buffer; otherwise it
2007 * returns an error.
2010 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2011 struct hbq_dmabuf *hbq_buf)
2013 lockdep_assert_held(&phba->hbalock);
2014 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2018 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2019 * @phba: Pointer to HBA context object.
2020 * @hbqno: HBQ number.
2021 * @hbq_buf: Pointer to HBQ buffer.
2023 * This function is called with the hbalock held to post an hbq buffer to the
2024 * firmware. If the function finds an empty slot in the HBQ, it will post the
2025 * buffer and place it on the hbq_buffer_list. The function returns zero if
2026 * it successfully posts the buffer; otherwise it returns an error.
2029 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2030 struct hbq_dmabuf *hbq_buf)
2032 struct lpfc_hbq_entry *hbqe;
2033 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2035 lockdep_assert_held(&phba->hbalock);
2036 /* Get next HBQ entry slot to use */
2037 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2039 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2041 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2042 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2043 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2044 hbqe->bde.tus.f.bdeFlags = 0;
2045 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2046 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2048 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2049 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2051 readl(phba->hbq_put + hbqno);
2052 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
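/*
 * Sketch of the address split used above (illustration only; putPaddrHigh()
 * and putPaddrLow() are assumed to reduce to this): a dma_addr_t is cut
 * into the two 32-bit words the BDE carries, with the high word simply
 * zero for 32-bit DMA addresses:
 *
 *	uint32_t hi = (uint32_t)(((uint64_t)physaddr) >> 32);
 *	uint32_t lo = (uint32_t)(physaddr & 0xffffffff);
 */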
2059 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2060 * @phba: Pointer to HBA context object.
2061 * @hbqno: HBQ number.
2062 * @hbq_buf: Pointer to HBQ buffer.
2064 * This function is called with the hbalock held to post an RQE to the SLI4
2065 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2066 * the hbq_buffer_list and return zero, otherwise it will return an error.
2069 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2070 struct hbq_dmabuf *hbq_buf)
2073 struct lpfc_rqe hrqe;
2074 struct lpfc_rqe drqe;
2075 struct lpfc_queue *hrq;
2076 struct lpfc_queue *drq;
2078 if (hbqno != LPFC_ELS_HBQ)
2080 hrq = phba->sli4_hba.hdr_rq;
2081 drq = phba->sli4_hba.dat_rq;
2083 lockdep_assert_held(&phba->hbalock);
2084 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2085 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2086 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2087 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2088 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2091 hbq_buf->tag = (rc | (hbqno << 16));
2092 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
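/*
 * Illustrative note (not from the source): the buffer tag assigned above
 * packs the HBQ number into the upper 16 bits and the index returned by
 * lpfc_sli4_rq_put() into the lower 16, so later lookups can recover
 * both halves cheaply:
 *
 *	uint32_t tag = rc | (hbqno << 16);
 *	uint32_t qno = tag >> 16;
 *	uint32_t idx = tag & 0xffff;
 */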
2096 /* HBQ for ELS and CT traffic. */
2097 static struct lpfc_hbq_init lpfc_els_hbq = {
2102 .ring_mask = (1 << LPFC_ELS_RING),
2109 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2114 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2115 * @phba: Pointer to HBA context object.
2116 * @hbqno: HBQ number.
2117 * @count: Number of HBQ buffers to be posted.
2119 * This function is called with no lock held to post more hbq buffers to the
2120 * given HBQ. The function returns the number of HBQ buffers successfully
2121 * posted.
2124 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2126 uint32_t i, posted = 0;
2127 unsigned long flags;
2128 struct hbq_dmabuf *hbq_buffer;
2129 LIST_HEAD(hbq_buf_list);
2130 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2133 if ((phba->hbqs[hbqno].buffer_count + count) >
2134 lpfc_hbq_defs[hbqno]->entry_count)
2135 count = lpfc_hbq_defs[hbqno]->entry_count -
2136 phba->hbqs[hbqno].buffer_count;
2139 /* Allocate HBQ entries */
2140 for (i = 0; i < count; i++) {
2141 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2144 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2146 /* Check whether HBQ is still in use */
2147 spin_lock_irqsave(&phba->hbalock, flags);
2148 if (!phba->hbq_in_use)
2150 while (!list_empty(&hbq_buf_list)) {
2151 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2153 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2155 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2156 phba->hbqs[hbqno].buffer_count++;
2159 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2161 spin_unlock_irqrestore(&phba->hbalock, flags);
2164 spin_unlock_irqrestore(&phba->hbalock, flags);
2165 while (!list_empty(&hbq_buf_list)) {
2166 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2174 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2175 * @phba: Pointer to HBA context object.
2178 * This function posts more buffers to the HBQ. This function
2179 * is called with no lock held. The function returns the number of HBQ entries
2180 * successfully allocated.
2183 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2185 if (phba->sli_rev == LPFC_SLI_REV4)
2188 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2189 lpfc_hbq_defs[qno]->add_count);
2193 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2194 * @phba: Pointer to HBA context object.
2195 * @qno: HBQ queue number.
2197 * This function is called from SLI initialization code path with
2198 * no lock held to post initial HBQ buffers to firmware. The
2199 * function returns the number of HBQ entries successfully allocated.
2202 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2204 if (phba->sli_rev == LPFC_SLI_REV4)
2205 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2206 lpfc_hbq_defs[qno]->entry_count);
2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2209 lpfc_hbq_defs[qno]->init_count);
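/*
 * Usage sketch (hypothetical caller, for illustration only): SLI
 * initialization seeds an HBQ once with its init_count, then relies on
 * lpfc_sli_hbqbuf_add_hbqs() to top it up as buffers are consumed:
 *
 *	if (lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ) == 0)
 *		return -ENOMEM;	(nothing could be posted)
 */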
2213 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2214 * @rb_list: Pointer to the hbq buffer list to take the first buffer from.
2217 * This function removes the first hbq buffer on an hbq list and returns a
2218 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2220 static struct hbq_dmabuf *
2221 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2223 struct lpfc_dmabuf *d_buf;
2225 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2228 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2232 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2233 * @phba: Pointer to HBA context object.
2234 * @hrq: Pointer to the receive queue to take the first buffer from.
2236 * This function removes the first RQ buffer on an RQ buffer list and returns a
2237 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2239 static struct rqb_dmabuf *
2240 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2242 struct lpfc_dmabuf *h_buf;
2243 struct lpfc_rqb *rqbp;
2246 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2247 struct lpfc_dmabuf, list);
2250 rqbp->buffer_count--;
2251 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2255 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2256 * @phba: Pointer to HBA context object.
2257 * @tag: Tag of the hbq buffer.
2259 * This function searches for the hbq buffer associated with the given tag in
2260 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2261 * otherwise it returns NULL.
2263 static struct hbq_dmabuf *
2264 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2266 struct lpfc_dmabuf *d_buf;
2267 struct hbq_dmabuf *hbq_buf;
2271 if (hbqno >= LPFC_MAX_HBQS)
2274 spin_lock_irq(&phba->hbalock);
2275 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2276 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2277 if (hbq_buf->tag == tag) {
2278 spin_unlock_irq(&phba->hbalock);
2282 spin_unlock_irq(&phba->hbalock);
2283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2284 "1803 Bad hbq tag. Data: x%x x%x\n",
2285 tag, phba->hbqs[tag >> 16].buffer_count);
2290 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2291 * @phba: Pointer to HBA context object.
2292 * @hbq_buffer: Pointer to HBQ buffer.
2294 * This function is called with the hbalock held. It gives back
2295 * the hbq buffer to firmware. If the HBQ does not have space to
2296 * post the buffer, it will free the buffer.
2299 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2304 hbqno = hbq_buffer->tag >> 16;
2305 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2306 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2311 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox command
2312 * @mbxCommand: mailbox command code.
2314 * This function is called by the mailbox event handler function to verify
2315 * that the completed mailbox command is a legitimate mailbox command. If the
2316 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2317 * and the mailbox event handler will take the HBA offline.
2320 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2324 switch (mbxCommand) {
2328 case MBX_WRITE_VPARMS:
2329 case MBX_RUN_BIU_DIAG:
2332 case MBX_CONFIG_LINK:
2333 case MBX_CONFIG_RING:
2334 case MBX_RESET_RING:
2335 case MBX_READ_CONFIG:
2336 case MBX_READ_RCONFIG:
2337 case MBX_READ_SPARM:
2338 case MBX_READ_STATUS:
2342 case MBX_READ_LNK_STAT:
2344 case MBX_UNREG_LOGIN:
2346 case MBX_DUMP_MEMORY:
2347 case MBX_DUMP_CONTEXT:
2350 case MBX_UPDATE_CFG:
2352 case MBX_DEL_LD_ENTRY:
2353 case MBX_RUN_PROGRAM:
2355 case MBX_SET_VARIABLE:
2356 case MBX_UNREG_D_ID:
2357 case MBX_KILL_BOARD:
2358 case MBX_CONFIG_FARP:
2361 case MBX_RUN_BIU_DIAG64:
2362 case MBX_CONFIG_PORT:
2363 case MBX_READ_SPARM64:
2364 case MBX_READ_RPI64:
2365 case MBX_REG_LOGIN64:
2366 case MBX_READ_TOPOLOGY:
2369 case MBX_LOAD_EXP_ROM:
2370 case MBX_ASYNCEVT_ENABLE:
2374 case MBX_PORT_CAPABILITIES:
2375 case MBX_PORT_IOV_CONTROL:
2376 case MBX_SLI4_CONFIG:
2377 case MBX_SLI4_REQ_FTRS:
2379 case MBX_UNREG_FCFI:
2384 case MBX_RESUME_RPI:
2385 case MBX_READ_EVENT_LOG_STATUS:
2386 case MBX_READ_EVENT_LOG:
2387 case MBX_SECURITY_MGMT:
2389 case MBX_ACCESS_VDATA:
2400 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2401 * @phba: Pointer to HBA context object.
2402 * @pmboxq: Pointer to mailbox command.
2404 * This is the completion handler for mailbox commands issued from the
2405 * lpfc_sli_issue_mbox_wait function. This function is called by the
2406 * mailbox event handler function with no lock held. This function
2407 * will wake up the thread waiting on the completion pointed to by context3
2411 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2413 unsigned long drvr_flag;
2414 struct completion *pmbox_done;
2417 * If pmbox_done is empty, the driver thread gave up waiting and
2418 * continued running.
2420 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2421 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2422 pmbox_done = (struct completion *)pmboxq->context3;
2424 complete(pmbox_done);
2425 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
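/*
 * Sketch of the waiting side this handler pairs with (an approximation,
 * not the exact driver code): lpfc_sli_issue_mbox_wait() points
 * context3 at an on-stack completion and blocks until this handler
 * fires or the timeout expires:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->context3 = &mbox_done;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *	pmboxq->context3 = NULL;	(cleared under hbalock in practice)
 */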
2431 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2432 * @phba: Pointer to HBA context object.
2433 * @pmb: Pointer to mailbox object.
2435 * This function is the default mailbox completion handler. It
2436 * frees the memory resources associated with the completed mailbox
2437 * command. If the completed command is a REG_LOGIN mailbox command,
2438 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2441 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2443 struct lpfc_vport *vport = pmb->vport;
2444 struct lpfc_dmabuf *mp;
2445 struct lpfc_nodelist *ndlp;
2446 struct Scsi_Host *shost;
2450 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2453 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2458 * If a REG_LOGIN succeeded after the node was destroyed or the node
2459 * is being re-discovered, the driver needs to clean up the RPI.
2461 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2462 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2463 !pmb->u.mb.mbxStatus) {
2464 rpi = pmb->u.mb.un.varWords[0];
2465 vpi = pmb->u.mb.un.varRegLogin.vpi;
2466 lpfc_unreg_login(phba, vpi, rpi, pmb);
2468 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2469 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2470 if (rc != MBX_NOT_FINISHED)
2474 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2475 !(phba->pport->load_flag & FC_UNLOADING) &&
2476 !pmb->u.mb.mbxStatus) {
2477 shost = lpfc_shost_from_vport(vport);
2478 spin_lock_irq(shost->host_lock);
2479 vport->vpi_state |= LPFC_VPI_REGISTERED;
2480 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2481 spin_unlock_irq(shost->host_lock);
2484 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2487 pmb->ctx_buf = NULL;
2488 pmb->ctx_ndlp = NULL;
2491 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2492 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2494 /* Check to see if there are any deferred events to process */
2498 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2499 "1438 UNREG cmpl deferred mbox x%x "
2500 "on NPort x%x Data: x%x x%x %p\n",
2501 ndlp->nlp_rpi, ndlp->nlp_DID,
2502 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2504 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2505 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2506 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2507 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2508 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2510 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2512 pmb->ctx_ndlp = NULL;
2516 /* Check security permission status on INIT_LINK mailbox command */
2517 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2518 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2520 "2860 SLI authentication is required "
2521 "for INIT_LINK but has not done yet\n");
2523 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2524 lpfc_sli4_mbox_cmd_free(phba, pmb);
2526 mempool_free(pmb, phba->mbox_mem_pool);
2529 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2530 * @phba: Pointer to HBA context object.
2531 * @pmb: Pointer to mailbox object.
2533 * This function is the unreg rpi mailbox completion handler. It
2534 * frees the memory resources associated with the completed mailbox
2535 * command. An additional reference is put on the ndlp to prevent
2536 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2537 * the unreg mailbox command completes; this routine puts the
2538 * reference back.
2542 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2544 struct lpfc_vport *vport = pmb->vport;
2545 struct lpfc_nodelist *ndlp;
2547 ndlp = pmb->ctx_ndlp;
2548 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2549 if (phba->sli_rev == LPFC_SLI_REV4 &&
2550 (bf_get(lpfc_sli_intf_if_type,
2551 &phba->sli4_hba.sli_intf) >=
2552 LPFC_SLI_INTF_IF_TYPE_2)) {
2555 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2556 "0010 UNREG_LOGIN vpi:%x "
2557 "rpi:%x DID:%x defer x%x flg x%x "
2559 vport->vpi, ndlp->nlp_rpi,
2560 ndlp->nlp_DID, ndlp->nlp_defer_did,
2562 ndlp->nlp_usg_map, ndlp);
2563 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2566 /* Check to see if there are any deferred
2569 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2570 (ndlp->nlp_defer_did !=
2571 NLP_EVT_NOTHING_PENDING)) {
2573 vport, KERN_INFO, LOG_DISCOVERY,
2574 "4111 UNREG cmpl deferred "
2576 "NPort x%x Data: x%x %p\n",
2577 ndlp->nlp_rpi, ndlp->nlp_DID,
2578 ndlp->nlp_defer_did, ndlp);
2579 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2580 ndlp->nlp_defer_did =
2581 NLP_EVT_NOTHING_PENDING;
2582 lpfc_issue_els_plogi(
2583 vport, ndlp->nlp_DID, 0);
2585 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2591 mempool_free(pmb, phba->mbox_mem_pool);
2595 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2596 * @phba: Pointer to HBA context object.
2598 * This function is called with no lock held. This function processes all
2599 * the completed mailbox commands and gives them to the upper layers. The
2600 * interrupt service routine processes the mailbox completion interrupt and
2601 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2602 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2603 * drains the completed mailbox commands from the mboxq_cmpl queue. This
2604 * function returns the mailbox commands to the upper layer by calling the
2605 * completion handler function of each mailbox.
2608 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2615 phba->sli.slistat.mbox_event++;
2617 /* Get all completed mailbox buffers into the cmplq */
2618 spin_lock_irq(&phba->hbalock);
2619 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2620 spin_unlock_irq(&phba->hbalock);
2622 /* Get a Mailbox buffer to setup mailbox commands for callback */
2624 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2630 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2632 lpfc_debugfs_disc_trc(pmb->vport,
2633 LPFC_DISC_TRC_MBOX_VPORT,
2634 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2635 (uint32_t)pmbox->mbxCommand,
2636 pmbox->un.varWords[0],
2637 pmbox->un.varWords[1]);
2640 lpfc_debugfs_disc_trc(phba->pport,
2642 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2643 (uint32_t)pmbox->mbxCommand,
2644 pmbox->un.varWords[0],
2645 pmbox->un.varWords[1]);
2650 * It is a fatal error if an unknown mbox command completes.
2652 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2654 /* Unknown mailbox command compl */
2655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2656 "(%d):0323 Unknown Mailbox command "
2657 "x%x (x%x/x%x) Cmpl\n",
2658 pmb->vport ? pmb->vport->vpi : 0,
2660 lpfc_sli_config_mbox_subsys_get(phba,
2662 lpfc_sli_config_mbox_opcode_get(phba,
2664 phba->link_state = LPFC_HBA_ERROR;
2665 phba->work_hs = HS_FFER3;
2666 lpfc_handle_eratt(phba);
2670 if (pmbox->mbxStatus) {
2671 phba->sli.slistat.mbox_stat_err++;
2672 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2673 /* Mbox cmd cmpl error - RETRYing */
2674 lpfc_printf_log(phba, KERN_INFO,
2676 "(%d):0305 Mbox cmd cmpl "
2677 "error - RETRYing Data: x%x "
2678 "(x%x/x%x) x%x x%x x%x\n",
2679 pmb->vport ? pmb->vport->vpi : 0,
2681 lpfc_sli_config_mbox_subsys_get(phba,
2683 lpfc_sli_config_mbox_opcode_get(phba,
2686 pmbox->un.varWords[0],
2687 pmb->vport->port_state);
2688 pmbox->mbxStatus = 0;
2689 pmbox->mbxOwner = OWN_HOST;
2690 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2691 if (rc != MBX_NOT_FINISHED)
2696 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2697 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2698 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2699 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2701 pmb->vport ? pmb->vport->vpi : 0,
2703 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2704 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2706 *((uint32_t *) pmbox),
2707 pmbox->un.varWords[0],
2708 pmbox->un.varWords[1],
2709 pmbox->un.varWords[2],
2710 pmbox->un.varWords[3],
2711 pmbox->un.varWords[4],
2712 pmbox->un.varWords[5],
2713 pmbox->un.varWords[6],
2714 pmbox->un.varWords[7],
2715 pmbox->un.varWords[8],
2716 pmbox->un.varWords[9],
2717 pmbox->un.varWords[10]);
2720 pmb->mbox_cmpl(phba, pmb);
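/*
 * Sketch of the hand-off pattern used above (illustration only): the
 * interrupt side appends completed mailboxes to phba->sli.mboxq_cmpl
 * under hbalock; this worker drains the whole list with one splice so
 * the completion handlers can run without holding the lock:
 *
 *	LIST_HEAD(cmplq);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
 *	spin_unlock_irq(&phba->hbalock);
 *	(process cmplq entries lock-free)
 */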
2726 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2727 * @phba: Pointer to HBA context object.
2728 * @pring: Pointer to driver SLI ring object.
2731 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2732 * is set in the tag, the buffer was posted for a particular exchange and
2733 * the function returns the buffer without posting a replacement.
2734 * If the buffer is for unsolicited ELS or CT traffic, this function
2735 * returns the buffer and also posts another buffer to the firmware.
2737 static struct lpfc_dmabuf *
2738 lpfc_sli_get_buff(struct lpfc_hba *phba,
2739 struct lpfc_sli_ring *pring,
2742 struct hbq_dmabuf *hbq_entry;
2744 if (tag & QUE_BUFTAG_BIT)
2745 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2746 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2749 return &hbq_entry->dbuf;
2753 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2754 * @phba: Pointer to HBA context object.
2755 * @pring: Pointer to driver SLI ring object.
2756 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2757 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2758 * @fch_type: the type for the first frame of the sequence.
2760 * This function is called with no lock held. This function uses the r_ctl and
2761 * type of the received sequence to find the correct callback function to call
2762 * to process the sequence.
2765 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2766 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2773 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2779 /* Unsolicited Responses */
2780 if (pring->prt[0].profile) {
2781 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2782 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2786 /* We must search, based on rctl / type
2787 for the right routine */
2788 for (i = 0; i < pring->num_mask; i++) {
2789 if ((pring->prt[i].rctl == fch_r_ctl) &&
2790 (pring->prt[i].type == fch_type)) {
2791 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2792 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2793 (phba, pring, saveq);
2801 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2802 * @phba: Pointer to HBA context object.
2803 * @pring: Pointer to driver SLI ring object.
2804 * @saveq: Pointer to the unsolicited iocb.
2806 * This function is called with no lock held by the ring event handler
2807 * when there is an unsolicited iocb posted to the response ring by the
2808 * firmware. This function gets the buffer associated with the iocbs
2809 * and calls the event handler for the ring. This function handles both
2810 * qring buffers and hbq buffers.
2811 * When the function returns 1, the caller can free the iocb object; otherwise
2812 * the upper-layer functions will free the iocb objects.
2815 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2816 struct lpfc_iocbq *saveq)
2820 uint32_t Rctl, Type;
2821 struct lpfc_iocbq *iocbq;
2822 struct lpfc_dmabuf *dmzbuf;
2824 irsp = &(saveq->iocb);
2826 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2827 if (pring->lpfc_sli_rcv_async_status)
2828 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2830 lpfc_printf_log(phba,
2833 "0316 Ring %d handler: unexpected "
2834 "ASYNC_STATUS iocb received evt_code "
2837 irsp->un.asyncstat.evt_code);
2841 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2842 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2843 if (irsp->ulpBdeCount > 0) {
2844 dmzbuf = lpfc_sli_get_buff(phba, pring,
2845 irsp->un.ulpWord[3]);
2846 lpfc_in_buf_free(phba, dmzbuf);
2849 if (irsp->ulpBdeCount > 1) {
2850 dmzbuf = lpfc_sli_get_buff(phba, pring,
2851 irsp->unsli3.sli3Words[3]);
2852 lpfc_in_buf_free(phba, dmzbuf);
2855 if (irsp->ulpBdeCount > 2) {
2856 dmzbuf = lpfc_sli_get_buff(phba, pring,
2857 irsp->unsli3.sli3Words[7]);
2858 lpfc_in_buf_free(phba, dmzbuf);
2864 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2865 if (irsp->ulpBdeCount != 0) {
2866 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2867 irsp->un.ulpWord[3]);
2868 if (!saveq->context2)
2869 lpfc_printf_log(phba,
2872 "0341 Ring %d Cannot find buffer for "
2873 "an unsolicited iocb. tag 0x%x\n",
2875 irsp->un.ulpWord[3]);
2877 if (irsp->ulpBdeCount == 2) {
2878 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2879 irsp->unsli3.sli3Words[7]);
2880 if (!saveq->context3)
2881 lpfc_printf_log(phba,
2884 "0342 Ring %d Cannot find buffer for an"
2885 " unsolicited iocb. tag 0x%x\n",
2887 irsp->unsli3.sli3Words[7]);
2889 list_for_each_entry(iocbq, &saveq->list, list) {
2890 irsp = &(iocbq->iocb);
2891 if (irsp->ulpBdeCount != 0) {
2892 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2893 irsp->un.ulpWord[3]);
2894 if (!iocbq->context2)
2895 lpfc_printf_log(phba,
2898 "0343 Ring %d Cannot find "
2899 "buffer for an unsolicited iocb"
2900 ". tag 0x%x\n", pring->ringno,
2901 irsp->un.ulpWord[3]);
2903 if (irsp->ulpBdeCount == 2) {
2904 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2905 irsp->unsli3.sli3Words[7]);
2906 if (!iocbq->context3)
2907 lpfc_printf_log(phba,
2910 "0344 Ring %d Cannot find "
2911 "buffer for an unsolicited "
2914 irsp->unsli3.sli3Words[7]);
2918 if (irsp->ulpBdeCount != 0 &&
2919 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2920 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2923 /* search continue save q for same XRI */
2924 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2925 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2926 saveq->iocb.unsli3.rcvsli3.ox_id) {
2927 list_add_tail(&saveq->list, &iocbq->list);
2933 list_add_tail(&saveq->clist,
2934 &pring->iocb_continue_saveq);
2935 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2936 list_del_init(&iocbq->clist);
2938 irsp = &(saveq->iocb);
2942 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2943 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2944 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2945 Rctl = FC_RCTL_ELS_REQ;
2948 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2949 Rctl = w5p->hcsw.Rctl;
2950 Type = w5p->hcsw.Type;
2952 /* Firmware Workaround */
2953 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2954 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2955 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2956 Rctl = FC_RCTL_ELS_REQ;
2958 w5p->hcsw.Rctl = Rctl;
2959 w5p->hcsw.Type = Type;
2963 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2964 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2965 "0313 Ring %d handler: unexpected Rctl x%x "
2966 "Type x%x received\n",
2967 pring->ringno, Rctl, Type);
2973 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2974 * @phba: Pointer to HBA context object.
2975 * @pring: Pointer to driver SLI ring object.
2976 * @prspiocb: Pointer to response iocb object.
2978 * This function looks up the iocb_lookup table to get the command iocb
2979 * corresponding to the given response iocb using the iotag of the
2980 * response iocb. The driver calls this function with the hbalock held
2981 * for SLI3 ports or the ring lock held for SLI4 ports.
2982 * This function returns the command iocb object if it finds the command
2983 * iocb else returns NULL.
2985 static struct lpfc_iocbq *
2986 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2987 struct lpfc_sli_ring *pring,
2988 struct lpfc_iocbq *prspiocb)
2990 struct lpfc_iocbq *cmd_iocb = NULL;
2992 spinlock_t *temp_lock = NULL;
2993 unsigned long iflag = 0;
2995 if (phba->sli_rev == LPFC_SLI_REV4)
2996 temp_lock = &pring->ring_lock;
2998 temp_lock = &phba->hbalock;
3000 spin_lock_irqsave(temp_lock, iflag);
3001 iotag = prspiocb->iocb.ulpIoTag;
3003 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3004 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3005 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3006 /* remove from txcmpl queue list */
3007 list_del_init(&cmd_iocb->list);
3008 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3009 pring->txcmplq_cnt--;
3010 spin_unlock_irqrestore(temp_lock, iflag);
3011 return cmd_iocb;
3015 spin_unlock_irqrestore(temp_lock, iflag);
3016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3017 "0317 iotag x%x is out of "
3018 "range: max iotag x%x wd0 x%x\n",
3019 iotag, phba->sli.last_iotag,
3020 *(((uint32_t *) &prspiocb->iocb) + 7));
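/*
 * Note on the lookup scheme (illustrative, not driver code): iocbq_lookup
 * is a flat array indexed by iotag, with iotag 0 reserved as invalid,
 * which is what makes matching a response to its command O(1). The range
 * check above guards against a corrupt or stale iotag:
 *
 *	uint16_t iotag = prspiocb->iocb.ulpIoTag;
 *
 *	if (iotag != 0 && iotag <= phba->sli.last_iotag)
 *		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 */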
3025 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3026 * @phba: Pointer to HBA context object.
3027 * @pring: Pointer to driver SLI ring object.
3030 * This function looks up the iocb_lookup table to get the command iocb
3031 * corresponding to the given iotag. The driver calls this function with
3032 * the ring lock held because this function is an SLI4 port only helper.
3033 * This function returns the command iocb object if it finds the command
3034 * iocb else returns NULL.
3036 static struct lpfc_iocbq *
3037 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3038 struct lpfc_sli_ring *pring, uint16_t iotag)
3040 struct lpfc_iocbq *cmd_iocb = NULL;
3041 spinlock_t *temp_lock = NULL;
3042 unsigned long iflag = 0;
3044 if (phba->sli_rev == LPFC_SLI_REV4)
3045 temp_lock = &pring->ring_lock;
3047 temp_lock = &phba->hbalock;
3049 spin_lock_irqsave(temp_lock, iflag);
3050 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3051 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3052 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3053 /* remove from txcmpl queue list */
3054 list_del_init(&cmd_iocb->list);
3055 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3056 pring->txcmplq_cnt--;
3057 spin_unlock_irqrestore(temp_lock, iflag);
3058 return cmd_iocb;
3062 spin_unlock_irqrestore(temp_lock, iflag);
3063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3064 "0372 iotag x%x lookup error: max iotag (x%x) "
3066 iotag, phba->sli.last_iotag,
3067 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3072 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3073 * @phba: Pointer to HBA context object.
3074 * @pring: Pointer to driver SLI ring object.
3075 * @saveq: Pointer to the response iocb to be processed.
3077 * This function is called by the ring event handler for non-fcp
3078 * rings when there is a new response iocb in the response ring.
3079 * The caller is not required to hold any locks. This function
3080 * gets the command iocb associated with the response iocb and
3081 * calls the completion handler for the command iocb. If there
3082 * is no completion handler, the function will free the resources
3083 * associated with the command iocb. If the response iocb is for
3084 * an already aborted command iocb, the status of the completion
3085 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3086 * This function always returns 1.
3089 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3090 struct lpfc_iocbq *saveq)
3092 struct lpfc_iocbq *cmdiocbp;
3094 unsigned long iflag;
3096 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3098 if (cmdiocbp->iocb_cmpl) {
3100 * If an ELS command failed send an event to mgmt
3103 if (saveq->iocb.ulpStatus &&
3104 (pring->ringno == LPFC_ELS_RING) &&
3105 (cmdiocbp->iocb.ulpCommand ==
3106 CMD_ELS_REQUEST64_CR))
3107 lpfc_send_els_failure_event(phba,
3111 * Post all ELS completions to the worker thread.
3112 * All other are passed to the completion callback.
3114 if (pring->ringno == LPFC_ELS_RING) {
3115 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3116 (cmdiocbp->iocb_flag &
3117 LPFC_DRIVER_ABORTED)) {
3118 spin_lock_irqsave(&phba->hbalock,
3120 cmdiocbp->iocb_flag &=
3121 ~LPFC_DRIVER_ABORTED;
3122 spin_unlock_irqrestore(&phba->hbalock,
3124 saveq->iocb.ulpStatus =
3125 IOSTAT_LOCAL_REJECT;
3126 saveq->iocb.un.ulpWord[4] =
3129 /* Firmware could still be in progress
3130 * of DMAing payload, so don't free data
3131 * buffer till after a hbeat.
3133 spin_lock_irqsave(&phba->hbalock,
3135 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3136 spin_unlock_irqrestore(&phba->hbalock,
3139 if (phba->sli_rev == LPFC_SLI_REV4) {
3140 if (saveq->iocb_flag &
3141 LPFC_EXCHANGE_BUSY) {
3142 /* Set cmdiocb flag for the
3143 * exchange busy so sgl (xri)
3144 * will not be released until
3145 * the abort xri is received
3149 &phba->hbalock, iflag);
3150 cmdiocbp->iocb_flag |=
3152 spin_unlock_irqrestore(
3153 &phba->hbalock, iflag);
3155 if (cmdiocbp->iocb_flag &
3156 LPFC_DRIVER_ABORTED) {
3158 * Clear LPFC_DRIVER_ABORTED
3159 * bit in case it was driver
3163 &phba->hbalock, iflag);
3164 cmdiocbp->iocb_flag &=
3165 ~LPFC_DRIVER_ABORTED;
3166 spin_unlock_irqrestore(
3167 &phba->hbalock, iflag);
3168 cmdiocbp->iocb.ulpStatus =
3169 IOSTAT_LOCAL_REJECT;
3170 cmdiocbp->iocb.un.ulpWord[4] =
3171 IOERR_ABORT_REQUESTED;
3173 * For SLI4, the response iocb
3174 * contains NO_XRI in sli_xritag,
3175 * so it does not affect the
3176 * sgl (xri) release process.
3178 saveq->iocb.ulpStatus =
3179 IOSTAT_LOCAL_REJECT;
3180 saveq->iocb.un.ulpWord[4] =
3183 &phba->hbalock, iflag);
3185 LPFC_DELAY_MEM_FREE;
3186 spin_unlock_irqrestore(
3187 &phba->hbalock, iflag);
3191 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3193 lpfc_sli_release_iocbq(phba, cmdiocbp);
3196 * Unknown initiating command based on the response iotag.
3197 * This could be the case on the ELS ring because of
3198 * lpfc_els_abort().
3200 if (pring->ringno != LPFC_ELS_RING) {
3202 * Ring <ringno> handler: unexpected completion IoTag
3205 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3206 "0322 Ring %d handler: "
3207 "unexpected completion IoTag x%x "
3208 "Data: x%x x%x x%x x%x\n",
3210 saveq->iocb.ulpIoTag,
3211 saveq->iocb.ulpStatus,
3212 saveq->iocb.un.ulpWord[4],
3213 saveq->iocb.ulpCommand,
3214 saveq->iocb.ulpContext);
3222 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3223 * @phba: Pointer to HBA context object.
3224 * @pring: Pointer to driver SLI ring object.
3226 * This function is called from the iocb ring event handlers when
3227 * the put pointer is ahead of the get pointer for a ring. This function signals
3228 * an error attention condition to the worker thread, and the worker
3229 * thread will transition the HBA to offline state.
3232 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3234 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3236 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3237 * rsp ring <portRspMax>
3239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3240 "0312 Ring %d handler: portRspPut %d "
3241 "is bigger than rsp ring %d\n",
3242 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3243 pring->sli.sli3.numRiocb);
3245 phba->link_state = LPFC_HBA_ERROR;
3248 * All error attention handlers are posted to
3251 phba->work_ha |= HA_ERATT;
3252 phba->work_hs = HS_FFER3;
3254 lpfc_worker_wake_up(phba);
3260 * lpfc_poll_eratt - Error attention polling timer timeout handler
3261 * @t: Pointer to the timer_list structure embedded in the HBA context object.
3263 * This function is invoked by the Error Attention polling timer when the
3264 * timer times out. It will check the SLI Error Attention register for
3265 * possible attention events. If so, it will post an Error Attention event
3266 * and wake up worker thread to process it. Otherwise, it will set up the
3267 * Error Attention polling timer for the next poll.
3269 void lpfc_poll_eratt(struct timer_list *t)
3271 struct lpfc_hba *phba;
3273 uint64_t sli_intr, cnt;
3275 phba = from_timer(phba, t, eratt_poll);
3277 /* Here we will also keep track of interrupts per sec of the hba */
3278 sli_intr = phba->sli.slistat.sli_intr;
3280 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3281 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3282 sli_intr);
3283 else
3284 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3286 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3287 do_div(cnt, phba->eratt_poll_interval);
3288 phba->sli.slistat.sli_ips = cnt;
3290 phba->sli.slistat.sli_prev_intr = sli_intr;
3292 /* Check chip HA register for error event */
3293 eratt = lpfc_sli_check_eratt(phba);
3296 /* Tell the worker thread there is work to do */
3297 lpfc_worker_wake_up(phba);
3299 /* Restart the timer for next eratt poll */
3300 mod_timer(&phba->eratt_poll,
3302 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
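/*
 * Worked sketch of the rate computation above (illustration only): the
 * interrupt counter is a free-running u64, so the wrapped delta the code
 * computes explicitly is, to within one count, what plain unsigned
 * subtraction yields; do_div() then divides in place (32-bit safe) by
 * the poll interval in seconds:
 *
 *	uint64_t delta = sli_intr - prev;
 *
 *	do_div(delta, phba->eratt_poll_interval);
 *	(delta now holds interrupts per second since the last poll)
 */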
3308 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3309 * @phba: Pointer to HBA context object.
3310 * @pring: Pointer to driver SLI ring object.
3311 * @mask: Host attention register mask for this ring.
3313 * This function is called from the interrupt context when there is a ring
3314 * event for the fcp ring. The caller does not hold any lock.
3315 * The function processes each response iocb in the response ring until it
3316 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3317 * LE bit set. The function will call the completion handler of the command iocb
3318 * if the response iocb indicates a completion for a command iocb or it is
3319 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3320 * function if this is an unsolicited iocb.
3321 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3322 * to check it explicitly.
3325 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3326 struct lpfc_sli_ring *pring, uint32_t mask)
3328 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3329 IOCB_t *irsp = NULL;
3330 IOCB_t *entry = NULL;
3331 struct lpfc_iocbq *cmdiocbq = NULL;
3332 struct lpfc_iocbq rspiocbq;
3334 uint32_t portRspPut, portRspMax;
3336 lpfc_iocb_type type;
3337 unsigned long iflag;
3338 uint32_t rsp_cmpl = 0;
3340 spin_lock_irqsave(&phba->hbalock, iflag);
3341 pring->stats.iocb_event++;
3344 * The next available response entry should never exceed the maximum
3345 * entries. If it does, treat it as an adapter hardware error.
3347 portRspMax = pring->sli.sli3.numRiocb;
3348 portRspPut = le32_to_cpu(pgp->rspPutInx);
3349 if (unlikely(portRspPut >= portRspMax)) {
3350 lpfc_sli_rsp_pointers_error(phba, pring);
3351 spin_unlock_irqrestore(&phba->hbalock, iflag);
3354 if (phba->fcp_ring_in_use) {
3355 spin_unlock_irqrestore(&phba->hbalock, iflag);
3358 phba->fcp_ring_in_use = 1;
3361 while (pring->sli.sli3.rspidx != portRspPut) {
3363 * Fetch an entry off the ring and copy it into a local data
3364 * structure. The copy involves a byte-swap since the
3365 * network and PCI byte orders are different.
3367 entry = lpfc_resp_iocb(phba, pring);
3368 phba->last_completion_time = jiffies;
3370 if (++pring->sli.sli3.rspidx >= portRspMax)
3371 pring->sli.sli3.rspidx = 0;
3373 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3374 (uint32_t *) &rspiocbq.iocb,
3375 phba->iocb_rsp_size);
3376 INIT_LIST_HEAD(&(rspiocbq.list));
3377 irsp = &rspiocbq.iocb;
3379 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3380 pring->stats.iocb_rsp++;
3383 if (unlikely(irsp->ulpStatus)) {
3385 * If resource errors reported from HBA, reduce
3386 * queuedepths of the SCSI device.
3388 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3389 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3390 IOERR_NO_RESOURCES)) {
3391 spin_unlock_irqrestore(&phba->hbalock, iflag);
3392 phba->lpfc_rampdown_queue_depth(phba);
3393 spin_lock_irqsave(&phba->hbalock, iflag);
3396 /* Rsp ring <ringno> error: IOCB */
3397 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3398 "0336 Rsp Ring %d error: IOCB Data: "
3399 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3401 irsp->un.ulpWord[0],
3402 irsp->un.ulpWord[1],
3403 irsp->un.ulpWord[2],
3404 irsp->un.ulpWord[3],
3405 irsp->un.ulpWord[4],
3406 irsp->un.ulpWord[5],
3407 *(uint32_t *)&irsp->un1,
3408 *((uint32_t *)&irsp->un1 + 1));
3412 case LPFC_ABORT_IOCB:
3415 * Idle exchange closed via ABTS from port. No iocb
3416 * resources need to be recovered.
3418 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3419 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3420 "0333 IOCB cmd 0x%x"
3421 " processed. Skipping"
3427 spin_unlock_irqrestore(&phba->hbalock, iflag);
3428 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3430 spin_lock_irqsave(&phba->hbalock, iflag);
3431 if (unlikely(!cmdiocbq))
3433 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3434 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3435 if (cmdiocbq->iocb_cmpl) {
3436 spin_unlock_irqrestore(&phba->hbalock, iflag);
3437 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3439 spin_lock_irqsave(&phba->hbalock, iflag);
3442 case LPFC_UNSOL_IOCB:
3443 spin_unlock_irqrestore(&phba->hbalock, iflag);
3444 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3445 spin_lock_irqsave(&phba->hbalock, iflag);
3448 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3449 char adaptermsg[LPFC_MAX_ADPTMSG];
3450 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3451 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3453 dev_warn(&((phba->pcidev)->dev),
3455 phba->brd_no, adaptermsg);
3457 /* Unknown IOCB command */
3458 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3459 "0334 Unknown IOCB command "
3460 "Data: x%x, x%x x%x x%x x%x\n",
3461 type, irsp->ulpCommand,
3470 * The response IOCB has been processed. Update the ring
3471 * pointer in SLIM. If the port response put pointer has not
3472 * been updated, sync the pgp->rspPutInx and fetch the new port
3473 * response put pointer.
3475 writel(pring->sli.sli3.rspidx,
3476 &phba->host_gp[pring->ringno].rspGetInx);
3478 if (pring->sli.sli3.rspidx == portRspPut)
3479 portRspPut = le32_to_cpu(pgp->rspPutInx);
3482 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3483 pring->stats.iocb_rsp_full++;
3484 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3485 writel(status, phba->CAregaddr);
3486 readl(phba->CAregaddr);
3488 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3489 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3490 pring->stats.iocb_cmd_empty++;
3492 /* Force update of the local copy of cmdGetInx */
3493 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3494 lpfc_sli_resume_iocb(phba, pring);
3496 if ((pring->lpfc_sli_cmd_available))
3497 (pring->lpfc_sli_cmd_available) (phba, pring);
3501 phba->fcp_ring_in_use = 0;
3502 spin_unlock_irqrestore(&phba->hbalock, iflag);
3507 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3508 * @phba: Pointer to HBA context object.
3509 * @pring: Pointer to driver SLI ring object.
3510 * @rspiocbp: Pointer to driver response IOCB object.
3512 * This function is called from the worker thread when there is a slow-path
3513 * response IOCB to process. This function chains all the response iocbs until
3514 * seeing the iocb with the LE bit set. The function will call
3515 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3516 * completion of a command iocb. The function will call the
3517 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3518 * The function frees the resources or calls the completion handler if this
3519 * iocb is an abort completion. The function returns NULL when the response
3520 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3521 * this function shall chain the iocb on to the iocb_continueq and return the
3522 * response iocb passed in.
3524 static struct lpfc_iocbq *
3525 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3526 struct lpfc_iocbq *rspiocbp)
3528 struct lpfc_iocbq *saveq;
3529 struct lpfc_iocbq *cmdiocbp;
3530 struct lpfc_iocbq *next_iocb;
3531 IOCB_t *irsp = NULL;
3532 uint32_t free_saveq;
3533 uint8_t iocb_cmd_type;
3534 lpfc_iocb_type type;
3535 unsigned long iflag;
3538 spin_lock_irqsave(&phba->hbalock, iflag);
3539 /* First add the response iocb to the continueq list */
3540 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3541 pring->iocb_continueq_cnt++;
3543 /* Now, determine whether the list is completed for processing */
3544 irsp = &rspiocbp->iocb;
3547 * By default, the driver expects to free all resources
3548 * associated with this iocb completion.
3551 saveq = list_get_first(&pring->iocb_continueq,
3552 struct lpfc_iocbq, list);
3553 irsp = &(saveq->iocb);
3554 list_del_init(&pring->iocb_continueq);
3555 pring->iocb_continueq_cnt = 0;
3557 pring->stats.iocb_rsp++;
3560 * If resource errors reported from HBA, reduce
3561 * queuedepths of the SCSI device.
3563 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3564 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3565 IOERR_NO_RESOURCES)) {
3566 spin_unlock_irqrestore(&phba->hbalock, iflag);
3567 phba->lpfc_rampdown_queue_depth(phba);
3568 spin_lock_irqsave(&phba->hbalock, iflag);
3571 if (irsp->ulpStatus) {
3572 /* Rsp ring <ringno> error: IOCB */
3573 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3574 "0328 Rsp Ring %d error: "
3579 "x%x x%x x%x x%x\n",
3581 irsp->un.ulpWord[0],
3582 irsp->un.ulpWord[1],
3583 irsp->un.ulpWord[2],
3584 irsp->un.ulpWord[3],
3585 irsp->un.ulpWord[4],
3586 irsp->un.ulpWord[5],
3587 *(((uint32_t *) irsp) + 6),
3588 *(((uint32_t *) irsp) + 7),
3589 *(((uint32_t *) irsp) + 8),
3590 *(((uint32_t *) irsp) + 9),
3591 *(((uint32_t *) irsp) + 10),
3592 *(((uint32_t *) irsp) + 11),
3593 *(((uint32_t *) irsp) + 12),
3594 *(((uint32_t *) irsp) + 13),
3595 *(((uint32_t *) irsp) + 14),
3596 *(((uint32_t *) irsp) + 15));
3600 * Fetch the IOCB command type and call the correct completion
3601 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3602 * get freed back to the lpfc_iocb_list by the discovery
3605 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3606 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3609 spin_unlock_irqrestore(&phba->hbalock, iflag);
3610 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3611 spin_lock_irqsave(&phba->hbalock, iflag);
3614 case LPFC_UNSOL_IOCB:
3615 spin_unlock_irqrestore(&phba->hbalock, iflag);
3616 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3617 spin_lock_irqsave(&phba->hbalock, iflag);
3622 case LPFC_ABORT_IOCB:
3624 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3625 spin_unlock_irqrestore(&phba->hbalock, iflag);
3626 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3628 spin_lock_irqsave(&phba->hbalock, iflag);
3631 /* Call the specified completion routine */
3632 if (cmdiocbp->iocb_cmpl) {
3633 spin_unlock_irqrestore(&phba->hbalock,
3635 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3637 spin_lock_irqsave(&phba->hbalock,
3640 __lpfc_sli_release_iocbq(phba,
3645 case LPFC_UNKNOWN_IOCB:
3646 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3647 char adaptermsg[LPFC_MAX_ADPTMSG];
3648 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3649 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3651 dev_warn(&((phba->pcidev)->dev),
3653 phba->brd_no, adaptermsg);
3655 /* Unknown IOCB command */
3656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3657 "0335 Unknown IOCB "
3658 "command Data: x%x "
3669 list_for_each_entry_safe(rspiocbp, next_iocb,
3670 &saveq->list, list) {
3671 list_del_init(&rspiocbp->list);
3672 __lpfc_sli_release_iocbq(phba, rspiocbp);
3674 __lpfc_sli_release_iocbq(phba, saveq);
3678 spin_unlock_irqrestore(&phba->hbalock, iflag);
3683 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3684 * @phba: Pointer to HBA context object.
3685 * @pring: Pointer to driver SLI ring object.
3686 * @mask: Host attention register mask for this ring.
3688 * This routine wraps the actual slow_ring event process routine from the
3689 * API jump table function pointer from the lpfc_hba struct.
3692 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3693 struct lpfc_sli_ring *pring, uint32_t mask)
3695 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
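/*
 * Sketch of the jump-table setup behind this wrapper (assumed, for
 * illustration): attach-time code points the handler at the SLI3 or
 * SLI4 implementation once, so this hot path avoids a per-call
 * revision check:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_handle_slow_ring_event =
 *					lpfc_sli_handle_slow_ring_event_s3;
 *	else
 *		phba->lpfc_sli_handle_slow_ring_event =
 *					lpfc_sli_handle_slow_ring_event_s4;
 */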
3699 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3700 * @phba: Pointer to HBA context object.
3701 * @pring: Pointer to driver SLI ring object.
3702 * @mask: Host attention register mask for this ring.
3704 * This function is called from the worker thread when there is a ring event
3705 * for non-fcp rings. The caller does not hold any lock. The function
3706 * removes each response iocb in the response ring and calls the handle
3707 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3710 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3711 struct lpfc_sli_ring *pring, uint32_t mask)
3713 struct lpfc_pgp *pgp;
3715 IOCB_t *irsp = NULL;
3716 struct lpfc_iocbq *rspiocbp = NULL;
3717 uint32_t portRspPut, portRspMax;
3718 unsigned long iflag;
3721 pgp = &phba->port_gp[pring->ringno];
3722 spin_lock_irqsave(&phba->hbalock, iflag);
3723 pring->stats.iocb_event++;
3726 * The next available response entry should never exceed the maximum
3727 * entries. If it does, treat it as an adapter hardware error.
3729 portRspMax = pring->sli.sli3.numRiocb;
3730 portRspPut = le32_to_cpu(pgp->rspPutInx);
3731 if (portRspPut >= portRspMax) {
3733 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3734 * rsp ring <portRspMax>
3736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3737 "0303 Ring %d handler: portRspPut %d "
3738 "is bigger than rsp ring %d\n",
3739 pring->ringno, portRspPut, portRspMax);
3741 phba->link_state = LPFC_HBA_ERROR;
3742 spin_unlock_irqrestore(&phba->hbalock, iflag);
3744 phba->work_hs = HS_FFER3;
3745 lpfc_handle_eratt(phba);
3751 while (pring->sli.sli3.rspidx != portRspPut) {
3753 * Build a completion list and call the appropriate handler.
3754 * The process is to get the next available response iocb, get
3755 * a free iocb from the list, copy the response data into the
3756 * free iocb, insert to the continuation list, and update the
3757 * next response index to slim. This process makes response
3758 * iocbs in the ring available to DMA as fast as possible but
3759 * pays a penalty for a copy operation. Since the iocb is
3760 * only 32 bytes, this penalty is considered small relative to
3761 * the PCI reads for register values and a slim write. When
3762 * the ulpLe field is set, the entire Command has been
3763 * received.
3765 entry = lpfc_resp_iocb(phba, pring);
3767 phba->last_completion_time = jiffies;
3768 rspiocbp = __lpfc_sli_get_iocbq(phba);
3769 if (rspiocbp == NULL) {
3770 printk(KERN_ERR "%s: out of buffers! Failing "
3771 "completion.\n", __func__);
3775 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3776 phba->iocb_rsp_size);
3777 irsp = &rspiocbp->iocb;
3779 if (++pring->sli.sli3.rspidx >= portRspMax)
3780 pring->sli.sli3.rspidx = 0;
3782 if (pring->ringno == LPFC_ELS_RING) {
3783 lpfc_debugfs_slow_ring_trc(phba,
3784 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3785 *(((uint32_t *) irsp) + 4),
3786 *(((uint32_t *) irsp) + 6),
3787 *(((uint32_t *) irsp) + 7));
3790 writel(pring->sli.sli3.rspidx,
3791 &phba->host_gp[pring->ringno].rspGetInx);
3793 spin_unlock_irqrestore(&phba->hbalock, iflag);
3794 /* Handle the response IOCB */
3795 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3796 spin_lock_irqsave(&phba->hbalock, iflag);
3799 * If the port response put pointer has not been updated, sync
3800 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3801 * response put pointer.
3803 if (pring->sli.sli3.rspidx == portRspPut) {
3804 portRspPut = le32_to_cpu(pgp->rspPutInx);
3806 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3808 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3809 /* At least one response entry has been freed */
3810 pring->stats.iocb_rsp_full++;
3811 /* SET RxRE_RSP in Chip Att register */
3812 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3813 writel(status, phba->CAregaddr);
3814 readl(phba->CAregaddr); /* flush */
3816 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3817 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3818 pring->stats.iocb_cmd_empty++;
3820 /* Force update of the local copy of cmdGetInx */
3821 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3822 lpfc_sli_resume_iocb(phba, pring);
3824 if ((pring->lpfc_sli_cmd_available))
3825 (pring->lpfc_sli_cmd_available) (phba, pring);
3829 spin_unlock_irqrestore(&phba->hbalock, iflag);
3834 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3835 * @phba: Pointer to HBA context object.
3836 * @pring: Pointer to driver SLI ring object.
3837 * @mask: Host attention register mask for this ring.
3839 * This function is called from the worker thread when there is a pending
3840 * ELS response iocb on the driver internal slow-path response iocb worker
3841 * queue. The caller does not hold any lock. The function removes each
3842 * response iocb from the response worker queue and calls the handle
3843 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3846 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3847 struct lpfc_sli_ring *pring, uint32_t mask)
3849 struct lpfc_iocbq *irspiocbq;
3850 struct hbq_dmabuf *dmabuf;
3851 struct lpfc_cq_event *cq_event;
3852 unsigned long iflag;
3855 spin_lock_irqsave(&phba->hbalock, iflag);
3856 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3857 spin_unlock_irqrestore(&phba->hbalock, iflag);
3858 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3859 /* Get the response iocb from the head of work queue */
3860 spin_lock_irqsave(&phba->hbalock, iflag);
3861 list_remove_head(&phba->sli4_hba.sp_queue_event,
3862 cq_event, struct lpfc_cq_event, list);
3863 spin_unlock_irqrestore(&phba->hbalock, iflag);
3865 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3866 case CQE_CODE_COMPL_WQE:
3867 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3869 /* Translate ELS WCQE to response IOCBQ */
3870 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3873 lpfc_sli_sp_handle_rspiocb(phba, pring,
3877 case CQE_CODE_RECEIVE:
3878 case CQE_CODE_RECEIVE_V1:
3879 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3881 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3888 /* Limit the number of events to 64 to avoid soft lockups */
3895 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3896 * @phba: Pointer to HBA context object.
3897 * @pring: Pointer to driver SLI ring object.
3899 * This function aborts all iocbs in the given ring and frees all the iocb
3900 * objects in txq. This function issues an abort iocb for all the iocb commands
3901 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3902 * this function returns. The caller is not required to hold any locks.
3905 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3907 LIST_HEAD(completions);
3908 struct lpfc_iocbq *iocb, *next_iocb;
3910 if (pring->ringno == LPFC_ELS_RING) {
3911 lpfc_fabric_abort_hba(phba);
3914 /* Error everything on txq and txcmplq
3917 if (phba->sli_rev >= LPFC_SLI_REV4) {
3918 spin_lock_irq(&pring->ring_lock);
3919 list_splice_init(&pring->txq, &completions);
3921 spin_unlock_irq(&pring->ring_lock);
3923 spin_lock_irq(&phba->hbalock);
3924 /* Next issue ABTS for everything on the txcmplq */
3925 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3926 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3927 spin_unlock_irq(&phba->hbalock);
3929 spin_lock_irq(&phba->hbalock);
3930 list_splice_init(&pring->txq, &completions);
3933 /* Next issue ABTS for everything on the txcmplq */
3934 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3935 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3936 spin_unlock_irq(&phba->hbalock);
3939 /* Cancel all the IOCBs from the completions list */
3940 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3945 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3946 * @phba: Pointer to HBA context object.
3949 * This function aborts all iocbs in FCP rings and frees all the iocb
3950 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3952 * the return of this function. The caller is not required to hold any locks.
3955 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3957 struct lpfc_sli *psli = &phba->sli;
3958 struct lpfc_sli_ring *pring;
3961 /* Look on all the FCP Rings for the iotag */
3962 if (phba->sli_rev >= LPFC_SLI_REV4) {
3963 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3964 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3965 lpfc_sli_abort_iocb_ring(phba, pring);
3968 pring = &psli->sli3_ring[LPFC_FCP_RING];
3969 lpfc_sli_abort_iocb_ring(phba, pring);
3974 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3975 * @phba: Pointer to HBA context object.
3977 * This function flushes all iocbs in the fcp ring and frees all the iocb
3978 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for the iocb commands in txcmplq; they are simply returned with
 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
 * slot has been permanently disabled.
3984 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3988 struct lpfc_sli *psli = &phba->sli;
3989 struct lpfc_sli_ring *pring;
3991 struct lpfc_iocbq *piocb, *next_iocb;
3993 spin_lock_irq(&phba->hbalock);
3994 /* Indicate the I/O queues are flushed */
3995 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3996 spin_unlock_irq(&phba->hbalock);
3998 /* Look on all the FCP Rings for the iotag */
3999 if (phba->sli_rev >= LPFC_SLI_REV4) {
4000 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4001 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
4003 spin_lock_irq(&pring->ring_lock);
4004 /* Retrieve everything on txq */
4005 list_splice_init(&pring->txq, &txq);
4006 list_for_each_entry_safe(piocb, next_iocb,
4007 &pring->txcmplq, list)
4008 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4009 /* Retrieve everything on the txcmplq */
4010 list_splice_init(&pring->txcmplq, &txcmplq);
4012 pring->txcmplq_cnt = 0;
4013 spin_unlock_irq(&pring->ring_lock);
4016 lpfc_sli_cancel_iocbs(phba, &txq,
4017 IOSTAT_LOCAL_REJECT,
4019 /* Flush the txcmpq */
4020 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4021 IOSTAT_LOCAL_REJECT,
4025 pring = &psli->sli3_ring[LPFC_FCP_RING];
4027 spin_lock_irq(&phba->hbalock);
4028 /* Retrieve everything on txq */
4029 list_splice_init(&pring->txq, &txq);
4030 list_for_each_entry_safe(piocb, next_iocb,
4031 &pring->txcmplq, list)
4032 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4033 /* Retrieve everything on the txcmplq */
4034 list_splice_init(&pring->txcmplq, &txcmplq);
4036 pring->txcmplq_cnt = 0;
4037 spin_unlock_irq(&phba->hbalock);
4040 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4042 /* Flush the txcmpq */
4043 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4049 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4050 * @phba: Pointer to HBA context object.
4052 * This function flushes all wqes in the nvme rings and frees all resources
4053 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq; they are simply returned with
 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
 * slot has been permanently disabled.
4059 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4062 struct lpfc_sli_ring *pring;
4064 struct lpfc_iocbq *piocb, *next_iocb;
4066 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4067 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4070 /* Hint to other driver operations that a flush is in progress. */
4071 spin_lock_irq(&phba->hbalock);
4072 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4073 spin_unlock_irq(&phba->hbalock);
4075 /* Cycle through all NVME rings and complete each IO with
	 * a local driver reason code. This is a flush, so no abort
	 * exchange is sent to the FW.
	 */
4079 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4080 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4082 spin_lock_irq(&pring->ring_lock);
4083 list_for_each_entry_safe(piocb, next_iocb,
4084 &pring->txcmplq, list)
4085 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4086 /* Retrieve everything on the txcmplq */
4087 list_splice_init(&pring->txcmplq, &txcmplq);
4088 pring->txcmplq_cnt = 0;
4089 spin_unlock_irq(&pring->ring_lock);
		/* Flush the txcmplq */
4092 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4093 IOSTAT_LOCAL_REJECT,
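/*
 * Editorial note: a "flush" differs from an "abort" in that no ABTS is sent
 * to the firmware; outstanding I/Os are simply completed back to their
 * owners with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN. That is the only safe
 * option here, since EEH has declared the PCI slot permanently dead.
 */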
4099 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4100 * @phba: Pointer to HBA context object.
4101 * @mask: Bit mask to be checked.
4103 * This function reads the host status register and compares
 * with the provided bit mask to check if the HBA completed
4105 * the restart. This function will wait in a loop for the
4106 * HBA to complete restart. If the HBA does not restart within
4107 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 if the HBA fails to restart, otherwise it returns
 * zero.
4112 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4118 /* Read the HBA Host Status Register */
4119 if (lpfc_readl(phba->HSregaddr, &status))
4123 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
	 * check every 2.5 sec for 4 more retries.
	 * Break out of the loop if errors occur during init.
4128 while (((status & mask) != mask) &&
4129 !(status & HS_FFERM) &&
4141 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4142 lpfc_sli_brdrestart(phba);
4144 /* Read the HBA Host Status Register */
4145 if (lpfc_readl(phba->HSregaddr, &status)) {
4151 /* Check to see if any errors occurred during init */
4152 if ((status & HS_FFERM) || (i >= 20)) {
4153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4154 "2751 Adapter failed to restart, "
4155 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4157 readl(phba->MBslimaddr + 0xa8),
4158 readl(phba->MBslimaddr + 0xac));
4159 phba->link_state = LPFC_HBA_ERROR;
4167 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4168 * @phba: Pointer to HBA context object.
4169 * @mask: Bit mask to be checked.
 * This function checks the host status register to determine if the HBA is
 * ready. It waits in a loop for the HBA to become ready.
 * If the HBA is not ready, the function resets the HBA PCI
 * function again. The function returns 1 if the HBA fails to become ready,
 * otherwise it returns zero.
4178 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4183 /* Read the HBA Host Status Register */
4184 status = lpfc_sli4_post_status_check(phba);
4187 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4188 lpfc_sli_brdrestart(phba);
4189 status = lpfc_sli4_post_status_check(phba);
4192 /* Check to see if any errors occurred during init */
4194 phba->link_state = LPFC_HBA_ERROR;
4197 phba->sli4_hba.intr_enable = 0;
 * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
4204 * @phba: Pointer to HBA context object.
4205 * @mask: Bit mask to be checked.
 * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine,
 * invoked through the API jump table function pointer in the lpfc_hba struct.
4211 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4213 return phba->lpfc_sli_brdready(phba, mask);
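/*
 * Example (hypothetical caller, mirroring how the restart paths in this
 * file use the wrapper): wait for the SLI3 ready bits after a board
 * restart; a nonzero return means the HBA never became ready.
 *
 *	lpfc_sli_brdrestart(phba);
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY)) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		return -EIO;
 *	}
 */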
4216 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4219 * lpfc_reset_barrier - Make HBA ready for HBA reset
4220 * @phba: Pointer to HBA context object.
 * This function is called, with hbalock held, before resetting an HBA. It
 * requests the HBA to quiesce DMAs before the reset.
4225 void lpfc_reset_barrier(struct lpfc_hba *phba)
4227 uint32_t __iomem *resp_buf;
4228 uint32_t __iomem *mbox_buf;
4229 volatile uint32_t mbox;
4230 uint32_t hc_copy, ha_copy, resp_data;
4234 lockdep_assert_held(&phba->hbalock);
4236 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4237 if (hdrtype != 0x80 ||
4238 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4239 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4243 * Tell the other part of the chip to suspend temporarily all
4246 resp_buf = phba->MBslimaddr;
4248 /* Disable the error attention */
4249 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4251 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4252 readl(phba->HCregaddr); /* flush */
4253 phba->link_flag |= LS_IGNORE_ERATT;
4255 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4257 if (ha_copy & HA_ERATT) {
4258 /* Clear Chip error bit */
4259 writel(HA_ERATT, phba->HAregaddr);
4260 phba->pport->stopped = 1;
4264 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4265 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4267 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4268 mbox_buf = phba->MBslimaddr;
4269 writel(mbox, mbox_buf);
4271 for (i = 0; i < 50; i++) {
4272 if (lpfc_readl((resp_buf + 1), &resp_data))
4274 if (resp_data != ~(BARRIER_TEST_PATTERN))
4280 if (lpfc_readl((resp_buf + 1), &resp_data))
4282 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4283 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4284 phba->pport->stopped)
4290 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4292 for (i = 0; i < 500; i++) {
4293 if (lpfc_readl(resp_buf, &resp_data))
4295 if (resp_data != mbox)
4304 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4306 if (!(ha_copy & HA_ERATT))
4312 if (readl(phba->HAregaddr) & HA_ERATT) {
4313 writel(HA_ERATT, phba->HAregaddr);
4314 phba->pport->stopped = 1;
4318 phba->link_flag &= ~LS_IGNORE_ERATT;
4319 writel(hc_copy, phba->HCregaddr);
4320 readl(phba->HCregaddr); /* flush */
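/*
 * Editorial note: the readl()-after-writel() pairs above are the standard
 * MMIO posted-write flush idiom. A PCI write may linger in a bridge's
 * posting buffers; reading any register back from the same device forces
 * the write to reach the adapter before execution continues:
 *
 *	writel(val, regaddr);
 *	readl(regaddr);
 *
 * where the readl() exists only for its flushing side effect.
 */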
4324 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4325 * @phba: Pointer to HBA context object.
4327 * This function issues a kill_board mailbox command and waits for
4328 * the error attention interrupt. This function is called for stopping
4329 * the firmware processing. The caller is not required to hold any
 * locks. This function calls the lpfc_hba_down_post function to free
 * any pending commands after the kill. The function returns 1 if it
 * fails to kill the board, else it returns 0.
4335 lpfc_sli_brdkill(struct lpfc_hba *phba)
4337 struct lpfc_sli *psli;
4347 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4348 "0329 Kill HBA Data: x%x x%x\n",
4349 phba->pport->port_state, psli->sli_flag);
4351 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4355 /* Disable the error attention */
4356 spin_lock_irq(&phba->hbalock);
4357 if (lpfc_readl(phba->HCregaddr, &status)) {
4358 spin_unlock_irq(&phba->hbalock);
4359 mempool_free(pmb, phba->mbox_mem_pool);
4362 status &= ~HC_ERINT_ENA;
4363 writel(status, phba->HCregaddr);
4364 readl(phba->HCregaddr); /* flush */
4365 phba->link_flag |= LS_IGNORE_ERATT;
4366 spin_unlock_irq(&phba->hbalock);
4368 lpfc_kill_board(phba, pmb);
4369 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4370 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4372 if (retval != MBX_SUCCESS) {
4373 if (retval != MBX_BUSY)
4374 mempool_free(pmb, phba->mbox_mem_pool);
4375 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4376 "2752 KILL_BOARD command failed retval %d\n",
4378 spin_lock_irq(&phba->hbalock);
4379 phba->link_flag &= ~LS_IGNORE_ERATT;
4380 spin_unlock_irq(&phba->hbalock);
4384 spin_lock_irq(&phba->hbalock);
4385 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4386 spin_unlock_irq(&phba->hbalock);
4388 mempool_free(pmb, phba->mbox_mem_pool);
4390 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4391 * attention every 100ms for 3 seconds. If we don't get ERATT after
4392 * 3 seconds we still set HBA_ERROR state because the status of the
4393 * board is now undefined.
4395 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4397 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4399 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4403 del_timer_sync(&psli->mbox_tmo);
4404 if (ha_copy & HA_ERATT) {
4405 writel(HA_ERATT, phba->HAregaddr);
4406 phba->pport->stopped = 1;
4408 spin_lock_irq(&phba->hbalock);
4409 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4410 psli->mbox_active = NULL;
4411 phba->link_flag &= ~LS_IGNORE_ERATT;
4412 spin_unlock_irq(&phba->hbalock);
4414 lpfc_hba_down_post(phba);
4415 phba->link_state = LPFC_HBA_ERROR;
4417 return ha_copy & HA_ERATT ? 0 : 1;
4421 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4422 * @phba: Pointer to HBA context object.
4424 * This function resets the HBA by writing HC_INITFF to the control
4425 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function always returns 0.
4429 * The caller is not required to hold any locks.
4432 lpfc_sli_brdreset(struct lpfc_hba *phba)
4434 struct lpfc_sli *psli;
4435 struct lpfc_sli_ring *pring;
4442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4443 "0325 Reset HBA Data: x%x x%x\n",
4444 (phba->pport) ? phba->pport->port_state : 0,
4447 /* perform board reset */
4448 phba->fc_eventTag = 0;
4449 phba->link_events = 0;
4451 phba->pport->fc_myDID = 0;
4452 phba->pport->fc_prevDID = 0;
4455 /* Turn off parity checking and serr during the physical reset */
4456 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4459 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4461 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4463 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4465 /* Now toggle INITFF bit in the Host Control Register */
4466 writel(HC_INITFF, phba->HCregaddr);
4468 readl(phba->HCregaddr); /* flush */
4469 writel(0, phba->HCregaddr);
4470 readl(phba->HCregaddr); /* flush */
4472 /* Restore PCI cmd register */
4473 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4475 /* Initialize relevant SLI info */
4476 for (i = 0; i < psli->num_rings; i++) {
4477 pring = &psli->sli3_ring[i];
4479 pring->sli.sli3.rspidx = 0;
4480 pring->sli.sli3.next_cmdidx = 0;
4481 pring->sli.sli3.local_getidx = 0;
4482 pring->sli.sli3.cmdidx = 0;
4483 pring->missbufcnt = 0;
4486 phba->link_state = LPFC_WARM_START;
4491 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4492 * @phba: Pointer to HBA context object.
4494 * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while it resets the device. The caller is not required to hold
 * any locks.
 * This function always returns 0.
4501 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4503 struct lpfc_sli *psli = &phba->sli;
4508 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4509 "0295 Reset HBA Data: x%x x%x x%x\n",
4510 phba->pport->port_state, psli->sli_flag,
4513 /* perform board reset */
4514 phba->fc_eventTag = 0;
4515 phba->link_events = 0;
4516 phba->pport->fc_myDID = 0;
4517 phba->pport->fc_prevDID = 0;
4519 spin_lock_irq(&phba->hbalock);
4520 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4521 phba->fcf.fcf_flag = 0;
4522 spin_unlock_irq(&phba->hbalock);
4524 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4525 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4526 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4530 /* Now physically reset the device */
4531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4532 "0389 Performing PCI function reset!\n");
4534 /* Turn off parity checking and serr during the physical reset */
4535 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4537 "3205 PCI read Config failed\n");
4541 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4542 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4544 /* Perform FCoE PCI function reset before freeing queue memory */
4545 rc = lpfc_pci_function_reset(phba);
4547 /* Restore PCI cmd register */
4548 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
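/*
 * Editorial note: parity and SERR reporting are masked in PCI_COMMAND for
 * the duration of the physical reset because the chip can emit spurious
 * parity errors while it reinitializes. The pattern used above is:
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cfg);
 *	pci_write_config_word(pdev, PCI_COMMAND,
 *			cfg & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *	(perform the reset)
 *	pci_write_config_word(pdev, PCI_COMMAND, cfg);
 */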
4554 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4555 * @phba: Pointer to HBA context object.
4557 * This function is called in the SLI initialization code path to
4558 * restart the HBA. The caller is not required to hold any lock.
4559 * This function writes MBX_RESTART mailbox command to the SLIM and
4560 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4561 * function to free any pending commands. The function enables
4562 * POST only during the first initialization. The function returns zero.
4563 * The function does not guarantee completion of MBX_RESTART mailbox
4564 * command before the return of this function.
4567 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4570 struct lpfc_sli *psli;
4571 volatile uint32_t word0;
4572 void __iomem *to_slim;
4573 uint32_t hba_aer_enabled;
4575 spin_lock_irq(&phba->hbalock);
4577 /* Take PCIe device Advanced Error Reporting (AER) state */
4578 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4583 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4584 "0337 Restart HBA Data: x%x x%x\n",
4585 (phba->pport) ? phba->pport->port_state : 0,
4589 mb = (MAILBOX_t *) &word0;
4590 mb->mbxCommand = MBX_RESTART;
4593 lpfc_reset_barrier(phba);
4595 to_slim = phba->MBslimaddr;
4596 writel(*(uint32_t *) mb, to_slim);
4597 readl(to_slim); /* flush */
4599 /* Only skip post after fc_ffinit is completed */
4600 if (phba->pport && phba->pport->port_state)
4601 word0 = 1; /* This is really setting up word1 */
4603 word0 = 0; /* This is really setting up word1 */
4604 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4605 writel(*(uint32_t *) mb, to_slim);
4606 readl(to_slim); /* flush */
4608 lpfc_sli_brdreset(phba);
4610 phba->pport->stopped = 0;
4611 phba->link_state = LPFC_INIT_START;
4613 spin_unlock_irq(&phba->hbalock);
4615 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4616 psli->stats_start = ktime_get_seconds();
4618 /* Give the INITFF and Post time to settle. */
4621 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4622 if (hba_aer_enabled)
4623 pci_disable_pcie_error_reporting(phba->pcidev);
4625 lpfc_hba_down_post(phba);
4631 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4632 * @phba: Pointer to HBA context object.
4634 * This function is called in the SLI initialization code path to restart
4635 * a SLI4 HBA. The caller is not required to hold any lock.
4636 * At the end of the function, it calls lpfc_hba_down_post function to
4637 * free any pending commands.
4640 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4642 struct lpfc_sli *psli = &phba->sli;
4643 uint32_t hba_aer_enabled;
4647 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4648 "0296 Restart HBA Data: x%x x%x\n",
4649 phba->pport->port_state, psli->sli_flag);
4651 /* Take PCIe device Advanced Error Reporting (AER) state */
4652 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4654 rc = lpfc_sli4_brdreset(phba);
4658 spin_lock_irq(&phba->hbalock);
4659 phba->pport->stopped = 0;
4660 phba->link_state = LPFC_INIT_START;
4662 spin_unlock_irq(&phba->hbalock);
4664 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4665 psli->stats_start = ktime_get_seconds();
4667 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4668 if (hba_aer_enabled)
4669 pci_disable_pcie_error_reporting(phba->pcidev);
4671 lpfc_hba_down_post(phba);
4672 lpfc_sli4_queue_destroy(phba);
4678 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4679 * @phba: Pointer to HBA context object.
4681 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4682 * API jump table function pointer from the lpfc_hba struct.
4685 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4687 return phba->lpfc_sli_brdrestart(phba);
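/*
 * Editorial note: the SLI-rev specific entry points behind these wrappers
 * are presumed to be bound once at attach time in the usual jump-table
 * style, e.g. (illustrative, not the literal setup code):
 *
 *	phba->lpfc_sli_brdrestart = (phba->sli_rev == LPFC_SLI_REV4) ?
 *			lpfc_sli_brdrestart_s4 : lpfc_sli_brdrestart_s3;
 */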
4691 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4692 * @phba: Pointer to HBA context object.
4694 * This function is called after a HBA restart to wait for successful
4695 * restart of the HBA. Successful restart of the HBA is indicated by
4696 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA restarts successfully, else it returns a negative error code.
4701 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4703 uint32_t status, i = 0;
4705 /* Read the HBA Host Status Register */
4706 if (lpfc_readl(phba->HSregaddr, &status))
4709 /* Check status register to see what current state is */
4711 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 more retries. Up to 60 seconds before
		 * board ready is required for the Falcon FIPS zeroization to
		 * complete; any board reset in between restarts the
		 * zeroization and further delays board readiness.
		 */
			/* Adapter failed to init, timeout, status reg <status> */
4724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4725 "0436 Adapter failed to init, "
4726 "timeout, status reg x%x, "
4727 "FW Data: A8 x%x AC x%x\n", status,
4728 readl(phba->MBslimaddr + 0xa8),
4729 readl(phba->MBslimaddr + 0xac));
4730 phba->link_state = LPFC_HBA_ERROR;
4734 /* Check to see if any errors occurred during init */
4735 if (status & HS_FFERM) {
4736 /* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg <status> */
4739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4740 "0437 Adapter failed to init, "
4741 "chipset, status reg x%x, "
4742 "FW Data: A8 x%x AC x%x\n", status,
4743 readl(phba->MBslimaddr + 0xa8),
4744 readl(phba->MBslimaddr + 0xac));
4745 phba->link_state = LPFC_HBA_ERROR;
4758 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4759 lpfc_sli_brdrestart(phba);
4761 /* Read the HBA Host Status Register */
4762 if (lpfc_readl(phba->HSregaddr, &status))
4766 /* Check to see if any errors occurred during init */
4767 if (status & HS_FFERM) {
4768 /* ERROR: During chipset initialization */
4769 /* Adapter failed to init, chipset, status reg <status> */
4770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4771 "0438 Adapter failed to init, chipset, "
4773 "FW Data: A8 x%x AC x%x\n", status,
4774 readl(phba->MBslimaddr + 0xa8),
4775 readl(phba->MBslimaddr + 0xac));
4776 phba->link_state = LPFC_HBA_ERROR;
4780 /* Clear all interrupt enable conditions */
4781 writel(0, phba->HCregaddr);
4782 readl(phba->HCregaddr); /* flush */
4784 /* setup host attn register */
4785 writel(0xffffffff, phba->HAregaddr);
4786 readl(phba->HAregaddr); /* flush */
4791 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 * This function calculates and returns the number of HBQs required to be
 * configured.
4797 lpfc_sli_hbq_count(void)
4799 return ARRAY_SIZE(lpfc_hbq_defs);
4803 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4805 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the result.
4810 lpfc_sli_hbq_entry_count(void)
4812 int hbq_count = lpfc_sli_hbq_count();
4816 for (i = 0; i < hbq_count; ++i)
4817 count += lpfc_hbq_defs[i]->entry_count;
4822 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 * This function calculates the amount of memory required for all hbq entries
4825 * to be configured and returns the total memory required.
4828 lpfc_sli_hbq_size(void)
4830 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
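/*
 * Worked example (illustrative numbers): with two HBQs of 256 and 128
 * entries, lpfc_sli_hbq_entry_count() returns 384 and lpfc_sli_hbq_size()
 * returns 384 * sizeof(struct lpfc_hbq_entry) bytes of HBQ memory.
 */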
4834 * lpfc_sli_hbq_setup - configure and initialize HBQs
4835 * @phba: Pointer to HBA context object.
4837 * This function is called during the SLI initialization to configure
4838 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful,
 * else it will return a negative error code.
4843 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4845 int hbq_count = lpfc_sli_hbq_count();
4849 uint32_t hbq_entry_index;
	/* Get a Mailbox buffer to set up mailbox
	 * commands for HBA initialization.
	 */
4854 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4861 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4862 phba->link_state = LPFC_INIT_MBX_CMDS;
4863 phba->hbq_in_use = 1;
4865 hbq_entry_index = 0;
4866 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4867 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4868 phba->hbqs[hbqno].hbqPutIdx = 0;
4869 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4870 phba->hbqs[hbqno].entry_count =
4871 lpfc_hbq_defs[hbqno]->entry_count;
4872 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4873 hbq_entry_index, pmb);
4874 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4876 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4877 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4878 mbxStatus <status>, ring <num> */
4880 lpfc_printf_log(phba, KERN_ERR,
4881 LOG_SLI | LOG_VPORT,
4882 "1805 Adapter failed to init. "
4883 "Data: x%x x%x x%x\n",
4885 pmbox->mbxStatus, hbqno);
4887 phba->link_state = LPFC_HBA_ERROR;
4888 mempool_free(pmb, phba->mbox_mem_pool);
4892 phba->hbq_count = hbq_count;
4894 mempool_free(pmb, phba->mbox_mem_pool);
4896 /* Initially populate or replenish the HBQs */
4897 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4898 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4903 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4904 * @phba: Pointer to HBA context object.
4906 * This function is called during the SLI initialization to configure
4907 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful,
 * else it will return a negative error code.
4912 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4914 phba->hbq_in_use = 1;
4915 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4916 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4917 phba->hbq_count = 1;
4918 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4919 /* Initially populate or replenish the HBQs */
4924 * lpfc_sli_config_port - Issue config port mailbox command
4925 * @phba: Pointer to HBA context object.
4926 * @sli_mode: sli mode - 2/3
4928 * This function is called by the sli initialization code path
4929 * to issue config_port mailbox command. This function restarts the
4930 * HBA firmware and issues a config_port mailbox command to configure
4931 * the SLI interface in the sli mode specified by sli_mode
4932 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else it returns a negative error
 * code.
4937 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4940 uint32_t resetcount = 0, rc = 0, done = 0;
4942 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4944 phba->link_state = LPFC_HBA_ERROR;
4948 phba->sli_rev = sli_mode;
4949 while (resetcount < 2 && !done) {
4950 spin_lock_irq(&phba->hbalock);
4951 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4952 spin_unlock_irq(&phba->hbalock);
4953 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4954 lpfc_sli_brdrestart(phba);
4955 rc = lpfc_sli_chipset_init(phba);
4959 spin_lock_irq(&phba->hbalock);
4960 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4961 spin_unlock_irq(&phba->hbalock);
4964 /* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any
		 * nonzero value is a failure, but if ERESTART is returned,
4967 * the driver may reset the HBA and try again.
4969 rc = lpfc_config_port_prep(phba);
4970 if (rc == -ERESTART) {
4971 phba->link_state = LPFC_LINK_UNKNOWN;
4976 phba->link_state = LPFC_INIT_MBX_CMDS;
4977 lpfc_config_port(phba, pmb);
4978 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4979 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4980 LPFC_SLI3_HBQ_ENABLED |
4981 LPFC_SLI3_CRP_ENABLED |
4982 LPFC_SLI3_DSS_ENABLED);
4983 if (rc != MBX_SUCCESS) {
4984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4985 "0442 Adapter failed to init, mbxCmd x%x "
4986 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4987 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4988 spin_lock_irq(&phba->hbalock);
4989 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4990 spin_unlock_irq(&phba->hbalock);
4993 /* Allow asynchronous mailbox command to go through */
4994 spin_lock_irq(&phba->hbalock);
4995 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4996 spin_unlock_irq(&phba->hbalock);
4999 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5000 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5001 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5002 "3110 Port did not grant ASABT\n");
5007 goto do_prep_failed;
5009 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5010 if (!pmb->u.mb.un.varCfgPort.cMA) {
5012 goto do_prep_failed;
5014 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5015 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5016 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5017 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5018 phba->max_vpi : phba->max_vports;
5022 phba->fips_level = 0;
5023 phba->fips_spec_rev = 0;
5024 if (pmb->u.mb.un.varCfgPort.gdss) {
5025 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5026 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5027 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5028 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5029 "2850 Security Crypto Active. FIPS x%d "
5031 phba->fips_level, phba->fips_spec_rev);
5033 if (pmb->u.mb.un.varCfgPort.sec_err) {
5034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5035 "2856 Config Port Security Crypto "
5037 pmb->u.mb.un.varCfgPort.sec_err);
5039 if (pmb->u.mb.un.varCfgPort.gerbm)
5040 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5041 if (pmb->u.mb.un.varCfgPort.gcrp)
5042 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5044 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5045 phba->port_gp = phba->mbox->us.s3_pgp.port;
5047 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5048 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5049 phba->cfg_enable_bg = 0;
5050 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5052 "0443 Adapter did not grant "
5057 phba->hbq_get = NULL;
5058 phba->port_gp = phba->mbox->us.s2.port;
5062 mempool_free(pmb, phba->mbox_mem_pool);
5068 * lpfc_sli_hba_setup - SLI initialization function
5069 * @phba: Pointer to HBA context object.
5071 * This function is the main SLI initialization function. This function
5072 * is called by the HBA initialization code, HBA reset code and HBA
5073 * error attention handler code. Caller is not required to hold any
5074 * locks. This function issues config_port mailbox command to configure
5075 * the SLI, setup iocb rings and HBQ rings. In the end the function
5076 * calls the config_port_post function to issue init_link mailbox
5077 * command and to start the discovery. The function will return zero
5078 * if successful, else it will return negative error code.
5081 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5087 switch (phba->cfg_sli_mode) {
5089 if (phba->cfg_enable_npiv) {
5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5091 "1824 NPIV enabled: Override sli_mode "
5092 "parameter (%d) to auto (0).\n",
5093 phba->cfg_sli_mode);
5102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5103 "1819 Unrecognized sli_mode parameter: %d.\n",
5104 phba->cfg_sli_mode);
5108 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5110 rc = lpfc_sli_config_port(phba, mode);
5112 if (rc && phba->cfg_sli_mode == 3)
5113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5114 "1820 Unable to select SLI-3. "
5115 "Not supported by adapter.\n");
5116 if (rc && mode != 2)
5117 rc = lpfc_sli_config_port(phba, 2);
5118 else if (rc && mode == 2)
5119 rc = lpfc_sli_config_port(phba, 3);
5121 goto lpfc_sli_hba_setup_error;
5123 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5124 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5125 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5128 "2709 This device supports "
5129 "Advanced Error Reporting (AER)\n");
5130 spin_lock_irq(&phba->hbalock);
5131 phba->hba_flag |= HBA_AER_ENABLED;
5132 spin_unlock_irq(&phba->hbalock);
5134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5135 "2708 This device does not support "
5136 "Advanced Error Reporting (AER): %d\n",
5138 phba->cfg_aer_support = 0;
5142 if (phba->sli_rev == 3) {
5143 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5144 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5146 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5147 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5148 phba->sli3_options = 0;
5151 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5152 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5153 phba->sli_rev, phba->max_vpi);
5154 rc = lpfc_sli_ring_map(phba);
5157 goto lpfc_sli_hba_setup_error;
5159 /* Initialize VPIs. */
5160 if (phba->sli_rev == LPFC_SLI_REV3) {
5162 * The VPI bitmask and physical ID array are allocated
5163 * and initialized once only - at driver load. A port
5164 * reset doesn't need to reinitialize this memory.
5166 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5167 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5168 phba->vpi_bmask = kcalloc(longs,
5169 sizeof(unsigned long),
5171 if (!phba->vpi_bmask) {
5173 goto lpfc_sli_hba_setup_error;
5176 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5179 if (!phba->vpi_ids) {
5180 kfree(phba->vpi_bmask);
5182 goto lpfc_sli_hba_setup_error;
5184 for (i = 0; i < phba->max_vpi; i++)
5185 phba->vpi_ids[i] = i;
5190 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5191 rc = lpfc_sli_hbq_setup(phba);
5193 goto lpfc_sli_hba_setup_error;
5195 spin_lock_irq(&phba->hbalock);
5196 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5197 spin_unlock_irq(&phba->hbalock);
5199 rc = lpfc_config_port_post(phba);
5201 goto lpfc_sli_hba_setup_error;
5205 lpfc_sli_hba_setup_error:
5206 phba->link_state = LPFC_HBA_ERROR;
5207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5208 "0445 Firmware initialization failed\n");
5213 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5214 * @phba: Pointer to HBA context object.
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region, and populates driver
 * config parameters.
5221 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5223 LPFC_MBOXQ_t *mboxq;
5224 struct lpfc_dmabuf *mp;
5225 struct lpfc_mqe *mqe;
5226 uint32_t data_length;
5229 /* Program the default value of vlan_id and fc_map */
5230 phba->valid_vlan = 0;
5231 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5232 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5233 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5235 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5239 mqe = &mboxq->u.mqe;
5240 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5242 goto out_free_mboxq;
5245 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5246 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5249 "(%d):2571 Mailbox cmd x%x Status x%x "
5250 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5251 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5252 "CQ: x%x x%x x%x x%x\n",
5253 mboxq->vport ? mboxq->vport->vpi : 0,
5254 bf_get(lpfc_mqe_command, mqe),
5255 bf_get(lpfc_mqe_status, mqe),
5256 mqe->un.mb_words[0], mqe->un.mb_words[1],
5257 mqe->un.mb_words[2], mqe->un.mb_words[3],
5258 mqe->un.mb_words[4], mqe->un.mb_words[5],
5259 mqe->un.mb_words[6], mqe->un.mb_words[7],
5260 mqe->un.mb_words[8], mqe->un.mb_words[9],
5261 mqe->un.mb_words[10], mqe->un.mb_words[11],
5262 mqe->un.mb_words[12], mqe->un.mb_words[13],
5263 mqe->un.mb_words[14], mqe->un.mb_words[15],
5264 mqe->un.mb_words[16], mqe->un.mb_words[50],
5266 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5267 mboxq->mcqe.trailer);
5270 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5273 goto out_free_mboxq;
5275 data_length = mqe->un.mb_words[5];
5276 if (data_length > DMP_RGN23_SIZE) {
5277 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5280 goto out_free_mboxq;
5283 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5284 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5289 mempool_free(mboxq, phba->mbox_mem_pool);
5294 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5295 * @phba: pointer to lpfc hba data structure.
5296 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5297 * @vpd: pointer to the memory to hold resulting port vpd data.
5298 * @vpd_size: On input, the number of bytes allocated to @vpd.
5299 * On output, the number of data bytes in @vpd.
5301 * This routine executes a READ_REV SLI4 mailbox command. In
5302 * addition, this routine gets the port vpd data.
 *      -ENOMEM - could not allocate memory.
5309 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5310 uint8_t *vpd, uint32_t *vpd_size)
5314 struct lpfc_dmabuf *dmabuf;
5315 struct lpfc_mqe *mqe;
5317 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5322 * Get a DMA buffer for the vpd data resulting from the READ_REV
5325 dma_size = *vpd_size;
5326 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5327 &dmabuf->phys, GFP_KERNEL);
5328 if (!dmabuf->virt) {
5334 * The SLI4 implementation of READ_REV conflicts at word1,
5335 * bits 31:16 and SLI4 adds vpd functionality not present
5336 * in SLI3. This code corrects the conflicts.
5338 lpfc_read_rev(phba, mboxq);
5339 mqe = &mboxq->u.mqe;
5340 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5341 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5342 mqe->un.read_rev.word1 &= 0x0000FFFF;
5343 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5344 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5346 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5348 dma_free_coherent(&phba->pcidev->dev, dma_size,
5349 dmabuf->virt, dmabuf->phys);
5355 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less-than
5357 * case and update the caller's size.
5359 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5360 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5362 memcpy(vpd, dmabuf->virt, *vpd_size);
5364 dma_free_coherent(&phba->pcidev->dev, dma_size,
5365 dmabuf->virt, dmabuf->phys);
5371 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5372 * @phba: pointer to lpfc hba data structure.
 * This routine retrieves the SLI4 device controller attributes (link type,
 * link number, BIOS version) for the PCI function the driver is bound to.
5379 * otherwise - failed to retrieve controller attributes
5382 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5384 LPFC_MBOXQ_t *mboxq;
5385 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5386 struct lpfc_controller_attribute *cntl_attr;
5387 void *virtaddr = NULL;
5388 uint32_t alloclen, reqlen;
5389 uint32_t shdr_status, shdr_add_status;
5390 union lpfc_sli4_cfg_shdr *shdr;
5393 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5397 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5398 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5399 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5400 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5401 LPFC_SLI4_MBX_NEMBED);
5403 if (alloclen < reqlen) {
5404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5405 "3084 Allocated DMA memory size (%d) is "
5406 "less than the requested DMA memory size "
5407 "(%d)\n", alloclen, reqlen);
5409 goto out_free_mboxq;
5411 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5412 virtaddr = mboxq->sge_array->addr[0];
5413 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5414 shdr = &mbx_cntl_attr->cfg_shdr;
5415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5417 if (shdr_status || shdr_add_status || rc) {
5418 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5419 "3085 Mailbox x%x (x%x/x%x) failed, "
5420 "rc:x%x, status:x%x, add_status:x%x\n",
5421 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5422 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5423 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5424 rc, shdr_status, shdr_add_status);
5426 goto out_free_mboxq;
5429 cntl_attr = &mbx_cntl_attr->cntl_attr;
5430 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5431 phba->sli4_hba.lnk_info.lnk_tp =
5432 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5433 phba->sli4_hba.lnk_info.lnk_no =
5434 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5436 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5437 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5438 sizeof(phba->BIOSVersion));
5440 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5441 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5442 phba->sli4_hba.lnk_info.lnk_tp,
5443 phba->sli4_hba.lnk_info.lnk_no,
5446 if (rc != MBX_TIMEOUT) {
5447 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5448 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5450 mempool_free(mboxq, phba->mbox_mem_pool);
5456 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5457 * @phba: pointer to lpfc hba data structure.
 * This routine retrieves the SLI4 device physical port name for the PCI
 * function the driver is bound to.
5464 * otherwise - failed to retrieve physical port name
5467 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5469 LPFC_MBOXQ_t *mboxq;
5470 struct lpfc_mbx_get_port_name *get_port_name;
5471 uint32_t shdr_status, shdr_add_status;
5472 union lpfc_sli4_cfg_shdr *shdr;
5473 char cport_name = 0;
5476 /* We assume nothing at this point */
5477 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5478 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5480 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5483 /* obtain link type and link number via READ_CONFIG */
5484 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5485 lpfc_sli4_read_config(phba);
5486 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5487 goto retrieve_ppname;
5489 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5490 rc = lpfc_sli4_get_ctl_attr(phba);
5492 goto out_free_mboxq;
5495 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5496 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5497 sizeof(struct lpfc_mbx_get_port_name) -
5498 sizeof(struct lpfc_sli4_cfg_mhdr),
5499 LPFC_SLI4_MBX_EMBED);
5500 get_port_name = &mboxq->u.mqe.un.get_port_name;
5501 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5502 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5503 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5504 phba->sli4_hba.lnk_info.lnk_tp);
5505 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5506 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5507 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5508 if (shdr_status || shdr_add_status || rc) {
5509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5510 "3087 Mailbox x%x (x%x/x%x) failed: "
5511 "rc:x%x, status:x%x, add_status:x%x\n",
5512 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5513 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5514 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5515 rc, shdr_status, shdr_add_status);
5517 goto out_free_mboxq;
5519 switch (phba->sli4_hba.lnk_info.lnk_no) {
5520 case LPFC_LINK_NUMBER_0:
5521 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5522 &get_port_name->u.response);
5523 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5525 case LPFC_LINK_NUMBER_1:
5526 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5527 &get_port_name->u.response);
5528 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5530 case LPFC_LINK_NUMBER_2:
5531 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5532 &get_port_name->u.response);
5533 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5535 case LPFC_LINK_NUMBER_3:
5536 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5537 &get_port_name->u.response);
5538 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5544 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5545 phba->Port[0] = cport_name;
5546 phba->Port[1] = '\0';
5547 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5548 "3091 SLI get port name: %s\n", phba->Port);
5552 if (rc != MBX_TIMEOUT) {
5553 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5554 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5556 mempool_free(mboxq, phba->mbox_mem_pool);
5562 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5563 * @phba: pointer to lpfc hba data structure.
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
5569 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5572 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5573 struct lpfc_sli4_hdw_queue *qp;
5574 struct lpfc_queue *eq;
5576 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5577 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5578 if (sli4_hba->nvmels_cq)
5579 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5582 if (sli4_hba->hdwq) {
5583 /* Loop thru all Hardware Queues */
5584 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5585 qp = &sli4_hba->hdwq[qidx];
5586 /* ARM the corresponding CQ */
5587 sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
5589 sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
5593 /* Loop thru all IRQ vectors */
5594 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5595 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5596 /* ARM the corresponding EQ */
5597 sli4_hba->sli4_write_eq_db(phba, eq,
5598 0, LPFC_QUEUE_REARM);
5602 if (phba->nvmet_support) {
5603 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5604 sli4_hba->sli4_write_cq_db(phba,
5605 sli4_hba->nvmet_cqset[qidx], 0,
5612 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5613 * @phba: Pointer to HBA context object.
5614 * @type: The resource extent type.
5615 * @extnt_count: buffer to hold port available extent count.
5616 * @extnt_size: buffer to hold element count per extent.
 * This function calls the port and retrieves the number of available
5619 * extents and their size for a particular extent type.
5621 * Returns: 0 if successful. Nonzero otherwise.
5624 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5625 uint16_t *extnt_count, uint16_t *extnt_size)
5630 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5633 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5637 /* Find out how many extents are available for this resource type */
5638 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5639 sizeof(struct lpfc_sli4_cfg_mhdr));
5640 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5641 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5642 length, LPFC_SLI4_MBX_EMBED);
5644 /* Send an extents count of 0 - the GET doesn't use it. */
5645 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5646 LPFC_SLI4_MBX_EMBED);
5652 if (!phba->sli4_hba.intr_enable)
5653 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5655 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5656 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5663 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5664 if (bf_get(lpfc_mbox_hdr_status,
5665 &rsrc_info->header.cfg_shdr.response)) {
5666 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5667 "2930 Failed to get resource extents "
5668 "Status 0x%x Add'l Status 0x%x\n",
5669 bf_get(lpfc_mbox_hdr_status,
5670 &rsrc_info->header.cfg_shdr.response),
5671 bf_get(lpfc_mbox_hdr_add_status,
5672 &rsrc_info->header.cfg_shdr.response));
5677 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5679 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5682 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5683 "3162 Retrieved extents type-%d from port: count:%d, "
5684 "size:%d\n", type, *extnt_count, *extnt_size);
5687 mempool_free(mbox, phba->mbox_mem_pool);
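/*
 * Editorial note: the issue pattern above recurs throughout the extent
 * helpers in this file. With device interrupts not yet enabled the mailbox
 * must be polled; otherwise the caller sleeps for the command-specific
 * timeout:
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mbox,
 *					      lpfc_mbox_tmo_val(phba, mbox));
 */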
5692 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5693 * @phba: Pointer to HBA context object.
5694 * @type: The extent type to check.
5696 * This function reads the current available extents from the port and checks
5697 * if the extent count or extent size has changed since the last access.
 * Callers use this routine after a port reset to determine if there is an
 * extent reprovisioning requirement.
5702 * -Error: error indicates problem.
5703 * 1: Extent count or size has changed.
5707 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5709 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5710 uint16_t size_diff, rsrc_ext_size;
5712 struct lpfc_rsrc_blks *rsrc_entry;
5713 struct list_head *rsrc_blk_list = NULL;
5717 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5724 case LPFC_RSC_TYPE_FCOE_RPI:
5725 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5727 case LPFC_RSC_TYPE_FCOE_VPI:
5728 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5730 case LPFC_RSC_TYPE_FCOE_XRI:
5731 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5733 case LPFC_RSC_TYPE_FCOE_VFI:
5734 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5740 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5742 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5746 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
 * lpfc_sli4_cfg_post_extnts - Issue an extent allocation request to the port
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
5760 * This function executes the extents allocation request. It also
5761 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
5766 * -Error: Error value describes the condition found.
5770 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5771 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5776 uint32_t alloc_len, mbox_tmo;
5778 /* Calculate the total requested length of the dma memory */
5779 req_len = extnt_cnt * sizeof(uint16_t);
5782 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
5785 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5789 * Presume the allocation and response will fit into an embedded
5790 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5792 *emb = LPFC_SLI4_MBX_EMBED;
5793 if (req_len > emb_len) {
5794 req_len = extnt_cnt * sizeof(uint16_t) +
5795 sizeof(union lpfc_sli4_cfg_shdr) +
5797 *emb = LPFC_SLI4_MBX_NEMBED;
5800 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5801 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5803 if (alloc_len < req_len) {
5804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5805 "2982 Allocated DMA memory size (x%x) is "
5806 "less than the requested DMA memory "
5807 "size (x%x)\n", alloc_len, req_len);
5810 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5814 if (!phba->sli4_hba.intr_enable)
5815 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5818 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
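/*
 * Worked example (illustrative numbers): each allocated extent id is a
 * uint16_t, so a request for 200 extents needs 400 bytes of response space.
 * If that exceeds the embedded payload size (emb_len above), the request is
 * reissued as a non-embedded, SGE-based mailbox with the cfg_shdr and the
 * extents-specific word added to the request length.
 */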
5827 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5828 * @phba: Pointer to HBA context object.
5829 * @type: The resource extent type to allocate.
5831 * This function allocates the number of elements for the specified
5835 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5838 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5839 uint16_t rsrc_id, rsrc_start, j, k;
5842 unsigned long longs;
5843 unsigned long *bmask;
5844 struct lpfc_rsrc_blks *rsrc_blks;
5847 struct lpfc_id_range *id_array = NULL;
5848 void *virtaddr = NULL;
5849 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5850 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5851 struct list_head *ext_blk_list;
5853 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5859 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5860 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5861 "3009 No available Resource Extents "
5862 "for resource type 0x%x: Count: 0x%x, "
5863 "Size 0x%x\n", type, rsrc_cnt,
5868 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5869 "2903 Post resource extents type-0x%x: "
5870 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5872 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5876 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5883 * Figure out where the response is located. Then get local pointers
5884 * to the response data. The port does not guarantee to respond to
	 * all extent count requests, so update the local variable with the
5886 * allocated count from the port.
5888 if (emb == LPFC_SLI4_MBX_EMBED) {
5889 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5890 id_array = &rsrc_ext->u.rsp.id[0];
5891 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5893 virtaddr = mbox->sge_array->addr[0];
5894 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5895 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5896 id_array = &n_rsrc->id;
5899 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5900 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5903 * Based on the resource size and count, correct the base and max
5906 length = sizeof(struct lpfc_rsrc_blks);
5908 case LPFC_RSC_TYPE_FCOE_RPI:
5909 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5910 sizeof(unsigned long),
5912 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5916 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5919 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5920 kfree(phba->sli4_hba.rpi_bmask);
5926 * The next_rpi was initialized with the maximum available
5927 * count but the port may allocate a smaller number. Catch
5928 * that case and update the next_rpi.
5930 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5932 /* Initialize local ptrs for common extent processing later. */
5933 bmask = phba->sli4_hba.rpi_bmask;
5934 ids = phba->sli4_hba.rpi_ids;
5935 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5937 case LPFC_RSC_TYPE_FCOE_VPI:
5938 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5940 if (unlikely(!phba->vpi_bmask)) {
5944 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5946 if (unlikely(!phba->vpi_ids)) {
5947 kfree(phba->vpi_bmask);
5952 /* Initialize local ptrs for common extent processing later. */
5953 bmask = phba->vpi_bmask;
5954 ids = phba->vpi_ids;
5955 ext_blk_list = &phba->lpfc_vpi_blk_list;
5957 case LPFC_RSC_TYPE_FCOE_XRI:
5958 phba->sli4_hba.xri_bmask = kcalloc(longs,
5959 sizeof(unsigned long),
5961 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5965 phba->sli4_hba.max_cfg_param.xri_used = 0;
5966 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5969 if (unlikely(!phba->sli4_hba.xri_ids)) {
5970 kfree(phba->sli4_hba.xri_bmask);
5975 /* Initialize local ptrs for common extent processing later. */
5976 bmask = phba->sli4_hba.xri_bmask;
5977 ids = phba->sli4_hba.xri_ids;
5978 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5980 case LPFC_RSC_TYPE_FCOE_VFI:
5981 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5982 sizeof(unsigned long),
5984 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5988 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5991 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5992 kfree(phba->sli4_hba.vfi_bmask);
5997 /* Initialize local ptrs for common extent processing later. */
5998 bmask = phba->sli4_hba.vfi_bmask;
5999 ids = phba->sli4_hba.vfi_ids;
6000 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6003 /* Unsupported Opcode. Fail call. */
6007 ext_blk_list = NULL;
6012 * Complete initializing the extent configuration with the
6013 * allocated ids assigned to this function. The bitmask serves
6014 * as an index into the array and manages the available ids. The
6015 * array just stores the ids communicated to the port via the wqes.
6017 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6019 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6022 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6025 rsrc_blks = kzalloc(length, GFP_KERNEL);
6026 if (unlikely(!rsrc_blks)) {
6032 rsrc_blks->rsrc_start = rsrc_id;
6033 rsrc_blks->rsrc_size = rsrc_size;
6034 list_add_tail(&rsrc_blks->list, ext_blk_list);
6035 rsrc_start = rsrc_id;
6036 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6037 phba->sli4_hba.io_xri_start = rsrc_start +
6038 lpfc_sli4_get_iocb_cnt(phba);
6041 while (rsrc_id < (rsrc_start + rsrc_size)) {
				/* Entire word processed. Get next word. */
6051 lpfc_sli4_mbox_cmd_free(phba, mbox);
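/*
 * Editorial sketch (hypothetical helper): with the bmask/ids pair built
 * above, handing out one resource id reduces to a bitmap scan. The bitmask
 * tracks availability; the array holds the port-assigned ids that are
 * actually placed in WQEs:
 *
 *	idx = find_first_zero_bit(bmask, rsrc_id_cnt);
 *	if (idx < rsrc_id_cnt) {
 *		set_bit(idx, bmask);
 *		id = ids[idx];
 *	}
 */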
6058 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6059 * @phba: Pointer to HBA context object.
6060 * @type: the extent's type.
6062 * This function deallocates all extents of a particular resource type.
6063 * SLI4 does not allow for deallocating a particular extent range. It
6064 * is the caller's responsibility to release all kernel memory resources.
6067 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6070 uint32_t length, mbox_tmo = 0;
6072 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6073 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6075 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	 * This function sends an embedded mailbox because it only sends
6081 * the resource type. All extents of this type are released by the
6084 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6085 sizeof(struct lpfc_sli4_cfg_mhdr));
6086 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6087 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6088 length, LPFC_SLI4_MBX_EMBED);
6090 /* Send an extents count of 0 - the dealloc doesn't use it. */
6091 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6092 LPFC_SLI4_MBX_EMBED);
6097 if (!phba->sli4_hba.intr_enable)
6098 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6100 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6101 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6108 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6109 if (bf_get(lpfc_mbox_hdr_status,
6110 &dealloc_rsrc->header.cfg_shdr.response)) {
6111 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6112 "2919 Failed to release resource extents "
6113 "for type %d - Status 0x%x Add'l Status 0x%x. "
6114 "Resource memory not released.\n",
6116 bf_get(lpfc_mbox_hdr_status,
6117 &dealloc_rsrc->header.cfg_shdr.response),
6118 bf_get(lpfc_mbox_hdr_add_status,
6119 &dealloc_rsrc->header.cfg_shdr.response));
6124 /* Release kernel memory resources for the specific type. */
6126 case LPFC_RSC_TYPE_FCOE_VPI:
6127 kfree(phba->vpi_bmask);
6128 kfree(phba->vpi_ids);
6129 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6130 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6131 &phba->lpfc_vpi_blk_list, list) {
6132 list_del_init(&rsrc_blk->list);
6135 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6137 case LPFC_RSC_TYPE_FCOE_XRI:
6138 kfree(phba->sli4_hba.xri_bmask);
6139 kfree(phba->sli4_hba.xri_ids);
6140 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6141 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6142 list_del_init(&rsrc_blk->list);
6146 case LPFC_RSC_TYPE_FCOE_VFI:
6147 kfree(phba->sli4_hba.vfi_bmask);
6148 kfree(phba->sli4_hba.vfi_ids);
6149 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6150 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6151 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6152 list_del_init(&rsrc_blk->list);
6156 case LPFC_RSC_TYPE_FCOE_RPI:
6157 /* RPI bitmask and physical id array are cleaned up earlier. */
6158 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6159 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6160 list_del_init(&rsrc_blk->list);
6168 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6171 mempool_free(mbox, phba->mbox_mem_pool);
6176 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6181 len = sizeof(struct lpfc_mbx_set_feature) -
6182 sizeof(struct lpfc_sli4_cfg_mhdr);
6183 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6184 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6185 LPFC_SLI4_MBX_EMBED);
6188 case LPFC_SET_UE_RECOVERY:
6189 bf_set(lpfc_mbx_set_feature_UER,
6190 &mbox->u.mqe.un.set_feature, 1);
6191 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6192 mbox->u.mqe.un.set_feature.param_len = 8;
6194 case LPFC_SET_MDS_DIAGS:
6195 bf_set(lpfc_mbx_set_feature_mds,
6196 &mbox->u.mqe.un.set_feature, 1);
6197 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6198 &mbox->u.mqe.un.set_feature, 1);
6199 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6200 mbox->u.mqe.un.set_feature.param_len = 8;
6208 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6209 * @phba: Pointer to HBA context object.
6211 * Disable FW logging into host memory on the adapter. To
6212 * be done before reading logs from the host memory.
6215 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6217 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6219 ras_fwlog->ras_active = false;
6221 /* Disable FW logging to host memory */
6222 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6223 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6227 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6228 * @phba: Pointer to HBA context object.
6230 * This function is called to free memory allocated for RAS FW logging
6231 * support in the driver.
6234 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6236 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6237 struct lpfc_dmabuf *dmabuf, *next;
6239 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6240 list_for_each_entry_safe(dmabuf, next,
6241 &ras_fwlog->fwlog_buff_list,
6243 list_del(&dmabuf->list);
6244 dma_free_coherent(&phba->pcidev->dev,
6245 LPFC_RAS_MAX_ENTRY_SIZE,
6246 dmabuf->virt, dmabuf->phys);
6251 if (ras_fwlog->lwpd.virt) {
6252 dma_free_coherent(&phba->pcidev->dev,
6253 sizeof(uint32_t) * 2,
6254 ras_fwlog->lwpd.virt,
6255 ras_fwlog->lwpd.phys);
6256 ras_fwlog->lwpd.virt = NULL;
6259 ras_fwlog->ras_active = false;
6263 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6264 * @phba: Pointer to HBA context object.
6265 * @fwlog_buff_count: Count of buffers to be created.
6267 * This routine DMA memory for Log Write Position Data[LPWD] and buffer
6268 * to update FW log is posted to the adapter.
6269 * Buffer count is calculated based on module param ras_fwlog_buffsize
6270 * Size of each buffer posted to FW is 64K.
6274 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6275 uint32_t fwlog_buff_count)
6277 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6278 struct lpfc_dmabuf *dmabuf;
6281 /* Initialize List */
6282 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6284 /* Allocate memory for the LWPD */
6285 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6286 sizeof(uint32_t) * 2,
6287 &ras_fwlog->lwpd.phys,
6289 if (!ras_fwlog->lwpd.virt) {
6290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6291 "6185 LWPD Memory Alloc Failed\n");
6296 ras_fwlog->fw_buffcount = fwlog_buff_count;
6297 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6298 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6302 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6303 "6186 Memory Alloc failed FW logging");
6307 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6308 LPFC_RAS_MAX_ENTRY_SIZE,
6309 &dmabuf->phys, GFP_KERNEL);
6310 if (!dmabuf->virt) {
6313 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6314 "6187 DMA Alloc Failed FW logging");
6317 dmabuf->buffer_tag = i;
6318 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6323 lpfc_sli4_ras_dma_free(phba);
6329 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6330 * @phba: pointer to lpfc hba data structure.
6331 * @pmboxq: pointer to the driver internal queue element for mailbox command.
6333 * Completion handler for driver's RAS MBX command to the device.
6336 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6339 union lpfc_sli4_cfg_shdr *shdr;
6340 uint32_t shdr_status, shdr_add_status;
6341 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6345 shdr = (union lpfc_sli4_cfg_shdr *)
6346 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6347 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6348 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6350 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6351 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6352 "6188 FW LOG mailbox "
6353 "completed with status x%x add_status x%x,"
6354 " mbx status x%x\n",
6355 shdr_status, shdr_add_status, mb->mbxStatus);
6357 ras_fwlog->ras_hwsupport = false;
6361 ras_fwlog->ras_active = true;
6362 mempool_free(pmb, phba->mbox_mem_pool);
6367 /* Free RAS DMA memory */
6368 lpfc_sli4_ras_dma_free(phba);
6369 mempool_free(pmb, phba->mbox_mem_pool);
6373 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6374 * @phba: pointer to lpfc hba data structure.
6375 * @fwlog_level: Logging verbosity level.
6376 * @fwlog_enable: Enable/Disable logging.
6378 * Initialize memory and post mailbox command to enable FW logging in host
6382 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6383 uint32_t fwlog_level,
6384 uint32_t fwlog_enable)
6386 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6387 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6388 struct lpfc_dmabuf *dmabuf;
6390 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6393 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6394 phba->cfg_ras_fwlog_buffsize);
6395 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6398 * If re-enabling FW logging support use earlier allocated
6399 * DMA buffers while posting MBX command.
6401 if (!ras_fwlog->lwpd.virt) {
6402 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6404 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6405 "6189 FW Log Memory Allocation Failed");
6410 /* Setup Mailbox command */
6411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6414 "6190 RAS MBX Alloc Failed");
6419 ras_fwlog->fw_loglevel = fwlog_level;
6420 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6421 sizeof(struct lpfc_sli4_cfg_mhdr));
6423 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6424 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6425 len, LPFC_SLI4_MBX_EMBED);
6427 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6428 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6430 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6431 ras_fwlog->fw_loglevel);
6432 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6433 ras_fwlog->fw_buffcount);
6434 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6435 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6437 /* Update DMA buffer address */
6438 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6439 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6441 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6442 putPaddrLow(dmabuf->phys);
6444 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6445 putPaddrHigh(dmabuf->phys);
6448 /* Update LPWD address */
6449 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6450 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6452 mbox->vport = phba->pport;
6453 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6457 if (rc == MBX_NOT_FINISHED) {
6458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6459 "6191 FW-Log Mailbox failed. "
6460 "status %d mbxStatus : x%x", rc,
6461 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6462 mempool_free(mbox, phba->mbox_mem_pool);
6469 lpfc_sli4_ras_dma_free(phba);
6475 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6476 * @phba: Pointer to HBA context object.
6478 * Check if RAS is supported on the adapter and initialize it.
6481 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6483 /* Check RAS FW Log needs to be enabled or not */
6484 if (lpfc_check_fwlog_support(phba))
6487 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6488 LPFC_RAS_ENABLE_LOGGING);
6492 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6493 * @phba: Pointer to HBA context object.
6495 * This function allocates all SLI4 resource identifiers.
6498 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6500 int i, rc, error = 0;
6501 uint16_t count, base;
6502 unsigned long longs;
6504 if (!phba->sli4_hba.rpi_hdrs_in_use)
6505 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6506 if (phba->sli4_hba.extents_in_use) {
6508 * The port supports resource extents. The XRI, VPI, VFI, RPI
6509 * resource extent count must be read and allocated before
6510 * provisioning the resource id arrays.
6512 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6513 LPFC_IDX_RSRC_RDY) {
6515 * Extent-based resources are set - the driver could
6516 * be in a port reset. Figure out if any corrective
6517 * actions need to be taken.
6519 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6520 LPFC_RSC_TYPE_FCOE_VFI);
6523 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6524 LPFC_RSC_TYPE_FCOE_VPI);
6527 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6528 LPFC_RSC_TYPE_FCOE_XRI);
6531 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6532 LPFC_RSC_TYPE_FCOE_RPI);
6537 * It's possible that the number of resources
6538 * provided to this port instance changed between
6539 * resets. Detect this condition and reallocate
6540 * resources. Otherwise, there is no action.
6543 lpfc_printf_log(phba, KERN_INFO,
6544 LOG_MBOX | LOG_INIT,
6545 "2931 Detected extent resource "
6546 "change. Reallocating all "
6548 rc = lpfc_sli4_dealloc_extent(phba,
6549 LPFC_RSC_TYPE_FCOE_VFI);
6550 rc = lpfc_sli4_dealloc_extent(phba,
6551 LPFC_RSC_TYPE_FCOE_VPI);
6552 rc = lpfc_sli4_dealloc_extent(phba,
6553 LPFC_RSC_TYPE_FCOE_XRI);
6554 rc = lpfc_sli4_dealloc_extent(phba,
6555 LPFC_RSC_TYPE_FCOE_RPI);
6560 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6564 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6568 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6572 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6575 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6580 * The port does not support resource extents. The XRI, VPI,
6581 * VFI, RPI resource ids were determined from READ_CONFIG.
6582 * Just allocate the bitmasks and provision the resource id
6583 * arrays. If a port reset is active, the resources don't
6584 * need any action - just exit.
6586 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6587 LPFC_IDX_RSRC_RDY) {
6588 lpfc_sli4_dealloc_resource_identifiers(phba);
6589 lpfc_sli4_remove_rpis(phba);
6592 count = phba->sli4_hba.max_cfg_param.max_rpi;
6594 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6595 "3279 Invalid provisioning of "
6600 base = phba->sli4_hba.max_cfg_param.rpi_base;
6601 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6602 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6603 sizeof(unsigned long),
6605 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6609 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6611 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6613 goto free_rpi_bmask;
6616 for (i = 0; i < count; i++)
6617 phba->sli4_hba.rpi_ids[i] = base + i;
6620 count = phba->sli4_hba.max_cfg_param.max_vpi;
6622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6623 "3280 Invalid provisioning of "
6628 base = phba->sli4_hba.max_cfg_param.vpi_base;
6629 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6630 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6632 if (unlikely(!phba->vpi_bmask)) {
6636 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6638 if (unlikely(!phba->vpi_ids)) {
6640 goto free_vpi_bmask;
6643 for (i = 0; i < count; i++)
6644 phba->vpi_ids[i] = base + i;
6647 count = phba->sli4_hba.max_cfg_param.max_xri;
6649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6650 "3281 Invalid provisioning of "
6655 base = phba->sli4_hba.max_cfg_param.xri_base;
6656 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6657 phba->sli4_hba.xri_bmask = kcalloc(longs,
6658 sizeof(unsigned long),
6660 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6664 phba->sli4_hba.max_cfg_param.xri_used = 0;
6665 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6667 if (unlikely(!phba->sli4_hba.xri_ids)) {
6669 goto free_xri_bmask;
6672 for (i = 0; i < count; i++)
6673 phba->sli4_hba.xri_ids[i] = base + i;
6676 count = phba->sli4_hba.max_cfg_param.max_vfi;
6678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6679 "3282 Invalid provisioning of "
6684 base = phba->sli4_hba.max_cfg_param.vfi_base;
6685 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6686 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6687 sizeof(unsigned long),
6689 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6693 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6695 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6697 goto free_vfi_bmask;
6700 for (i = 0; i < count; i++)
6701 phba->sli4_hba.vfi_ids[i] = base + i;
6704 * Mark all resources ready. An HBA reset doesn't need
6705 * to reset the initialization.
6707 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6713 kfree(phba->sli4_hba.vfi_bmask);
6714 phba->sli4_hba.vfi_bmask = NULL;
6716 kfree(phba->sli4_hba.xri_ids);
6717 phba->sli4_hba.xri_ids = NULL;
6719 kfree(phba->sli4_hba.xri_bmask);
6720 phba->sli4_hba.xri_bmask = NULL;
6722 kfree(phba->vpi_ids);
6723 phba->vpi_ids = NULL;
6725 kfree(phba->vpi_bmask);
6726 phba->vpi_bmask = NULL;
6728 kfree(phba->sli4_hba.rpi_ids);
6729 phba->sli4_hba.rpi_ids = NULL;
6731 kfree(phba->sli4_hba.rpi_bmask);
6732 phba->sli4_hba.rpi_bmask = NULL;
6738 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6739 * @phba: Pointer to HBA context object.
6741 * This function allocates the number of elements for the specified
6745 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6747 if (phba->sli4_hba.extents_in_use) {
6748 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6749 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6750 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6751 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6753 kfree(phba->vpi_bmask);
6754 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6755 kfree(phba->vpi_ids);
6756 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6757 kfree(phba->sli4_hba.xri_bmask);
6758 kfree(phba->sli4_hba.xri_ids);
6759 kfree(phba->sli4_hba.vfi_bmask);
6760 kfree(phba->sli4_hba.vfi_ids);
6761 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6762 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6769 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6770 * @phba: Pointer to HBA context object.
6771 * @type: The resource extent type.
6772 * @extnt_count: buffer to hold port extent count response
6773 * @extnt_size: buffer to hold port extent size response.
6775 * This function calls the port to read the host allocated extents
6776 * for a particular type.
6779 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6780 uint16_t *extnt_cnt, uint16_t *extnt_size)
6784 uint16_t curr_blks = 0;
6785 uint32_t req_len, emb_len;
6786 uint32_t alloc_len, mbox_tmo;
6787 struct list_head *blk_list_head;
6788 struct lpfc_rsrc_blks *rsrc_blk;
6790 void *virtaddr = NULL;
6791 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6792 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6793 union lpfc_sli4_cfg_shdr *shdr;
6796 case LPFC_RSC_TYPE_FCOE_VPI:
6797 blk_list_head = &phba->lpfc_vpi_blk_list;
6799 case LPFC_RSC_TYPE_FCOE_XRI:
6800 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6802 case LPFC_RSC_TYPE_FCOE_VFI:
6803 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6805 case LPFC_RSC_TYPE_FCOE_RPI:
6806 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6812 /* Count the number of extents currently allocatd for this type. */
6813 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6814 if (curr_blks == 0) {
6816 * The GET_ALLOCATED mailbox does not return the size,
6817 * just the count. The size should be just the size
6818 * stored in the current allocated block and all sizes
6819 * for an extent type are the same so set the return
6822 *extnt_size = rsrc_blk->rsrc_size;
6828 * Calculate the size of an embedded mailbox. The uint32_t
6829 * accounts for extents-specific word.
6831 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6835 * Presume the allocation and response will fit into an embedded
6836 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6838 emb = LPFC_SLI4_MBX_EMBED;
6840 if (req_len > emb_len) {
6841 req_len = curr_blks * sizeof(uint16_t) +
6842 sizeof(union lpfc_sli4_cfg_shdr) +
6844 emb = LPFC_SLI4_MBX_NEMBED;
6847 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6850 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6852 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6853 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6855 if (alloc_len < req_len) {
6856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6857 "2983 Allocated DMA memory size (x%x) is "
6858 "less than the requested DMA memory "
6859 "size (x%x)\n", alloc_len, req_len);
6863 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6869 if (!phba->sli4_hba.intr_enable)
6870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6872 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6882 * Figure out where the response is located. Then get local pointers
6883 * to the response data. The port does not guarantee to respond to
6884 * all extents counts request so update the local variable with the
6885 * allocated count from the port.
6887 if (emb == LPFC_SLI4_MBX_EMBED) {
6888 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6889 shdr = &rsrc_ext->header.cfg_shdr;
6890 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6892 virtaddr = mbox->sge_array->addr[0];
6893 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6894 shdr = &n_rsrc->cfg_shdr;
6895 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6898 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6899 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6900 "2984 Failed to read allocated resources "
6901 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6903 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6904 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6909 lpfc_sli4_mbox_cmd_free(phba, mbox);
6914 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
6915 * @phba: pointer to lpfc hba data structure.
6916 * @pring: Pointer to driver SLI ring object.
6917 * @sgl_list: linked link of sgl buffers to post
6918 * @cnt: number of linked list buffers
6920 * This routine walks the list of buffers that have been allocated and
6921 * repost them to the port by using SGL block post. This is needed after a
6922 * pci_function_reset/warm_start or start. It attempts to construct blocks
6923 * of buffer sgls which contains contiguous xris and uses the non-embedded
6924 * SGL block post mailbox commands to post them to the port. For single
6925 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6926 * mailbox command for posting.
6928 * Returns: 0 = success, non-zero failure.
6931 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6932 struct list_head *sgl_list, int cnt)
6934 struct lpfc_sglq *sglq_entry = NULL;
6935 struct lpfc_sglq *sglq_entry_next = NULL;
6936 struct lpfc_sglq *sglq_entry_first = NULL;
6937 int status, total_cnt;
6938 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6939 int last_xritag = NO_XRI;
6940 LIST_HEAD(prep_sgl_list);
6941 LIST_HEAD(blck_sgl_list);
6942 LIST_HEAD(allc_sgl_list);
6943 LIST_HEAD(post_sgl_list);
6944 LIST_HEAD(free_sgl_list);
6946 spin_lock_irq(&phba->hbalock);
6947 spin_lock(&phba->sli4_hba.sgl_list_lock);
6948 list_splice_init(sgl_list, &allc_sgl_list);
6949 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6950 spin_unlock_irq(&phba->hbalock);
6953 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6954 &allc_sgl_list, list) {
6955 list_del_init(&sglq_entry->list);
6957 if ((last_xritag != NO_XRI) &&
6958 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6959 /* a hole in xri block, form a sgl posting block */
6960 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6961 post_cnt = block_cnt - 1;
6962 /* prepare list for next posting block */
6963 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6966 /* prepare list for next posting block */
6967 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6968 /* enough sgls for non-embed sgl mbox command */
6969 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6970 list_splice_init(&prep_sgl_list,
6972 post_cnt = block_cnt;
6978 /* keep track of last sgl's xritag */
6979 last_xritag = sglq_entry->sli4_xritag;
6981 /* end of repost sgl list condition for buffers */
6982 if (num_posted == total_cnt) {
6983 if (post_cnt == 0) {
6984 list_splice_init(&prep_sgl_list,
6986 post_cnt = block_cnt;
6987 } else if (block_cnt == 1) {
6988 status = lpfc_sli4_post_sgl(phba,
6989 sglq_entry->phys, 0,
6990 sglq_entry->sli4_xritag);
6992 /* successful, put sgl to posted list */
6993 list_add_tail(&sglq_entry->list,
6996 /* Failure, put sgl to free list */
6997 lpfc_printf_log(phba, KERN_WARNING,
6999 "3159 Failed to post "
7000 "sgl, xritag:x%x\n",
7001 sglq_entry->sli4_xritag);
7002 list_add_tail(&sglq_entry->list,
7009 /* continue until a nembed page worth of sgls */
7013 /* post the buffer list sgls as a block */
7014 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7018 /* success, put sgl list to posted sgl list */
7019 list_splice_init(&blck_sgl_list, &post_sgl_list);
7021 /* Failure, put sgl list to free sgl list */
7022 sglq_entry_first = list_first_entry(&blck_sgl_list,
7025 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7026 "3160 Failed to post sgl-list, "
7028 sglq_entry_first->sli4_xritag,
7029 (sglq_entry_first->sli4_xritag +
7031 list_splice_init(&blck_sgl_list, &free_sgl_list);
7032 total_cnt -= post_cnt;
7035 /* don't reset xirtag due to hole in xri block */
7037 last_xritag = NO_XRI;
7039 /* reset sgl post count for next round of posting */
7043 /* free the sgls failed to post */
7044 lpfc_free_sgl_list(phba, &free_sgl_list);
7046 /* push sgls posted to the available list */
7047 if (!list_empty(&post_sgl_list)) {
7048 spin_lock_irq(&phba->hbalock);
7049 spin_lock(&phba->sli4_hba.sgl_list_lock);
7050 list_splice_init(&post_sgl_list, sgl_list);
7051 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7052 spin_unlock_irq(&phba->hbalock);
7054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7055 "3161 Failure to post sgl to port.\n");
7059 /* return the number of XRIs actually posted */
7064 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7065 * @phba: pointer to lpfc hba data structure.
7067 * This routine walks the list of nvme buffers that have been allocated and
7068 * repost them to the port by using SGL block post. This is needed after a
7069 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7070 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7071 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7073 * Returns: 0 = success, non-zero failure.
7076 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7078 LIST_HEAD(post_nblist);
7079 int num_posted, rc = 0;
7081 /* get all NVME buffers need to repost to a local list */
7082 lpfc_io_buf_flush(phba, &post_nblist);
7084 /* post the list of nvme buffer sgls to port if available */
7085 if (!list_empty(&post_nblist)) {
7086 num_posted = lpfc_sli4_post_io_sgl_list(
7087 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7088 /* failed to post any nvme buffer, return error */
7089 if (num_posted == 0)
7096 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7100 len = sizeof(struct lpfc_mbx_set_host_data) -
7101 sizeof(struct lpfc_sli4_cfg_mhdr);
7102 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7103 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7104 LPFC_SLI4_MBX_EMBED);
7106 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7107 mbox->u.mqe.un.set_host_data.param_len =
7108 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7109 snprintf(mbox->u.mqe.un.set_host_data.data,
7110 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7111 "Linux %s v"LPFC_DRIVER_VERSION,
7112 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7116 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7117 struct lpfc_queue *drq, int count, int idx)
7120 struct lpfc_rqe hrqe;
7121 struct lpfc_rqe drqe;
7122 struct lpfc_rqb *rqbp;
7123 unsigned long flags;
7124 struct rqb_dmabuf *rqb_buffer;
7125 LIST_HEAD(rqb_buf_list);
7127 spin_lock_irqsave(&phba->hbalock, flags);
7129 for (i = 0; i < count; i++) {
7130 /* IF RQ is already full, don't bother */
7131 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7133 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7136 rqb_buffer->hrq = hrq;
7137 rqb_buffer->drq = drq;
7138 rqb_buffer->idx = idx;
7139 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7141 while (!list_empty(&rqb_buf_list)) {
7142 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7145 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7146 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7147 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7148 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7149 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7152 "6421 Cannot post to HRQ %d: %x %x %x "
7160 rqbp->rqb_free_buffer(phba, rqb_buffer);
7162 list_add_tail(&rqb_buffer->hbuf.list,
7163 &rqbp->rqb_buffer_list);
7164 rqbp->buffer_count++;
7167 spin_unlock_irqrestore(&phba->hbalock, flags);
7172 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7173 * @phba: Pointer to HBA context object.
7175 * This function is the main SLI4 device initialization PCI function. This
7176 * function is called by the HBA initialization code, HBA reset code and
7177 * HBA error attention handler code. Caller is not required to hold any
7181 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7183 int rc, i, cnt, len;
7184 LPFC_MBOXQ_t *mboxq;
7185 struct lpfc_mqe *mqe;
7188 uint32_t ftr_rsp = 0;
7189 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7190 struct lpfc_vport *vport = phba->pport;
7191 struct lpfc_dmabuf *mp;
7192 struct lpfc_rqb *rqbp;
7194 /* Perform a PCI function reset to start from clean */
7195 rc = lpfc_pci_function_reset(phba);
7199 /* Check the HBA Host Status Register for readyness */
7200 rc = lpfc_sli4_post_status_check(phba);
7204 spin_lock_irq(&phba->hbalock);
7205 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7206 spin_unlock_irq(&phba->hbalock);
7210 * Allocate a single mailbox container for initializing the
7213 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7217 /* Issue READ_REV to collect vpd and FW information. */
7218 vpd_size = SLI4_PAGE_SIZE;
7219 vpd = kzalloc(vpd_size, GFP_KERNEL);
7225 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7231 mqe = &mboxq->u.mqe;
7232 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7233 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7234 phba->hba_flag |= HBA_FCOE_MODE;
7235 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7237 phba->hba_flag &= ~HBA_FCOE_MODE;
7240 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7242 phba->hba_flag |= HBA_FIP_SUPPORT;
7244 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7246 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7248 if (phba->sli_rev != LPFC_SLI_REV4) {
7249 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7250 "0376 READ_REV Error. SLI Level %d "
7251 "FCoE enabled %d\n",
7252 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7259 * Continue initialization with default values even if driver failed
7260 * to read FCoE param config regions, only read parameters if the
7263 if (phba->hba_flag & HBA_FCOE_MODE &&
7264 lpfc_sli4_read_fcoe_params(phba))
7265 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7266 "2570 Failed to read FCoE parameters\n");
7269 * Retrieve sli4 device physical port name, failure of doing it
7270 * is considered as non-fatal.
7272 rc = lpfc_sli4_retrieve_pport_name(phba);
7274 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7275 "3080 Successful retrieving SLI4 device "
7276 "physical port name: %s.\n", phba->Port);
7278 rc = lpfc_sli4_get_ctl_attr(phba);
7280 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7281 "8351 Successful retrieving SLI4 device "
7285 * Evaluate the read rev and vpd data. Populate the driver
7286 * state with the results. If this routine fails, the failure
7287 * is not fatal as the driver will use generic values.
7289 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7290 if (unlikely(!rc)) {
7291 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7292 "0377 Error %d parsing vpd. "
7293 "Using defaults.\n", rc);
7298 /* Save information as VPD data */
7299 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7300 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7303 * This is because first G7 ASIC doesn't support the standard
7304 * 0x5a NVME cmd descriptor type/subtype
7306 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7307 LPFC_SLI_INTF_IF_TYPE_6) &&
7308 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7309 (phba->vpd.rev.smRev == 0) &&
7310 (phba->cfg_nvme_embed_cmd == 1))
7311 phba->cfg_nvme_embed_cmd = 0;
7313 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7314 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7316 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7318 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7320 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7322 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7323 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7324 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7325 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7326 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7327 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7328 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7329 "(%d):0380 READ_REV Status x%x "
7330 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7331 mboxq->vport ? mboxq->vport->vpi : 0,
7332 bf_get(lpfc_mqe_status, mqe),
7333 phba->vpd.rev.opFwName,
7334 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7335 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7337 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7338 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7339 if (phba->pport->cfg_lun_queue_depth > rc) {
7340 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7341 "3362 LUN queue depth changed from %d to %d\n",
7342 phba->pport->cfg_lun_queue_depth, rc);
7343 phba->pport->cfg_lun_queue_depth = rc;
7346 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7347 LPFC_SLI_INTF_IF_TYPE_0) {
7348 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7349 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7350 if (rc == MBX_SUCCESS) {
7351 phba->hba_flag |= HBA_RECOVERABLE_UE;
7352 /* Set 1Sec interval to detect UE */
7353 phba->eratt_poll_interval = 1;
7354 phba->sli4_hba.ue_to_sr = bf_get(
7355 lpfc_mbx_set_feature_UESR,
7356 &mboxq->u.mqe.un.set_feature);
7357 phba->sli4_hba.ue_to_rp = bf_get(
7358 lpfc_mbx_set_feature_UERP,
7359 &mboxq->u.mqe.un.set_feature);
7363 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7364 /* Enable MDS Diagnostics only if the SLI Port supports it */
7365 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7366 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7367 if (rc != MBX_SUCCESS)
7368 phba->mds_diags_support = 0;
7372 * Discover the port's supported feature set and match it against the
7375 lpfc_request_features(phba, mboxq);
7376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7383 * The port must support FCP initiator mode as this is the
7384 * only mode running in the host.
7386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7388 "0378 No support for fcpi mode.\n");
7392 /* Performance Hints are ONLY for FCoE */
7393 if (phba->hba_flag & HBA_FCOE_MODE) {
7394 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7395 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7397 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7401 * If the port cannot support the host's requested features
7402 * then turn off the global config parameters to disable the
7403 * feature in the driver. This is not a fatal error.
7405 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7406 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7407 phba->cfg_enable_bg = 0;
7408 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7413 if (phba->max_vpi && phba->cfg_enable_npiv &&
7414 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7418 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7419 "0379 Feature Mismatch Data: x%08x %08x "
7420 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7421 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7422 phba->cfg_enable_npiv, phba->max_vpi);
7423 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7424 phba->cfg_enable_bg = 0;
7425 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7426 phba->cfg_enable_npiv = 0;
7429 /* These SLI3 features are assumed in SLI4 */
7430 spin_lock_irq(&phba->hbalock);
7431 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7432 spin_unlock_irq(&phba->hbalock);
7435 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7436 * calls depends on these resources to complete port setup.
7438 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7441 "2920 Failed to alloc Resource IDs "
7446 lpfc_set_host_data(phba, mboxq);
7448 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7450 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7451 "2134 Failed to set host os driver version %x",
7455 /* Read the port's service parameters. */
7456 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7458 phba->link_state = LPFC_HBA_ERROR;
7463 mboxq->vport = vport;
7464 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7465 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7466 if (rc == MBX_SUCCESS) {
7467 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7472 * This memory was allocated by the lpfc_read_sparam routine. Release
7473 * it to the mbuf pool.
7475 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7477 mboxq->ctx_buf = NULL;
7479 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7480 "0382 READ_SPARAM command failed "
7481 "status %d, mbxStatus x%x\n",
7482 rc, bf_get(lpfc_mqe_status, mqe));
7483 phba->link_state = LPFC_HBA_ERROR;
7488 lpfc_update_vport_wwn(vport);
7490 /* Update the fc_host data structures with new wwn. */
7491 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7492 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7494 /* Create all the SLI4 queues */
7495 rc = lpfc_sli4_queue_create(phba);
7497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7498 "3089 Failed to allocate queues\n");
7502 /* Set up all the queues to the device */
7503 rc = lpfc_sli4_queue_setup(phba);
7505 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7506 "0381 Error %d during queue setup.\n ", rc);
7507 goto out_stop_timers;
7509 /* Initialize the driver internal SLI layer lists. */
7510 lpfc_sli4_setup(phba);
7511 lpfc_sli4_queue_init(phba);
7513 /* update host els xri-sgl sizes and mappings */
7514 rc = lpfc_sli4_els_sgl_update(phba);
7516 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7517 "1400 Failed to update xri-sgl size and "
7518 "mapping: %d\n", rc);
7519 goto out_destroy_queue;
7522 /* register the els sgl pool to the port */
7523 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7524 phba->sli4_hba.els_xri_cnt);
7525 if (unlikely(rc < 0)) {
7526 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7527 "0582 Error %d during els sgl post "
7530 goto out_destroy_queue;
7532 phba->sli4_hba.els_xri_cnt = rc;
7534 if (phba->nvmet_support) {
7535 /* update host nvmet xri-sgl sizes and mappings */
7536 rc = lpfc_sli4_nvmet_sgl_update(phba);
7538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7539 "6308 Failed to update nvmet-sgl size "
7540 "and mapping: %d\n", rc);
7541 goto out_destroy_queue;
7544 /* register the nvmet sgl pool to the port */
7545 rc = lpfc_sli4_repost_sgl_list(
7547 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7548 phba->sli4_hba.nvmet_xri_cnt);
7549 if (unlikely(rc < 0)) {
7550 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7551 "3117 Error %d during nvmet "
7554 goto out_destroy_queue;
7556 phba->sli4_hba.nvmet_xri_cnt = rc;
7558 cnt = phba->cfg_iocb_cnt * 1024;
7559 /* We need 1 iocbq for every SGL, for IO processing */
7560 cnt += phba->sli4_hba.nvmet_xri_cnt;
7562 /* update host common xri-sgl sizes and mappings */
7563 rc = lpfc_sli4_io_sgl_update(phba);
7565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7566 "6082 Failed to update nvme-sgl size "
7567 "and mapping: %d\n", rc);
7568 goto out_destroy_queue;
7571 /* register the allocated common sgl pool to the port */
7572 rc = lpfc_sli4_repost_io_sgl_list(phba);
7574 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7575 "6116 Error %d during nvme sgl post "
7577 /* Some NVME buffers were moved to abort nvme list */
7578 /* A pci function reset will repost them */
7580 goto out_destroy_queue;
7582 cnt = phba->cfg_iocb_cnt * 1024;
7585 if (!phba->sli.iocbq_lookup) {
7586 /* Initialize and populate the iocb list per host */
7587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7588 "2821 initialize iocb list %d total %d\n",
7589 phba->cfg_iocb_cnt, cnt);
7590 rc = lpfc_init_iocb_list(phba, cnt);
7592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7593 "1413 Failed to init iocb list.\n");
7594 goto out_destroy_queue;
7598 if (phba->nvmet_support)
7599 lpfc_nvmet_create_targetport(phba);
7601 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7602 /* Post initial buffers to all RQs created */
7603 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7604 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7605 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7606 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7607 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7608 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7609 rqbp->buffer_count = 0;
7611 lpfc_post_rq_buffer(
7612 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7613 phba->sli4_hba.nvmet_mrq_data[i],
7614 phba->cfg_nvmet_mrq_post, i);
7618 /* Post the rpi header region to the device. */
7619 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7621 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7622 "0393 Error %d during rpi post operation\n",
7625 goto out_destroy_queue;
7627 lpfc_sli4_node_prep(phba);
7629 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7630 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7632 * The FC Port needs to register FCFI (index 0)
7634 lpfc_reg_fcfi(phba, mboxq);
7635 mboxq->vport = phba->pport;
7636 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7637 if (rc != MBX_SUCCESS)
7638 goto out_unset_queue;
7640 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7641 &mboxq->u.mqe.un.reg_fcfi);
7643 /* We are a NVME Target mode with MRQ > 1 */
7645 /* First register the FCFI */
7646 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7647 mboxq->vport = phba->pport;
7648 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7649 if (rc != MBX_SUCCESS)
7650 goto out_unset_queue;
7652 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7653 &mboxq->u.mqe.un.reg_fcfi_mrq);
7655 /* Next register the MRQs */
7656 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7657 mboxq->vport = phba->pport;
7658 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7659 if (rc != MBX_SUCCESS)
7660 goto out_unset_queue;
7663 /* Check if the port is configured to be disabled */
7664 lpfc_sli_read_link_ste(phba);
7667 /* Don't post more new bufs if repost already recovered
7670 if (phba->nvmet_support == 0) {
7671 if (phba->sli4_hba.io_xri_cnt == 0) {
7672 len = lpfc_new_io_buf(
7673 phba, phba->sli4_hba.io_xri_max);
7676 goto out_unset_queue;
7679 if (phba->cfg_xri_rebalancing)
7680 lpfc_create_multixri_pools(phba);
7683 phba->cfg_xri_rebalancing = 0;
7686 /* Allow asynchronous mailbox command to go through */
7687 spin_lock_irq(&phba->hbalock);
7688 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7689 spin_unlock_irq(&phba->hbalock);
7691 /* Post receive buffers to the device */
7692 lpfc_sli4_rb_setup(phba);
7694 /* Reset HBA FCF states after HBA reset */
7695 phba->fcf.fcf_flag = 0;
7696 phba->fcf.current_rec.flag = 0;
7698 /* Start the ELS watchdog timer */
7699 mod_timer(&vport->els_tmofunc,
7700 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7702 /* Start heart beat timer */
7703 mod_timer(&phba->hb_tmofunc,
7704 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7705 phba->hb_outstanding = 0;
7706 phba->last_completion_time = jiffies;
7708 /* start eq_delay heartbeat */
7709 if (phba->cfg_auto_imax)
7710 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7711 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7713 /* Start error attention (ERATT) polling timer */
7714 mod_timer(&phba->eratt_poll,
7715 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7717 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7718 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7719 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7722 "2829 This device supports "
7723 "Advanced Error Reporting (AER)\n");
7724 spin_lock_irq(&phba->hbalock);
7725 phba->hba_flag |= HBA_AER_ENABLED;
7726 spin_unlock_irq(&phba->hbalock);
7728 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7729 "2830 This device does not support "
7730 "Advanced Error Reporting (AER)\n");
7731 phba->cfg_aer_support = 0;
7737 * The port is ready, set the host's link state to LINK_DOWN
7738 * in preparation for link interrupts.
7740 spin_lock_irq(&phba->hbalock);
7741 phba->link_state = LPFC_LINK_DOWN;
7743 /* Check if physical ports are trunked */
7744 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7745 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7746 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7747 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7748 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7749 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7750 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7751 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7752 spin_unlock_irq(&phba->hbalock);
7754 /* Arm the CQs and then EQs on device */
7755 lpfc_sli4_arm_cqeq_intr(phba);
7757 /* Indicate device interrupt mode */
7758 phba->sli4_hba.intr_enable = 1;
7760 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7761 (phba->hba_flag & LINK_DISABLED)) {
7762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7763 "3103 Adapter Link is disabled.\n");
7764 lpfc_down_link(phba, mboxq);
7765 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7766 if (rc != MBX_SUCCESS) {
7767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7768 "3104 Adapter failed to issue "
7769 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7770 goto out_io_buff_free;
7772 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7773 /* don't perform init_link on SLI4 FC port loopback test */
7774 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7775 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7777 goto out_io_buff_free;
7780 mempool_free(mboxq, phba->mbox_mem_pool);
7783 /* Free allocated IO Buffers */
7786 /* Unset all the queues set up in this routine when error out */
7787 lpfc_sli4_queue_unset(phba);
7789 lpfc_free_iocb_list(phba);
7790 lpfc_sli4_queue_destroy(phba);
7792 lpfc_stop_hba_timers(phba);
7794 mempool_free(mboxq, phba->mbox_mem_pool);
7799 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7800 * @ptr: context object - pointer to hba structure.
7802 * This is the callback function for mailbox timer. The mailbox
7803 * timer is armed when a new mailbox command is issued and the timer
7804 * is deleted when the mailbox complete. The function is called by
7805 * the kernel timer code when a mailbox does not complete within
7806 * expected time. This function wakes up the worker thread to
7807 * process the mailbox timeout and returns. All the processing is
7808 * done by the worker thread function lpfc_mbox_timeout_handler.
7811 lpfc_mbox_timeout(struct timer_list *t)
7813 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7814 unsigned long iflag;
7815 uint32_t tmo_posted;
7817 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7818 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7820 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7821 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7824 lpfc_worker_wake_up(phba);
7829 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7831 * @phba: Pointer to HBA context object.
7833 * This function checks if any mailbox completions are present on the mailbox
7837 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7841 struct lpfc_queue *mcq;
7842 struct lpfc_mcqe *mcqe;
7843 bool pending_completions = false;
7846 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7849 /* Check for completions on mailbox completion queue */
7851 mcq = phba->sli4_hba.mbx_cq;
7852 idx = mcq->hba_index;
7853 qe_valid = mcq->qe_valid;
7854 while (bf_get_le32(lpfc_cqe_valid,
7855 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7856 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7857 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7858 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7859 pending_completions = true;
7862 idx = (idx + 1) % mcq->entry_count;
7863 if (mcq->hba_index == idx)
7866 /* if the index wrapped around, toggle the valid bit */
7867 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7868 qe_valid = (qe_valid) ? 0 : 1;
7870 return pending_completions;
7875 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7877 * @phba: Pointer to HBA context object.
7879 * For sli4, it is possible to miss an interrupt. As such mbox completions
7880 * maybe missed causing erroneous mailbox timeouts to occur. This function
7881 * checks to see if mbox completions are on the mailbox completion queue
7882 * and will process all the completions associated with the eq for the
7883 * mailbox completion queue.
7886 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7888 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7890 struct lpfc_queue *fpeq = NULL;
7891 struct lpfc_queue *eq;
7894 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7897 /* Find the EQ associated with the mbox CQ */
7898 if (sli4_hba->hdwq) {
7899 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7900 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7901 if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7910 /* Turn off interrupts from this EQ */
7912 sli4_hba->sli4_eq_clr_intr(fpeq);
7914 /* Check to see if a mbox completion is pending */
7916 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7919 * If a mbox completion is pending, process all the events on EQ
7920 * associated with the mbox completion queue (this could include
7921 * mailbox commands, async events, els commands, receive queue data
7926 /* process and rearm the EQ */
7927 lpfc_sli4_process_eq(phba, fpeq);
7929 /* Always clear and re-arm the EQ */
7930 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7932 return mbox_pending;
7937 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7938 * @phba: Pointer to HBA context object.
7940 * This function is called from worker thread when a mailbox command times out.
7941 * The caller is not required to hold any locks. This function will reset the
7942 * HBA and recover all the pending commands.
7945 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7947 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7948 MAILBOX_t *mb = NULL;
7950 struct lpfc_sli *psli = &phba->sli;
7952 /* If the mailbox completed, process the completion and return */
7953 if (lpfc_sli4_process_missed_mbox_completions(phba))
7958 /* Check the pmbox pointer first. There is a race condition
7959 * between the mbox timeout handler getting executed in the
7960 * worklist and the mailbox actually completing. When this
7961 * race condition occurs, the mbox_active will be NULL.
7963 spin_lock_irq(&phba->hbalock);
7964 if (pmbox == NULL) {
7965 lpfc_printf_log(phba, KERN_WARNING,
7967 "0353 Active Mailbox cleared - mailbox timeout "
7969 spin_unlock_irq(&phba->hbalock);
7973 /* Mbox cmd <mbxCommand> timeout */
7974 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7975 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7977 phba->pport->port_state,
7979 phba->sli.mbox_active);
7980 spin_unlock_irq(&phba->hbalock);
7982 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7983 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7984 * it to fail all outstanding SCSI IO.
7986 spin_lock_irq(&phba->pport->work_port_lock);
7987 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7988 spin_unlock_irq(&phba->pport->work_port_lock);
7989 spin_lock_irq(&phba->hbalock);
7990 phba->link_state = LPFC_LINK_UNKNOWN;
7991 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7992 spin_unlock_irq(&phba->hbalock);
7994 lpfc_sli_abort_fcp_rings(phba);
7996 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7997 "0345 Resetting board due to mailbox timeout\n");
7999 /* Reset the HBA device */
8000 lpfc_reset_hba(phba);
8004 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8005 * @phba: Pointer to HBA context object.
8006 * @pmbox: Pointer to mailbox object.
8007 * @flag: Flag indicating how the mailbox need to be processed.
8009 * This function is called by discovery code and HBA management code
8010 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8011 * function gets the hbalock to protect the data structures.
8012 * The mailbox command can be submitted in polling mode, in which case
8013 * this function will wait in a polling loop for the completion of the
8015 * If the mailbox is submitted in no_wait mode (not polling) the
8016 * function will submit the command and returns immediately without waiting
8017 * for the mailbox completion. The no_wait is supported only when HBA
8018 * is in SLI2/SLI3 mode - interrupts are enabled.
8019 * The SLI interface allows only one mailbox pending at a time. If the
8020 * mailbox is issued in polling mode and there is already a mailbox
8021 * pending, then the function will return an error. If the mailbox is issued
8022 * in NO_WAIT mode and there is a mailbox pending already, the function
8023 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8024 * The sli layer owns the mailbox object until the completion of mailbox
8025 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
8026 * return codes the caller owns the mailbox command after the return of
8030 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8034 struct lpfc_sli *psli = &phba->sli;
8035 uint32_t status, evtctr;
8036 uint32_t ha_copy, hc_copy;
8038 unsigned long timeout;
8039 unsigned long drvr_flag = 0;
8040 uint32_t word0, ldata;
8041 void __iomem *to_slim;
8042 int processing_queue = 0;
8044 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8046 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8047 /* processing mbox queue from intr_handler */
8048 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8049 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8052 processing_queue = 1;
8053 pmbox = lpfc_mbox_get(phba);
8055 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8060 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8061 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8063 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8064 lpfc_printf_log(phba, KERN_ERR,
8065 LOG_MBOX | LOG_VPORT,
8066 "1806 Mbox x%x failed. No vport\n",
8067 pmbox->u.mb.mbxCommand);
8069 goto out_not_finished;
8073 /* If the PCI channel is in offline state, do not post mbox. */
8074 if (unlikely(pci_channel_offline(phba->pcidev))) {
8075 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8076 goto out_not_finished;
8079 /* If HBA has a deferred error attention, fail the iocb. */
8080 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8081 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8082 goto out_not_finished;
8088 status = MBX_SUCCESS;
8090 if (phba->link_state == LPFC_HBA_ERROR) {
8091 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8093 /* Mbox command <mbxCommand> cannot issue */
8094 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8095 "(%d):0311 Mailbox command x%x cannot "
8096 "issue Data: x%x x%x\n",
8097 pmbox->vport ? pmbox->vport->vpi : 0,
8098 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8099 goto out_not_finished;
8102 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8103 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8104 !(hc_copy & HC_MBINT_ENA)) {
8105 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8107 "(%d):2528 Mailbox command x%x cannot "
8108 "issue Data: x%x x%x\n",
8109 pmbox->vport ? pmbox->vport->vpi : 0,
8110 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8111 goto out_not_finished;
8115 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8116 /* Polling for a mbox command when another one is already active
8117 * is not allowed in SLI. Also, the driver must have established
8118 * SLI2 mode to queue and process multiple mbox commands.
8121 if (flag & MBX_POLL) {
8122 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8124 /* Mbox command <mbxCommand> cannot issue */
8125 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8126 "(%d):2529 Mailbox command x%x "
8127 "cannot issue Data: x%x x%x\n",
8128 pmbox->vport ? pmbox->vport->vpi : 0,
8129 pmbox->u.mb.mbxCommand,
8130 psli->sli_flag, flag);
8131 goto out_not_finished;
8134 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8135 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8136 /* Mbox command <mbxCommand> cannot issue */
8137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8138 "(%d):2530 Mailbox command x%x "
8139 "cannot issue Data: x%x x%x\n",
8140 pmbox->vport ? pmbox->vport->vpi : 0,
8141 pmbox->u.mb.mbxCommand,
8142 psli->sli_flag, flag);
8143 goto out_not_finished;
8146 /* Another mailbox command is still being processed, queue this
8147 * command to be processed later.
8149 lpfc_mbox_put(phba, pmbox);
8151 /* Mbox cmd issue - BUSY */
8152 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8153 "(%d):0308 Mbox cmd issue - BUSY Data: "
8154 "x%x x%x x%x x%x\n",
8155 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8157 phba->pport ? phba->pport->port_state : 0xff,
8158 psli->sli_flag, flag);
8160 psli->slistat.mbox_busy++;
8161 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8164 lpfc_debugfs_disc_trc(pmbox->vport,
8165 LPFC_DISC_TRC_MBOX_VPORT,
8166 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8167 (uint32_t)mbx->mbxCommand,
8168 mbx->un.varWords[0], mbx->un.varWords[1]);
8171 lpfc_debugfs_disc_trc(phba->pport,
8173 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8174 (uint32_t)mbx->mbxCommand,
8175 mbx->un.varWords[0], mbx->un.varWords[1]);
8181 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8183 /* If we are not polling, we MUST be in SLI2 mode */
8184 if (flag != MBX_POLL) {
8185 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8186 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8187 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8188 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8189 /* Mbox command <mbxCommand> cannot issue */
8190 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8191 "(%d):2531 Mailbox command x%x "
8192 "cannot issue Data: x%x x%x\n",
8193 pmbox->vport ? pmbox->vport->vpi : 0,
8194 pmbox->u.mb.mbxCommand,
8195 psli->sli_flag, flag);
8196 goto out_not_finished;
8198 /* timeout active mbox command */
8199 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8201 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8204 /* Mailbox cmd <cmd> issue */
8205 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8206 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8208 pmbox->vport ? pmbox->vport->vpi : 0,
8210 phba->pport ? phba->pport->port_state : 0xff,
8211 psli->sli_flag, flag);
8213 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8215 lpfc_debugfs_disc_trc(pmbox->vport,
8216 LPFC_DISC_TRC_MBOX_VPORT,
8217 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8218 (uint32_t)mbx->mbxCommand,
8219 mbx->un.varWords[0], mbx->un.varWords[1]);
8222 lpfc_debugfs_disc_trc(phba->pport,
8224 "MBOX Send: cmd:x%x mb:x%x x%x",
8225 (uint32_t)mbx->mbxCommand,
8226 mbx->un.varWords[0], mbx->un.varWords[1]);
8230 psli->slistat.mbox_cmd++;
8231 evtctr = psli->slistat.mbox_event;
8233 /* next set own bit for the adapter and copy over command word */
8234 mbx->mbxOwner = OWN_CHIP;
8236 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8237 /* Populate mbox extension offset word. */
8238 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8239 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8240 = (uint8_t *)phba->mbox_ext
8241 - (uint8_t *)phba->mbox;
8244 /* Copy the mailbox extension data */
8245 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8246 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8247 (uint8_t *)phba->mbox_ext,
8248 pmbox->in_ext_byte_len);
8250 /* Copy command data to host SLIM area */
8251 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8253 /* Populate mbox extension offset word. */
8254 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8255 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8256 = MAILBOX_HBA_EXT_OFFSET;
8258 /* Copy the mailbox extension data */
8259 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8260 lpfc_memcpy_to_slim(phba->MBslimaddr +
8261 MAILBOX_HBA_EXT_OFFSET,
8262 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8264 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8265 /* copy command data into host mbox for cmpl */
8266 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8269 /* First copy mbox command data to HBA SLIM, skip past first word */
8271 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8272 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8273 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8275 /* Next copy over first word, with mbxOwner set */
8276 ldata = *((uint32_t *)mbx);
8277 to_slim = phba->MBslimaddr;
8278 writel(ldata, to_slim);
8279 readl(to_slim); /* flush */
8281 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8282 /* switch over to host mailbox */
8283 psli->sli_flag |= LPFC_SLI_ACTIVE;
8290 /* Set up reference to mailbox command */
8291 psli->mbox_active = pmbox;
8292 /* Interrupt board to do it */
8293 writel(CA_MBATT, phba->CAregaddr);
8294 readl(phba->CAregaddr); /* flush */
8295 /* Don't wait for it to finish, just return */
8299 /* Set up null reference to mailbox command */
8300 psli->mbox_active = NULL;
8301 /* Interrupt board to do it */
8302 writel(CA_MBATT, phba->CAregaddr);
8303 readl(phba->CAregaddr); /* flush */
8305 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8306 /* First read mbox status word */
8307 word0 = *((uint32_t *)phba->mbox);
8308 word0 = le32_to_cpu(word0);
8310 /* First read mbox status word */
8311 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8312 spin_unlock_irqrestore(&phba->hbalock,
8314 goto out_not_finished;
8318 /* Read the HBA Host Attention Register */
8319 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8320 spin_unlock_irqrestore(&phba->hbalock,
8322 goto out_not_finished;
8324 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8327 /* Wait for command to complete */
8328 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8329 (!(ha_copy & HA_MBATT) &&
8330 (phba->link_state > LPFC_WARM_START))) {
8331 if (time_after(jiffies, timeout)) {
8332 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8333 spin_unlock_irqrestore(&phba->hbalock,
8335 goto out_not_finished;
8338 /* Check if we took a mbox interrupt while we were polling */
8340 if (((word0 & OWN_CHIP) != OWN_CHIP)
8341 && (evtctr != psli->slistat.mbox_event))
8345 spin_unlock_irqrestore(&phba->hbalock,
8348 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8351 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8352 /* First copy command data */
8353 word0 = *((uint32_t *)phba->mbox);
8354 word0 = le32_to_cpu(word0);
8355 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8358 /* Check real SLIM for any errors */
8359 slimword0 = readl(phba->MBslimaddr);
8360 slimmb = (MAILBOX_t *) & slimword0;
8361 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8362 && slimmb->mbxStatus) {
8369 /* First copy command data */
8370 word0 = readl(phba->MBslimaddr);
8372 /* Read the HBA Host Attention Register */
8373 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8374 spin_unlock_irqrestore(&phba->hbalock,
8376 goto out_not_finished;
8380 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8381 /* copy results back to user */
8382 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8384 /* Copy the mailbox extension data */
8385 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8386 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8388 pmbox->out_ext_byte_len);
8391 /* First copy command data */
8392 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8394 /* Copy the mailbox extension data */
8395 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8396 lpfc_memcpy_from_slim(
8399 MAILBOX_HBA_EXT_OFFSET,
8400 pmbox->out_ext_byte_len);
8404 writel(HA_MBATT, phba->HAregaddr);
8405 readl(phba->HAregaddr); /* flush */
8407 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8408 status = mbx->mbxStatus;
8411 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8415 if (processing_queue) {
8416 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8417 lpfc_mbox_cmpl_put(phba, pmbox);
8419 return MBX_NOT_FINISHED;
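/*
 * A minimal caller-side sketch of polling-mode usage (illustrative only,
 * assuming a mailbox builder such as lpfc_read_rev()):
 *
 *	LPFC_MBOXQ_t *pmb;
 *	int rc;
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	mempool_free(pmb, phba->mbox_mem_pool);
 *
 * With MBX_POLL the caller owns pmb again on return, success or not; with
 * MBX_NOWAIT a return of MBX_BUSY or MBX_SUCCESS leaves ownership with the
 * SLI layer until the completion handler runs.
 */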
8423 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8424 * @phba: Pointer to HBA context object.
8426 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8427 * the driver internal pending mailbox queue. It will then try to wait out the
8428 * possible outstanding mailbox command before returning.
8431 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8432 * the outstanding mailbox command timed out.
8435 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8437 struct lpfc_sli *psli = &phba->sli;
8439 unsigned long timeout = 0;
8441 /* Mark the asynchronous mailbox command posting as blocked */
8442 spin_lock_irq(&phba->hbalock);
8443 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8444 /* Determine how long we might wait for the active mailbox
8445 * command to be gracefully completed by firmware.
8447 if (phba->sli.mbox_active)
8448 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8449 phba->sli.mbox_active) *
8451 spin_unlock_irq(&phba->hbalock);
8453 /* Make sure the mailbox is really active */
8455 lpfc_sli4_process_missed_mbox_completions(phba);
8457 /* Wait for the outstanding mailbox command to complete */
8458 while (phba->sli.mbox_active) {
8459 /* Check active mailbox complete status every 2ms */
8461 if (time_after(jiffies, timeout)) {
8462 /* Timeout, mark the outstanding cmd as not complete */
8468 /* Can not cleanly block the async mailbox command; fail it */
8470 spin_lock_irq(&phba->hbalock);
8471 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8472 spin_unlock_irq(&phba->hbalock);
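/*
 * A hedged sketch of the block/unblock pairing used by the SLI4 issue path
 * below: a synchronous mailbox is only posted once asynchronous posting has
 * been successfully blocked.
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */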
8478 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
8479 * @phba: Pointer to HBA context object.
8481 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8482 * commands from the driver internal pending mailbox queue. It makes sure
8483 * that there is no outstanding mailbox command before resuming posting
8484 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8485 * mailbox command, it will try to wait it out before resuming asynchronous
8486 * mailbox command posting.
8489 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8491 struct lpfc_sli *psli = &phba->sli;
8493 spin_lock_irq(&phba->hbalock);
8494 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8495 /* Asynchronous mailbox posting is not blocked, do nothing */
8496 spin_unlock_irq(&phba->hbalock);
8500 /* The outstanding synchronous mailbox command is guaranteed to complete,
8501 * either successfully or by timing out. After a timeout the outstanding
8502 * mailbox command is always removed, so just unblock the posting of async
8503 * mailbox commands and resume
8505 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8506 spin_unlock_irq(&phba->hbalock);
8508 /* wake up worker thread to post asynchronous mailbox command */
8509 lpfc_worker_wake_up(phba);
8513 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8514 * @phba: Pointer to HBA context object.
8515 * @mboxq: Pointer to mailbox object.
8517 * The function waits for the bootstrap mailbox register ready bit from the
8518 * port for twice the regular mailbox command timeout value.
8520 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8521 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8524 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8527 unsigned long timeout;
8528 struct lpfc_register bmbx_reg;
8530 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8534 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8535 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8539 if (time_after(jiffies, timeout))
8540 return MBXERR_ERROR;
8541 } while (!db_ready);
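/*
 * Sketch of the write-then-wait pattern this helper supports, as used by
 * lpfc_sli4_post_sync_mbox() below. Every bootstrap register write is
 * followed by a readiness poll before the next step; a non-zero return
 * means the port never signalled ready:
 *
 *	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
 *	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 *	if (rc)
 *		return rc;
 */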
8547 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8548 * @phba: Pointer to HBA context object.
8549 * @mboxq: Pointer to mailbox object.
8551 * The function posts a mailbox to the port. The mailbox is expected
8552 * to be completely filled in and ready for the port to operate on it.
8553 * This routine executes a synchronous completion operation on the
8554 * mailbox by polling for its completion.
8556 * The caller must not be holding any locks when calling this routine.
8559 * MBX_SUCCESS - mailbox posted successfully
8560 * Any of the MBX error values.
8563 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8565 int rc = MBX_SUCCESS;
8566 unsigned long iflag;
8567 uint32_t mcqe_status;
8569 struct lpfc_sli *psli = &phba->sli;
8570 struct lpfc_mqe *mb = &mboxq->u.mqe;
8571 struct lpfc_bmbx_create *mbox_rgn;
8572 struct dma_address *dma_address;
8575 * Only one mailbox can be active to the bootstrap mailbox region
8576 * at a time and there is no queueing provided.
8578 spin_lock_irqsave(&phba->hbalock, iflag);
8579 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8580 spin_unlock_irqrestore(&phba->hbalock, iflag);
8581 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8582 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8583 "cannot issue Data: x%x x%x\n",
8584 mboxq->vport ? mboxq->vport->vpi : 0,
8585 mboxq->u.mb.mbxCommand,
8586 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8587 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8588 psli->sli_flag, MBX_POLL);
8589 return MBXERR_ERROR;
8591 /* The server grabs the token and owns it until released */
8592 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8593 phba->sli.mbox_active = mboxq;
8594 spin_unlock_irqrestore(&phba->hbalock, iflag);
8596 /* Wait for the bootstrap mailbox register to become ready */
8597 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8601 * Initialize the bootstrap memory region to avoid stale data areas
8602 * in the mailbox post. Then copy the caller's mailbox contents to
8603 * the bmbx mailbox region.
8605 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8606 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8607 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8608 sizeof(struct lpfc_mqe));
8610 /* Post the high mailbox dma address to the port and wait for ready. */
8611 dma_address = &phba->sli4_hba.bmbx.dma_address;
8612 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8614 /* Wait for the bootstrap mailbox register hi-address write to complete */
8615 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8619 /* Post the low mailbox dma address to the port. */
8620 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8622 /* Wait for the bootstrap mailbox register low-address write to complete */
8623 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8628 * Read the CQ to ensure the mailbox has completed.
8629 * If so, update the mailbox status so that the upper layers
8630 * can complete the request normally.
8632 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8633 sizeof(struct lpfc_mqe));
8634 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8635 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8636 sizeof(struct lpfc_mcqe));
8637 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8639 * When the CQE status indicates a failure and the mailbox status
8640 * indicates success then copy the CQE status into the mailbox status
8641 * (and prefix it with x4000).
8643 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8644 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8645 bf_set(lpfc_mqe_status, mb,
8646 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8649 lpfc_sli4_swap_str(phba, mboxq);
8651 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8652 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8653 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8654 " x%x x%x CQ: x%x x%x x%x x%x\n",
8655 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8656 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8657 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8658 bf_get(lpfc_mqe_status, mb),
8659 mb->un.mb_words[0], mb->un.mb_words[1],
8660 mb->un.mb_words[2], mb->un.mb_words[3],
8661 mb->un.mb_words[4], mb->un.mb_words[5],
8662 mb->un.mb_words[6], mb->un.mb_words[7],
8663 mb->un.mb_words[8], mb->un.mb_words[9],
8664 mb->un.mb_words[10], mb->un.mb_words[11],
8665 mb->un.mb_words[12], mboxq->mcqe.word0,
8666 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8667 mboxq->mcqe.trailer);
8669 /* We are holding the token; no lock is needed to release it */
8670 spin_lock_irqsave(&phba->hbalock, iflag);
8671 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8672 phba->sli.mbox_active = NULL;
8673 spin_unlock_irqrestore(&phba->hbalock, iflag);
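/*
 * Illustrative early-init flow (a sketch, not a verbatim call chain):
 * before interrupts are enabled, lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL)
 * reaches this routine via lpfc_sli_issue_mbox_s4(), and the caller then
 * checks the merged status. The out_error label is hypothetical:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS ||
 *	    bf_get(lpfc_mqe_status, &mboxq->u.mqe))
 *		goto out_error;
 *
 * A CQE failure is folded into the MQE status above (prefixed with
 * LPFC_MBX_ERROR_RANGE), so one check covers both.
 */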
8678 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8679 * @phba: Pointer to HBA context object.
8680 * @pmbox: Pointer to mailbox object.
8681 * @flag: Flag indicating how the mailbox needs to be processed.
8683 * This function is called by discovery code and HBA management code to submit
8684 * a mailbox command to firmware with SLI-4 interface spec.
8686 * Return codes the caller owns the mailbox command after the return of the function.
8690 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8693 struct lpfc_sli *psli = &phba->sli;
8694 unsigned long iflags;
8697 /* dump from issue mailbox command if setup */
8698 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8700 rc = lpfc_mbox_dev_check(phba);
8702 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8703 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8704 "cannot issue Data: x%x x%x\n",
8705 mboxq->vport ? mboxq->vport->vpi : 0,
8706 mboxq->u.mb.mbxCommand,
8707 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8708 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8709 psli->sli_flag, flag);
8710 goto out_not_finished;
8713 /* Detect polling mode and jump to a handler */
8714 if (!phba->sli4_hba.intr_enable) {
8715 if (flag == MBX_POLL)
8716 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8719 if (rc != MBX_SUCCESS)
8720 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8721 "(%d):2541 Mailbox command x%x "
8722 "(x%x/x%x) failure: "
8723 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8725 mboxq->vport ? mboxq->vport->vpi : 0,
8726 mboxq->u.mb.mbxCommand,
8727 lpfc_sli_config_mbox_subsys_get(phba,
8729 lpfc_sli_config_mbox_opcode_get(phba,
8731 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8732 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8733 bf_get(lpfc_mcqe_ext_status,
8735 psli->sli_flag, flag);
8737 } else if (flag == MBX_POLL) {
8738 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8739 "(%d):2542 Try to issue mailbox command "
8740 "x%x (x%x/x%x) synchronously ahead of async "
8741 "mailbox command queue: x%x x%x\n",
8742 mboxq->vport ? mboxq->vport->vpi : 0,
8743 mboxq->u.mb.mbxCommand,
8744 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8745 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8746 psli->sli_flag, flag);
8747 /* Try to block the asynchronous mailbox posting */
8748 rc = lpfc_sli4_async_mbox_block(phba);
8750 /* Successfully blocked, now issue sync mbox cmd */
8751 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8752 if (rc != MBX_SUCCESS)
8753 lpfc_printf_log(phba, KERN_WARNING,
8755 "(%d):2597 Sync Mailbox command "
8756 "x%x (x%x/x%x) failure: "
8757 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8759 mboxq->vport ? mboxq->vport->vpi : 0,
8760 mboxq->u.mb.mbxCommand,
8761 lpfc_sli_config_mbox_subsys_get(phba,
8763 lpfc_sli_config_mbox_opcode_get(phba,
8765 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8766 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8767 bf_get(lpfc_mcqe_ext_status,
8769 psli->sli_flag, flag);
8770 /* Unblock the async mailbox posting afterward */
8771 lpfc_sli4_async_mbox_unblock(phba);
8776 /* Now, interrupt mode asynchronous mailbox command */
8777 rc = lpfc_mbox_cmd_check(phba, mboxq);
8779 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8780 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8781 "cannot issue Data: x%x x%x\n",
8782 mboxq->vport ? mboxq->vport->vpi : 0,
8783 mboxq->u.mb.mbxCommand,
8784 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8785 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8786 psli->sli_flag, flag);
8787 goto out_not_finished;
8790 /* Put the mailbox command into the driver's internal FIFO */
8791 psli->slistat.mbox_busy++;
8792 spin_lock_irqsave(&phba->hbalock, iflags);
8793 lpfc_mbox_put(phba, mboxq);
8794 spin_unlock_irqrestore(&phba->hbalock, iflags);
8795 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8796 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8797 "x%x (x%x/x%x) x%x x%x x%x\n",
8798 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8799 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8800 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8801 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8802 phba->pport->port_state,
8803 psli->sli_flag, MBX_NOWAIT);
8804 /* Wake up worker thread to transport mailbox command from head */
8805 lpfc_worker_wake_up(phba);
8810 return MBX_NOT_FINISHED;
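/*
 * A minimal caller-side sketch of the asynchronous path (assuming a
 * builder such as lpfc_unreg_login() and the default completion handler):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
 *	pmb->vport = vport;
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *
 * On MBX_BUSY or MBX_SUCCESS the command sits in the driver FIFO until the
 * worker thread posts it, so the caller must not free it.
 */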
8814 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8815 * @phba: Pointer to HBA context object.
8817 * This function is called by worker thread to send a mailbox command to
8818 * SLI4 HBA firmware.
8822 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8824 struct lpfc_sli *psli = &phba->sli;
8825 LPFC_MBOXQ_t *mboxq;
8826 int rc = MBX_SUCCESS;
8827 unsigned long iflags;
8828 struct lpfc_mqe *mqe;
8831 /* Check interrupt mode before posting the async mailbox command */
8832 if (unlikely(!phba->sli4_hba.intr_enable))
8833 return MBX_NOT_FINISHED;
8835 /* Check for mailbox command service token */
8836 spin_lock_irqsave(&phba->hbalock, iflags);
8837 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8838 spin_unlock_irqrestore(&phba->hbalock, iflags);
8839 return MBX_NOT_FINISHED;
8841 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8842 spin_unlock_irqrestore(&phba->hbalock, iflags);
8843 return MBX_NOT_FINISHED;
8845 if (unlikely(phba->sli.mbox_active)) {
8846 spin_unlock_irqrestore(&phba->hbalock, iflags);
8847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8848 "0384 There is pending active mailbox cmd\n");
8849 return MBX_NOT_FINISHED;
8851 /* Take the mailbox command service token */
8852 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8854 /* Get the next mailbox command from head of queue */
8855 mboxq = lpfc_mbox_get(phba);
8857 /* If no more mailbox commands are waiting to be posted, we're done */
8859 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8860 spin_unlock_irqrestore(&phba->hbalock, iflags);
8863 phba->sli.mbox_active = mboxq;
8864 spin_unlock_irqrestore(&phba->hbalock, iflags);
8866 /* Check device readiness for posting mailbox command */
8867 rc = lpfc_mbox_dev_check(phba);
8869 /* Driver clean routine will clean up pending mailbox */
8870 goto out_not_finished;
8872 /* Prepare the mbox command to be posted */
8873 mqe = &mboxq->u.mqe;
8874 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8876 /* Start timer for the mbox_tmo and log some mailbox post messages */
8877 mod_timer(&psli->mbox_tmo, (jiffies +
8878 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8880 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8881 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8883 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8884 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8885 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8886 phba->pport->port_state, psli->sli_flag);
8888 if (mbx_cmnd != MBX_HEARTBEAT) {
8890 lpfc_debugfs_disc_trc(mboxq->vport,
8891 LPFC_DISC_TRC_MBOX_VPORT,
8892 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8893 mbx_cmnd, mqe->un.mb_words[0],
8894 mqe->un.mb_words[1]);
8896 lpfc_debugfs_disc_trc(phba->pport,
8898 "MBOX Send: cmd:x%x mb:x%x x%x",
8899 mbx_cmnd, mqe->un.mb_words[0],
8900 mqe->un.mb_words[1]);
8903 psli->slistat.mbox_cmd++;
8905 /* Post the mailbox command to the port */
8906 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8907 if (rc != MBX_SUCCESS) {
8908 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8909 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8910 "cannot issue Data: x%x x%x\n",
8911 mboxq->vport ? mboxq->vport->vpi : 0,
8912 mboxq->u.mb.mbxCommand,
8913 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8914 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8915 psli->sli_flag, MBX_NOWAIT);
8916 goto out_not_finished;
8922 spin_lock_irqsave(&phba->hbalock, iflags);
8923 if (phba->sli.mbox_active) {
8924 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8925 __lpfc_mbox_cmpl_put(phba, mboxq);
8926 /* Release the token */
8927 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8928 phba->sli.mbox_active = NULL;
8930 spin_unlock_irqrestore(&phba->hbalock, iflags);
8932 return MBX_NOT_FINISHED;
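/*
 * Worker-side sketch (hedged): once lpfc_worker_wake_up() has run, the
 * worker thread is expected to drain the FIFO one command at a time,
 * along the lines of:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		lpfc_sli4_post_async_mbox(phba);
 *
 * MBX_NOT_FINISHED here means the port was busy or blocked and the command
 * either stays queued or is completed as failed.
 */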
8936 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8937 * @phba: Pointer to HBA context object.
8938 * @pmbox: Pointer to mailbox object.
8939 * @flag: Flag indicating how the mailbox need to be processed.
8941 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8942 * the API jump table function pointer from the lpfc_hba struct.
8944 * Return codes the caller owns the mailbox command after the return of the
8948 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8950 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8954 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8955 * @phba: The hba struct for which this call is being executed.
8956 * @dev_grp: The HBA PCI-Device group number.
8958 * This routine sets up the mbox interface API function jump table in the @phba struct.
8960 * Returns: 0 - success, -ENODEV - failure.
8963 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8967 case LPFC_PCI_DEV_LP:
8968 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8969 phba->lpfc_sli_handle_slow_ring_event =
8970 lpfc_sli_handle_slow_ring_event_s3;
8971 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8972 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8973 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8975 case LPFC_PCI_DEV_OC:
8976 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8977 phba->lpfc_sli_handle_slow_ring_event =
8978 lpfc_sli_handle_slow_ring_event_s4;
8979 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8980 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8981 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8985 "1420 Invalid HBA PCI-device group: 0x%x\n",
8994 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8995 * @phba: Pointer to HBA context object.
8996 * @pring: Pointer to driver SLI ring object.
8997 * @piocb: Pointer to address of newly added command iocb.
8999 * This function is called with hbalock held to add a command
9000 * iocb to the txq when the SLI layer cannot submit the command iocb to firmware.
9004 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9005 struct lpfc_iocbq *piocb)
9007 lockdep_assert_held(&phba->hbalock);
9008 /* Insert the caller's iocb in the txq tail for later processing. */
9009 list_add_tail(&piocb->list, &pring->txq);
9013 * lpfc_sli_next_iocb - Get the next iocb in the txq
9014 * @phba: Pointer to HBA context object.
9015 * @pring: Pointer to driver SLI ring object.
9016 * @piocb: Pointer to address of newly added command iocb.
9018 * This function is called with hbalock held before a new
9019 * iocb is submitted to the firmware. This function checks the
9020 * txq so that any iocbs queued in the txq are flushed to the firmware
9021 * before new iocbs are submitted.
9022 * If there are iocbs in the txq which need to be submitted
9023 * to firmware, lpfc_sli_next_iocb returns the first element
9024 * of the txq after dequeuing it from txq.
9025 * If there is no iocb in the txq then the function will return
9026 * *piocb and *piocb is set to NULL. Caller needs to check
9027 * *piocb to find if there are more commands in the txq.
9029 static struct lpfc_iocbq *
9030 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9031 struct lpfc_iocbq **piocb)
9033 struct lpfc_iocbq * nextiocb;
9035 lockdep_assert_held(&phba->hbalock);
9037 nextiocb = lpfc_sli_ringtx_get(phba, pring);
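/*
 * The canonical drain loop (used verbatim by __lpfc_sli_issue_iocb_s3()
 * below): queued txq entries are submitted ahead of the caller's iocb, and
 * the caller's iocb comes back through *piocb last.
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 */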
9047 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9048 * @phba: Pointer to HBA context object.
9049 * @ring_number: SLI ring number to issue iocb on.
9050 * @piocb: Pointer to command iocb.
9051 * @flag: Flag indicating if this command can be put into txq.
9053 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9054 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9055 * recovering from an error state, if the HBA is resetting, or if the LPFC_STOP_IOCB_EVENT
9056 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9057 * this function allows only iocbs for posting buffers. This function finds
9058 * next available slot in the command ring and posts the command to the
9059 * available slot and writes the port attention register to request that the
9060 * HBA start processing new iocbs. If there is no slot available in the ring and
9061 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9062 * the function returns IOCB_BUSY.
9064 * This function is called with hbalock held. The function will return success
9065 * after it successfully submits the iocb to firmware or after adding it to the txq.
9069 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9070 struct lpfc_iocbq *piocb, uint32_t flag)
9072 struct lpfc_iocbq *nextiocb;
9074 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9076 lockdep_assert_held(&phba->hbalock);
9078 if (piocb->iocb_cmpl && (!piocb->vport) &&
9079 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9080 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9081 lpfc_printf_log(phba, KERN_ERR,
9082 LOG_SLI | LOG_VPORT,
9083 "1807 IOCB x%x failed. No vport\n",
9084 piocb->iocb.ulpCommand);
9090 /* If the PCI channel is in offline state, do not post iocbs. */
9091 if (unlikely(pci_channel_offline(phba->pcidev)))
9094 /* If HBA has a deferred error attention, fail the iocb. */
9095 if (unlikely(phba->hba_flag & DEFER_ERATT))
9099 * We should never get an IOCB if we are in a < LINK_DOWN state
9101 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9105 * Check to see if we are blocking IOCB processing because of an
9106 * outstanding event.
9108 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9111 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9113 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9114 * can be issued if the link is not up.
9116 switch (piocb->iocb.ulpCommand) {
9117 case CMD_GEN_REQUEST64_CR:
9118 case CMD_GEN_REQUEST64_CX:
9119 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9120 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9121 FC_RCTL_DD_UNSOL_CMD) ||
9122 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9123 MENLO_TRANSPORT_TYPE))
9127 case CMD_QUE_RING_BUF_CN:
9128 case CMD_QUE_RING_BUF64_CN:
9130 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9131 * completion, iocb_cmpl MUST be 0.
9133 if (piocb->iocb_cmpl)
9134 piocb->iocb_cmpl = NULL;
9136 case CMD_CREATE_XRI_CR:
9137 case CMD_CLOSE_XRI_CN:
9138 case CMD_CLOSE_XRI_CX:
9145 * For FCP commands, we must be in a state where we can process link attention events.
9148 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9149 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9153 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9154 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9155 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9158 lpfc_sli_update_ring(phba, pring);
9160 lpfc_sli_update_full_ring(phba, pring);
9163 return IOCB_SUCCESS;
9168 pring->stats.iocb_cmd_delay++;
9172 if (!(flag & SLI_IOCB_RET_IOCB)) {
9173 __lpfc_sli_ringtx_put(phba, pring, piocb);
9174 return IOCB_SUCCESS;
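/*
 * Caller-side sketch for the SLI3 path (hedged): the lockless routine must
 * run under hbalock, as done by the lpfc_sli_issue_iocb() wrapper:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * With flag == 0 a full ring is absorbed by queuing to the txq and
 * IOCB_SUCCESS is returned; with SLI_IOCB_RET_IOCB the caller gets
 * IOCB_BUSY back and keeps ownership of the iocb.
 */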
9181 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9182 * @phba: Pointer to HBA context object.
9183 * @piocb: Pointer to command iocb.
9184 * @sglq: Pointer to the scatter gather queue object.
9186 * This routine converts the bpl or bde that is in the IOCB
9187 * to a sgl list for the sli4 hardware. The physical address
9188 * of the bpl/bde is converted back to a virtual address.
9189 * If the IOCB contains a BPL then the list of BDE's is
9190 * converted to sli4_sge's. If the IOCB contains a single
9191 * BDE then it is converted to a single sli4_sge.
9192 * The IOCB is still in cpu endianness so the contents of
9193 * the bpl can be used without byte swapping.
9195 * Returns valid XRI = Success, NO_XRI = Failure.
9198 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9199 struct lpfc_sglq *sglq)
9201 uint16_t xritag = NO_XRI;
9202 struct ulp_bde64 *bpl = NULL;
9203 struct ulp_bde64 bde;
9204 struct sli4_sge *sgl = NULL;
9205 struct lpfc_dmabuf *dmabuf;
9209 uint32_t offset = 0; /* accumulated offset in the sg request list */
9210 int inbound = 0; /* number of sg reply entries inbound from firmware */
9212 if (!piocbq || !sglq)
9215 sgl = (struct sli4_sge *)sglq->sgl;
9216 icmd = &piocbq->iocb;
9217 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9218 return sglq->sli4_xritag;
9219 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9220 numBdes = icmd->un.genreq64.bdl.bdeSize /
9221 sizeof(struct ulp_bde64);
9222 /* The addrHigh and addrLow fields within the IOCB
9223 * have not been byteswapped yet so there is no
9224 * need to swap them back.
9226 if (piocbq->context3)
9227 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9231 bpl = (struct ulp_bde64 *)dmabuf->virt;
9235 for (i = 0; i < numBdes; i++) {
9236 /* Should already be byte swapped. */
9237 sgl->addr_hi = bpl->addrHigh;
9238 sgl->addr_lo = bpl->addrLow;
9240 sgl->word2 = le32_to_cpu(sgl->word2);
9241 if ((i+1) == numBdes)
9242 bf_set(lpfc_sli4_sge_last, sgl, 1);
9244 bf_set(lpfc_sli4_sge_last, sgl, 0);
9245 /* swap the size field back to the cpu so we
9246 * can assign it to the sgl.
9248 bde.tus.w = le32_to_cpu(bpl->tus.w);
9249 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9250 /* The offsets in the sgl need to be accumulated
9251 * separately for the request and reply lists.
9252 * The request is always first, the reply follows.
9254 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9255 /* add up the reply sg entries */
9256 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9258 /* first inbound? reset the offset */
9261 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9262 bf_set(lpfc_sli4_sge_type, sgl,
9263 LPFC_SGE_TYPE_DATA);
9264 offset += bde.tus.f.bdeSize;
9266 sgl->word2 = cpu_to_le32(sgl->word2);
9270 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9271 /* The addrHigh and addrLow fields of the BDE have not
9272 * been byteswapped yet so they need to be swapped
9273 * before putting them in the sgl.
9276 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9278 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9279 sgl->word2 = le32_to_cpu(sgl->word2);
9280 bf_set(lpfc_sli4_sge_last, sgl, 1);
9281 sgl->word2 = cpu_to_le32(sgl->word2);
9283 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9285 return sglq->sli4_xritag;
9289 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9290 * @phba: Pointer to HBA context object.
9291 * @piocb: Pointer to command iocb.
9292 * @wqe: Pointer to the work queue entry.
9294 * This routine converts the iocb command to its Work Queue Entry
9295 * equivalent. The wqe pointer should not have any fields set when
9296 * this routine is called because it will memcpy over them.
9297 * This routine does not set the CQ_ID or the WQEC bits in the wqe.
9300 * Returns: 0 = Success, IOCB_ERROR = Failure.
9303 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9304 union lpfc_wqe128 *wqe)
9306 uint32_t xmit_len = 0, total_len = 0;
9310 uint8_t command_type = ELS_COMMAND_NON_FIP;
9313 uint16_t abrt_iotag;
9314 struct lpfc_iocbq *abrtiocbq;
9315 struct ulp_bde64 *bpl = NULL;
9316 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9318 struct ulp_bde64 bde;
9319 struct lpfc_nodelist *ndlp;
9323 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9324 /* The fcp commands will set command type */
9325 if (iocbq->iocb_flag & LPFC_IO_FCP)
9326 command_type = FCP_COMMAND;
9327 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9328 command_type = ELS_COMMAND_FIP;
9330 command_type = ELS_COMMAND_NON_FIP;
9332 if (phba->fcp_embed_io)
9333 memset(wqe, 0, sizeof(union lpfc_wqe128));
9334 /* Some of the fields are in the right position already */
9335 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9336 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9337 /* The ct field has moved so reset */
9338 wqe->generic.wqe_com.word7 = 0;
9339 wqe->generic.wqe_com.word10 = 0;
9342 abort_tag = (uint32_t) iocbq->iotag;
9343 xritag = iocbq->sli4_xritag;
9344 /* words0-2 bpl convert bde */
9345 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9346 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9347 sizeof(struct ulp_bde64);
9348 bpl = (struct ulp_bde64 *)
9349 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9353 /* Should already be byte swapped. */
9354 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9355 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9356 /* swap the size field back to the cpu so we
9357 * can assign it to the sgl.
9359 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9360 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9362 for (i = 0; i < numBdes; i++) {
9363 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9364 total_len += bde.tus.f.bdeSize;
9367 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9369 iocbq->iocb.ulpIoTag = iocbq->iotag;
9370 cmnd = iocbq->iocb.ulpCommand;
9372 switch (iocbq->iocb.ulpCommand) {
9373 case CMD_ELS_REQUEST64_CR:
9374 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9375 ndlp = iocbq->context_un.ndlp;
9377 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9378 if (!iocbq->iocb.ulpLe) {
9379 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9380 "2007 Only Limited Edition cmd Format"
9381 " supported 0x%x\n",
9382 iocbq->iocb.ulpCommand);
9386 wqe->els_req.payload_len = xmit_len;
9387 /* Els_request64 has a TMO */
9388 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9389 iocbq->iocb.ulpTimeout);
9390 /* Need a VF for word 4; set the vf bit */
9391 bf_set(els_req64_vf, &wqe->els_req, 0);
9392 /* And a VFID for word 12 */
9393 bf_set(els_req64_vfid, &wqe->els_req, 0);
9394 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9395 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9396 iocbq->iocb.ulpContext);
9397 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9398 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9399 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9400 if (command_type == ELS_COMMAND_FIP)
9401 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9402 >> LPFC_FIP_ELS_ID_SHIFT);
9403 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9404 iocbq->context2)->virt);
9405 if_type = bf_get(lpfc_sli_intf_if_type,
9406 &phba->sli4_hba.sli_intf);
9407 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9408 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9409 *pcmd == ELS_CMD_SCR ||
9410 *pcmd == ELS_CMD_RSCN_XMT ||
9411 *pcmd == ELS_CMD_FDISC ||
9412 *pcmd == ELS_CMD_LOGO ||
9413 *pcmd == ELS_CMD_PLOGI)) {
9414 bf_set(els_req64_sp, &wqe->els_req, 1);
9415 bf_set(els_req64_sid, &wqe->els_req,
9416 iocbq->vport->fc_myDID);
9417 if ((*pcmd == ELS_CMD_FLOGI) &&
9418 !(phba->fc_topology ==
9419 LPFC_TOPOLOGY_LOOP))
9420 bf_set(els_req64_sid, &wqe->els_req, 0);
9421 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9422 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9423 phba->vpi_ids[iocbq->vport->vpi]);
9424 } else if (pcmd && iocbq->context1) {
9425 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9426 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9427 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9430 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9431 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9432 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9433 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9434 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9435 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9436 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9437 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9438 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9440 case CMD_XMIT_SEQUENCE64_CX:
9441 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9442 iocbq->iocb.un.ulpWord[3]);
9443 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9444 iocbq->iocb.unsli3.rcvsli3.ox_id);
9445 /* The entire sequence is transmitted for this IOCB */
9446 xmit_len = total_len;
9447 cmnd = CMD_XMIT_SEQUENCE64_CR;
9448 if (phba->link_flag & LS_LOOPBACK_MODE)
9449 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9451 case CMD_XMIT_SEQUENCE64_CR:
9452 /* word3 iocb=io_tag32 wqe=reserved */
9453 wqe->xmit_sequence.rsvd3 = 0;
9454 /* word4 relative_offset memcpy */
9455 /* word5 r_ctl/df_ctl memcpy */
9456 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9457 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9458 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9459 LPFC_WQE_IOD_WRITE);
9460 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9461 LPFC_WQE_LENLOC_WORD12);
9462 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9463 wqe->xmit_sequence.xmit_len = xmit_len;
9464 command_type = OTHER_COMMAND;
9466 case CMD_XMIT_BCAST64_CN:
9467 /* word3 iocb=iotag32 wqe=seq_payload_len */
9468 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9469 /* word4 iocb=rsvd wqe=rsvd */
9470 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9471 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9472 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9473 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9474 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9475 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9476 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9477 LPFC_WQE_LENLOC_WORD3);
9478 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9480 case CMD_FCP_IWRITE64_CR:
9481 command_type = FCP_COMMAND_DATA_OUT;
9482 /* word3 iocb=iotag wqe=payload_offset_len */
9483 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9484 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9485 xmit_len + sizeof(struct fcp_rsp));
9486 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9488 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9489 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9490 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9491 iocbq->iocb.ulpFCP2Rcvy);
9492 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9493 /* Always open the exchange */
9494 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9495 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9496 LPFC_WQE_LENLOC_WORD4);
9497 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9498 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9499 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9500 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9501 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9502 if (iocbq->priority) {
9503 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9504 (iocbq->priority << 1));
9506 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9507 (phba->cfg_XLanePriority << 1));
9510 /* Note, word 10 is already initialized to 0 */
9512 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9513 if (phba->cfg_enable_pbde)
9514 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9516 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9518 if (phba->fcp_embed_io) {
9519 struct lpfc_io_buf *lpfc_cmd;
9520 struct sli4_sge *sgl;
9521 struct fcp_cmnd *fcp_cmnd;
9524 /* 128 byte wqe support here */
9526 lpfc_cmd = iocbq->context1;
9527 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9528 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9530 /* Word 0-2 - FCP_CMND */
9531 wqe->generic.bde.tus.f.bdeFlags =
9532 BUFF_TYPE_BDE_IMMED;
9533 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9534 wqe->generic.bde.addrHigh = 0;
9535 wqe->generic.bde.addrLow = 88; /* Word 22 */
9537 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9538 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9540 /* Word 22-29 FCP CMND Payload */
9541 ptr = &wqe->words[22];
9542 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9545 case CMD_FCP_IREAD64_CR:
9546 /* word3 iocb=iotag wqe=payload_offset_len */
9547 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9548 bf_set(payload_offset_len, &wqe->fcp_iread,
9549 xmit_len + sizeof(struct fcp_rsp));
9550 bf_set(cmd_buff_len, &wqe->fcp_iread,
9552 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9553 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9554 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9555 iocbq->iocb.ulpFCP2Rcvy);
9556 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9557 /* Always open the exchange */
9558 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9559 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9560 LPFC_WQE_LENLOC_WORD4);
9561 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9562 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9563 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9564 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9565 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9566 if (iocbq->priority) {
9567 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9568 (iocbq->priority << 1));
9570 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9571 (phba->cfg_XLanePriority << 1));
9574 /* Note, word 10 is already initialized to 0 */
9576 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9577 if (phba->cfg_enable_pbde)
9578 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9580 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9582 if (phba->fcp_embed_io) {
9583 struct lpfc_io_buf *lpfc_cmd;
9584 struct sli4_sge *sgl;
9585 struct fcp_cmnd *fcp_cmnd;
9588 /* 128 byte wqe support here */
9590 lpfc_cmd = iocbq->context1;
9591 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9592 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9594 /* Word 0-2 - FCP_CMND */
9595 wqe->generic.bde.tus.f.bdeFlags =
9596 BUFF_TYPE_BDE_IMMED;
9597 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9598 wqe->generic.bde.addrHigh = 0;
9599 wqe->generic.bde.addrLow = 88; /* Word 22 */
9601 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9602 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9604 /* Word 22-29 FCP CMND Payload */
9605 ptr = &wqe->words[22];
9606 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9609 case CMD_FCP_ICMND64_CR:
9610 /* word3 iocb=iotag wqe=payload_offset_len */
9611 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9612 bf_set(payload_offset_len, &wqe->fcp_icmd,
9613 xmit_len + sizeof(struct fcp_rsp));
9614 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9616 /* word3 iocb=IO_TAG wqe=reserved */
9617 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9618 /* Always open the exchange */
9619 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9620 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9621 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9622 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9623 LPFC_WQE_LENLOC_NONE);
9624 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9625 iocbq->iocb.ulpFCP2Rcvy);
9626 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9627 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9628 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9629 if (iocbq->priority) {
9630 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9631 (iocbq->priority << 1));
9633 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9634 (phba->cfg_XLanePriority << 1));
9637 /* Note, word 10 is already initialized to 0 */
9639 if (phba->fcp_embed_io) {
9640 struct lpfc_io_buf *lpfc_cmd;
9641 struct sli4_sge *sgl;
9642 struct fcp_cmnd *fcp_cmnd;
9645 /* 128 byte wqe support here */
9647 lpfc_cmd = iocbq->context1;
9648 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9649 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9651 /* Word 0-2 - FCP_CMND */
9652 wqe->generic.bde.tus.f.bdeFlags =
9653 BUFF_TYPE_BDE_IMMED;
9654 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9655 wqe->generic.bde.addrHigh = 0;
9656 wqe->generic.bde.addrLow = 88; /* Word 22 */
9658 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9659 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9661 /* Word 22-29 FCP CMND Payload */
9662 ptr = &wqe->words[22];
9663 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9666 case CMD_GEN_REQUEST64_CR:
9667 /* For this command calculate the xmit length of the request bpl. */
9671 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9672 sizeof(struct ulp_bde64);
9673 for (i = 0; i < numBdes; i++) {
9674 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9675 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9677 xmit_len += bde.tus.f.bdeSize;
9679 /* word3 iocb=IO_TAG wqe=request_payload_len */
9680 wqe->gen_req.request_payload_len = xmit_len;
9681 /* word4 iocb=parameter wqe=relative_offset memcpy */
9682 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9683 /* word6 context tag copied in memcpy */
9684 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9685 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9687 "2015 Invalid CT %x command 0x%x\n",
9688 ct, iocbq->iocb.ulpCommand);
9691 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9692 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9693 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9694 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9695 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9696 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9697 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9698 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9699 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9700 command_type = OTHER_COMMAND;
9702 case CMD_XMIT_ELS_RSP64_CX:
9703 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9704 /* words0-2 BDE memcpy */
9705 /* word3 iocb=iotag32 wqe=response_payload_len */
9706 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9708 wqe->xmit_els_rsp.word4 = 0;
9709 /* word5 iocb=rsvd wqe=did */
9710 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9711 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9713 if_type = bf_get(lpfc_sli_intf_if_type,
9714 &phba->sli4_hba.sli_intf);
9715 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9716 if (iocbq->vport->fc_flag & FC_PT2PT) {
9717 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9718 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9719 iocbq->vport->fc_myDID);
9720 if (iocbq->vport->fc_myDID == Fabric_DID) {
9722 &wqe->xmit_els_rsp.wqe_dest, 0);
9726 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9727 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9728 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9729 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9730 iocbq->iocb.unsli3.rcvsli3.ox_id);
9731 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9732 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9733 phba->vpi_ids[iocbq->vport->vpi]);
9734 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9735 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9736 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9737 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9738 LPFC_WQE_LENLOC_WORD3);
9739 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9740 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9741 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9742 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9743 iocbq->context2)->virt);
9744 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9745 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9746 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9747 iocbq->vport->fc_myDID);
9748 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9749 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9750 phba->vpi_ids[phba->pport->vpi]);
9752 command_type = OTHER_COMMAND;
9754 case CMD_CLOSE_XRI_CN:
9755 case CMD_ABORT_XRI_CN:
9756 case CMD_ABORT_XRI_CX:
9757 /* words 0-2 are reserved; the memcpy should leave them 0 */
9758 /* port will send abts */
9759 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9760 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9761 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9762 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9766 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9768 * The link is down, or the command was ELS_FIP,
9769 * so the fw does not need to send abts
9772 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9774 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9775 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9776 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9777 wqe->abort_cmd.rsrvd5 = 0;
9778 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9779 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9780 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9782 * The abort handler will send us CMD_ABORT_XRI_CN or
9783 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9785 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9786 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9787 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9788 LPFC_WQE_LENLOC_NONE);
9789 cmnd = CMD_ABORT_XRI_CX;
9790 command_type = OTHER_COMMAND;
9793 case CMD_XMIT_BLS_RSP64_CX:
9794 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9795 /* As BLS ABTS RSP WQE is very different from other WQEs,
9796 * we re-construct this WQE here based on information in
9797 * iocbq from scratch.
9799 memset(wqe, 0, sizeof(union lpfc_wqe));
9800 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
9801 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9802 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9803 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9804 LPFC_ABTS_UNSOL_INT) {
9805 /* ABTS sent by initiator to CT exchange, the
9806 * RX_ID field will be filled with the newly
9807 * allocated responder XRI.
9809 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9810 iocbq->sli4_xritag);
9812 /* ABTS sent by responder to CT exchange, the
9813 * RX_ID field will be filled with the responder
9816 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9817 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9819 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9820 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9823 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9825 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9826 iocbq->iocb.ulpContext);
9827 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9828 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9829 phba->vpi_ids[phba->pport->vpi]);
9830 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9831 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9832 LPFC_WQE_LENLOC_NONE);
9833 /* Overwrite the pre-set command type with OTHER_COMMAND */
9834 command_type = OTHER_COMMAND;
9835 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9836 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9837 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9838 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9839 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9840 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9841 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9845 case CMD_SEND_FRAME:
9846 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9849 case CMD_XRI_ABORTED_CX:
9850 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9851 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9852 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9853 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9854 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9857 "2014 Invalid command 0x%x\n",
9858 iocbq->iocb.ulpCommand);
9863 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9864 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9865 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9866 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9867 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9868 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9869 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9870 LPFC_IO_DIF_INSERT);
9871 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9872 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9873 wqe->generic.wqe_com.abort_tag = abort_tag;
9874 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9875 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9876 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9877 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9882 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9883 * @phba: Pointer to HBA context object.
9884 * @ring_number: SLI ring number to issue iocb on.
9885 * @piocb: Pointer to command iocb.
9886 * @flag: Flag indicating if this command can be put into txq.
9888 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9889 * an iocb command to an HBA with SLI-4 interface spec.
9891 * This function is called with the pring->ring_lock held. The function will return success
9892 * after it successfully submits the iocb to firmware or after adding it to the txq.
9896 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9897 struct lpfc_iocbq *piocb, uint32_t flag)
9899 struct lpfc_sglq *sglq;
9900 union lpfc_wqe128 wqe;
9901 struct lpfc_queue *wq;
9902 struct lpfc_sli_ring *pring;
9905 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9906 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9907 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
9909 wq = phba->sli4_hba.els_wq;
9912 /* Get corresponding ring */
9916 * The WQE can be either 64 or 128 bytes,
9919 lockdep_assert_held(&pring->ring_lock);
9921 if (piocb->sli4_xritag == NO_XRI) {
9922 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9923 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9926 if (!list_empty(&pring->txq)) {
9927 if (!(flag & SLI_IOCB_RET_IOCB)) {
9928 __lpfc_sli_ringtx_put(phba,
9930 return IOCB_SUCCESS;
9935 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9937 if (!(flag & SLI_IOCB_RET_IOCB)) {
9938 __lpfc_sli_ringtx_put(phba,
9941 return IOCB_SUCCESS;
9947 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9948 /* These IOs already have an XRI and a mapped sgl. */
9952 * This is a continuation of a command (CX), so this
9953 * sglq is on the active list
9955 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9961 piocb->sli4_lxritag = sglq->sli4_lxritag;
9962 piocb->sli4_xritag = sglq->sli4_xritag;
9963 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9967 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9970 if (lpfc_sli4_wq_put(wq, &wqe))
9972 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9978 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9980 * This routine wraps the actual lockless IOCB-issuing function
9981 * pointer from the lpfc_hba struct.
9984 * IOCB_ERROR - Error
9985 * IOCB_SUCCESS - Success
9989 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9990 struct lpfc_iocbq *piocb, uint32_t flag)
9992 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
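/*
 * Example (editor's illustrative sketch, not part of the driver):
 * because lpfc_sli_api_table_setup() below fills in the jump table,
 * callers never test sli_rev on the issue path; the indirect call
 * resolves to the s3 or s4 variant selected at attach time:
 *
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *	if (rc != IOCB_SUCCESS)
 *		... caller still owns piocb ...
 */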
9996 * lpfc_sli_api_table_setup - Set up sli api function jump table
9997 * @phba: The hba struct for which this call is being executed.
9998 * @dev_grp: The HBA PCI-Device group number.
10000 * This routine sets up the SLI interface API function jump table in @phba
10002 * Returns: 0 - success, -ENODEV - failure.
10005 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10009 case LPFC_PCI_DEV_LP:
10010 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10011 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10013 case LPFC_PCI_DEV_OC:
10014 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10015 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10019 "1419 Invalid HBA PCI-device group: 0x%x\n",
10024 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
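/*
 * Example (editor's sketch; the call site shown is assumed, not taken
 * from this file): during attach the driver selects the function set
 * for the detected PCI device group and must treat a non-zero return
 * as fatal:
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 */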
10029 * lpfc_sli4_calc_ring - Calculates which ring to use
10030 * @phba: Pointer to HBA context object.
10031 * @piocb: Pointer to command iocb.
10033 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10034 * hba_wqidx, thus we need to calculate the corresponding ring.
10035 * Since ABORTS must go on the same WQ as the command they are
10036 * aborting, we use the command's hba_wqidx.
10038 struct lpfc_sli_ring *
10039 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10041 struct lpfc_io_buf *lpfc_cmd;
10043 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10044 if (unlikely(!phba->sli4_hba.hdwq))
10047 * for abort iocb hba_wqidx should already
10048 * be setup based on what work queue we used.
10050 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10051 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10052 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10054 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10056 if (unlikely(!phba->sli4_hba.els_wq))
10058 piocb->hba_wqidx = 0;
10059 return phba->sli4_hba.els_wq->pring;
10064 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10065 * @phba: Pointer to HBA context object.
10066 * @ring_number: SLI ring number to issue iocb on.
10067 * @piocb: Pointer to command iocb.
10068 * @flag: Flag indicating if this command can be put into txq.
10070 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10071 * function. This function gets the hbalock and calls
10072 * __lpfc_sli_issue_iocb function and will return the error returned
10073 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10074 * functions which do not hold hbalock.
10077 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10078 struct lpfc_iocbq *piocb, uint32_t flag)
10080 struct lpfc_sli_ring *pring;
10081 unsigned long iflags;
10084 if (phba->sli_rev == LPFC_SLI_REV4) {
10085 pring = lpfc_sli4_calc_ring(phba, piocb);
10086 if (unlikely(pring == NULL))
10089 spin_lock_irqsave(&pring->ring_lock, iflags);
10090 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10091 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10093 /* For now, SLI2/3 will still use hbalock */
10094 spin_lock_irqsave(&phba->hbalock, iflags);
10095 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10096 spin_unlock_irqrestore(&phba->hbalock, iflags);
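/*
 * Example (editor's sketch): a typical caller that does not hold any
 * ring lock issues an ELS iocb through this wrapper and checks the
 * return code; on IOCB_ERROR the iocb was not queued and the caller
 * still owns it:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);
 */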
10102 * lpfc_extra_ring_setup - Extra ring setup function
10103 * @phba: Pointer to HBA context object.
10105 * This function is called while the driver attaches to the
10106 * HBA to set up the extra ring. The extra ring is used
10107 * only when the driver needs to support target mode
10108 * or IP over FC functionality.
10110 * This function is called with no lock held. SLI3 only.
10113 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10115 struct lpfc_sli *psli;
10116 struct lpfc_sli_ring *pring;
10120 /* Adjust cmd/rsp ring iocb entries more evenly */
10122 /* Take some away from the FCP ring */
10123 pring = &psli->sli3_ring[LPFC_FCP_RING];
10124 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10125 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10126 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10127 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10129 /* and give them to the extra ring */
10130 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10132 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10133 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10134 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10135 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10137 /* Setup default profile for this ring */
10138 pring->iotag_max = 4096;
10139 pring->num_mask = 1;
10140 pring->prt[0].profile = 0; /* Mask 0 */
10141 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10142 pring->prt[0].type = phba->cfg_multi_ring_type;
10143 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10147 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10148 * @phba: Pointer to HBA context object.
10149 * @iocbq: Pointer to iocb object.
10151 * The async_event handler calls this routine when it receives
10152 * an ASYNC_STATUS_CN event from the port. The port generates
10153 * this event when an Abort Sequence request to an rport fails
10154 * twice in succession. The abort could be originated by the
10155 * driver or by the port. The ABTS could have been for an ELS
10156 * or FCP IO. The port only generates this event when an ABTS
10157 * fails to complete after one retry.
10160 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10161 struct lpfc_iocbq *iocbq)
10163 struct lpfc_nodelist *ndlp = NULL;
10164 uint16_t rpi = 0, vpi = 0;
10165 struct lpfc_vport *vport = NULL;
10167 /* The rpi in the ulpContext is vport-sensitive. */
10168 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10169 rpi = iocbq->iocb.ulpContext;
10171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10172 "3092 Port generated ABTS async event "
10173 "on vpi %d rpi %d status 0x%x\n",
10174 vpi, rpi, iocbq->iocb.ulpStatus);
10176 vport = lpfc_find_vport_by_vpid(phba, vpi);
10179 ndlp = lpfc_findnode_rpi(vport, rpi);
10180 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10183 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10184 lpfc_sli_abts_recover_port(vport, ndlp);
10188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10189 "3095 Event Context not found, no "
10190 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10191 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10195 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10196 * @phba: pointer to HBA context object.
10197 * @ndlp: nodelist pointer for the impacted rport.
10198 * @axri: pointer to the wcqe containing the failed exchange.
10200 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10201 * port. The port generates this event when an abort exchange request to an
10202 * rport fails twice in succession with no reply. The abort could be originated
10203 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10206 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10207 struct lpfc_nodelist *ndlp,
10208 struct sli4_wcqe_xri_aborted *axri)
10210 struct lpfc_vport *vport;
10211 uint32_t ext_status = 0;
10213 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10214 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10215 "3115 Node Context not found, driver "
10216 "ignoring abts err event\n");
10220 vport = ndlp->vport;
10221 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10222 "3116 Port generated FCP XRI ABORT event on "
10223 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10224 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10225 bf_get(lpfc_wcqe_xa_xri, axri),
10226 bf_get(lpfc_wcqe_xa_status, axri),
10230 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10231 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10232 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10234 ext_status = axri->parameter & IOERR_PARAM_MASK;
10235 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10236 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10237 lpfc_sli_abts_recover_port(vport, ndlp);
10241 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10242 * @phba: Pointer to HBA context object.
10243 * @pring: Pointer to driver SLI ring object.
10244 * @iocbq: Pointer to iocb object.
10246 * This function is called by the slow ring event handler
10247 * function when there is an ASYNC event iocb in the ring.
10248 * This function is called with no lock held.
10249 * Currently this function handles only temperature related
10250 * ASYNC events. The function decodes the temperature sensor
10251 * event message and posts events for the management applications.
10254 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10255 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10259 struct temp_event temp_event_data;
10260 struct Scsi_Host *shost;
10263 icmd = &iocbq->iocb;
10264 evt_code = icmd->un.asyncstat.evt_code;
10266 switch (evt_code) {
10267 case ASYNC_TEMP_WARN:
10268 case ASYNC_TEMP_SAFE:
10269 temp_event_data.data = (uint32_t) icmd->ulpContext;
10270 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10271 if (evt_code == ASYNC_TEMP_WARN) {
10272 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10273 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10274 "0347 Adapter is very hot, please take "
10275 "corrective action. temperature : %d Celsius\n",
10276 (uint32_t) icmd->ulpContext);
10278 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10279 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10280 "0340 Adapter temperature is OK now. "
10281 "temperature : %d Celsius\n",
10282 (uint32_t) icmd->ulpContext);
10285 /* Send temperature change event to applications */
10286 shost = lpfc_shost_from_vport(phba->pport);
10287 fc_host_post_vendor_event(shost, fc_get_event_number(),
10288 sizeof(temp_event_data), (char *) &temp_event_data,
10289 LPFC_NL_VENDOR_ID);
10291 case ASYNC_STATUS_CN:
10292 lpfc_sli_abts_err_handler(phba, iocbq);
10295 iocb_w = (uint32_t *) icmd;
10296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10297 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10299 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10300 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10301 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10302 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10303 pring->ringno, icmd->un.asyncstat.evt_code,
10304 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10305 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10306 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10307 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10315 * lpfc_sli4_setup - SLI ring setup function
10316 * @phba: Pointer to HBA context object.
10318 * lpfc_sli4_setup sets up rings of the SLI interface with
10319 * the number of iocbs per ring and iotags. This function is
10320 * called while the driver attaches to the HBA and before the
10321 * interrupts are enabled, so there is no need for locking.
10323 * This function always returns 0.
10326 lpfc_sli4_setup(struct lpfc_hba *phba)
10328 struct lpfc_sli_ring *pring;
10330 pring = phba->sli4_hba.els_wq->pring;
10331 pring->num_mask = LPFC_MAX_RING_MASK;
10332 pring->prt[0].profile = 0; /* Mask 0 */
10333 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10334 pring->prt[0].type = FC_TYPE_ELS;
10335 pring->prt[0].lpfc_sli_rcv_unsol_event =
10336 lpfc_els_unsol_event;
10337 pring->prt[1].profile = 0; /* Mask 1 */
10338 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10339 pring->prt[1].type = FC_TYPE_ELS;
10340 pring->prt[1].lpfc_sli_rcv_unsol_event =
10341 lpfc_els_unsol_event;
10342 pring->prt[2].profile = 0; /* Mask 2 */
10343 /* NameServer Inquiry */
10344 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10346 pring->prt[2].type = FC_TYPE_CT;
10347 pring->prt[2].lpfc_sli_rcv_unsol_event =
10348 lpfc_ct_unsol_event;
10349 pring->prt[3].profile = 0; /* Mask 3 */
10350 /* NameServer response */
10351 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10353 pring->prt[3].type = FC_TYPE_CT;
10354 pring->prt[3].lpfc_sli_rcv_unsol_event =
10355 lpfc_ct_unsol_event;
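/*
 * Example (editor's conceptual sketch): on receive, the prt[] masks
 * registered above are matched against the R_CTL/TYPE of the incoming
 * frame to pick the unsolicited-event handler, roughly:
 *
 *	for (i = 0; i < pring->num_mask; i++)
 *		if (pring->prt[i].rctl == rctl &&
 *		    pring->prt[i].type == type)
 *			pring->prt[i].lpfc_sli_rcv_unsol_event(phba,
 *							pring, saveq);
 */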
10360 * lpfc_sli_setup - SLI ring setup function
10361 * @phba: Pointer to HBA context object.
10363 * lpfc_sli_setup sets up rings of the SLI interface with
10364 * the number of iocbs per ring and iotags. This function is
10365 * called while the driver attaches to the HBA and before the
10366 * interrupts are enabled, so there is no need for locking.
10368 * This function always returns 0. SLI3 only.
10371 lpfc_sli_setup(struct lpfc_hba *phba)
10373 int i, totiocbsize = 0;
10374 struct lpfc_sli *psli = &phba->sli;
10375 struct lpfc_sli_ring *pring;
10377 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10378 psli->sli_flag = 0;
10380 psli->iocbq_lookup = NULL;
10381 psli->iocbq_lookup_len = 0;
10382 psli->last_iotag = 0;
10384 for (i = 0; i < psli->num_rings; i++) {
10385 pring = &psli->sli3_ring[i];
10387 case LPFC_FCP_RING: /* ring 0 - FCP */
10388 /* numCiocb and numRiocb are used in config_port */
10389 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10390 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10391 pring->sli.sli3.numCiocb +=
10392 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10393 pring->sli.sli3.numRiocb +=
10394 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10395 pring->sli.sli3.numCiocb +=
10396 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10397 pring->sli.sli3.numRiocb +=
10398 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10399 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10400 SLI3_IOCB_CMD_SIZE :
10401 SLI2_IOCB_CMD_SIZE;
10402 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10403 SLI3_IOCB_RSP_SIZE :
10404 SLI2_IOCB_RSP_SIZE;
10405 pring->iotag_ctr = 0;
10407 (phba->cfg_hba_queue_depth * 2);
10408 pring->fast_iotag = pring->iotag_max;
10409 pring->num_mask = 0;
10411 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10412 /* numCiocb and numRiocb are used in config_port */
10413 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10414 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10415 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10416 SLI3_IOCB_CMD_SIZE :
10417 SLI2_IOCB_CMD_SIZE;
10418 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10419 SLI3_IOCB_RSP_SIZE :
10420 SLI2_IOCB_RSP_SIZE;
10421 pring->iotag_max = phba->cfg_hba_queue_depth;
10422 pring->num_mask = 0;
10424 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10425 /* numCiocb and numRiocb are used in config_port */
10426 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10427 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10428 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10429 SLI3_IOCB_CMD_SIZE :
10430 SLI2_IOCB_CMD_SIZE;
10431 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10432 SLI3_IOCB_RSP_SIZE :
10433 SLI2_IOCB_RSP_SIZE;
10434 pring->fast_iotag = 0;
10435 pring->iotag_ctr = 0;
10436 pring->iotag_max = 4096;
10437 pring->lpfc_sli_rcv_async_status =
10438 lpfc_sli_async_event_handler;
10439 pring->num_mask = LPFC_MAX_RING_MASK;
10440 pring->prt[0].profile = 0; /* Mask 0 */
10441 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10442 pring->prt[0].type = FC_TYPE_ELS;
10443 pring->prt[0].lpfc_sli_rcv_unsol_event =
10444 lpfc_els_unsol_event;
10445 pring->prt[1].profile = 0; /* Mask 1 */
10446 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10447 pring->prt[1].type = FC_TYPE_ELS;
10448 pring->prt[1].lpfc_sli_rcv_unsol_event =
10449 lpfc_els_unsol_event;
10450 pring->prt[2].profile = 0; /* Mask 2 */
10451 /* NameServer Inquiry */
10452 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10454 pring->prt[2].type = FC_TYPE_CT;
10455 pring->prt[2].lpfc_sli_rcv_unsol_event =
10456 lpfc_ct_unsol_event;
10457 pring->prt[3].profile = 0; /* Mask 3 */
10458 /* NameServer response */
10459 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10461 pring->prt[3].type = FC_TYPE_CT;
10462 pring->prt[3].lpfc_sli_rcv_unsol_event =
10463 lpfc_ct_unsol_event;
10466 totiocbsize += (pring->sli.sli3.numCiocb *
10467 pring->sli.sli3.sizeCiocb) +
10468 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10470 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10471 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10472 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10473 "SLI2 SLIM Data: x%x x%lx\n",
10474 phba->brd_no, totiocbsize,
10475 (unsigned long) MAX_SLIM_IOCB_SIZE);
10477 if (phba->cfg_multi_ring_support == 2)
10478 lpfc_extra_ring_setup(phba);
10484 * lpfc_sli4_queue_init - Queue initialization function
10485 * @phba: Pointer to HBA context object.
10487 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10488 * ring. This function also initializes ring indices of each ring.
10489 * This function is called during the initialization of the SLI
10490 * interface of an HBA.
10491 * This function is called with no lock held and always returns
10495 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10497 struct lpfc_sli *psli;
10498 struct lpfc_sli_ring *pring;
10502 spin_lock_irq(&phba->hbalock);
10503 INIT_LIST_HEAD(&psli->mboxq);
10504 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10505 /* Initialize list headers for txq and txcmplq as double linked lists */
10506 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10507 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
10509 pring->ringno = LPFC_FCP_RING;
10510 pring->txcmplq_cnt = 0;
10511 INIT_LIST_HEAD(&pring->txq);
10512 INIT_LIST_HEAD(&pring->txcmplq);
10513 INIT_LIST_HEAD(&pring->iocb_continueq);
10514 spin_lock_init(&pring->ring_lock);
10516 pring = phba->sli4_hba.els_wq->pring;
10518 pring->ringno = LPFC_ELS_RING;
10519 pring->txcmplq_cnt = 0;
10520 INIT_LIST_HEAD(&pring->txq);
10521 INIT_LIST_HEAD(&pring->txcmplq);
10522 INIT_LIST_HEAD(&pring->iocb_continueq);
10523 spin_lock_init(&pring->ring_lock);
10525 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10526 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10527 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10529 pring->ringno = LPFC_FCP_RING;
10530 pring->txcmplq_cnt = 0;
10531 INIT_LIST_HEAD(&pring->txq);
10532 INIT_LIST_HEAD(&pring->txcmplq);
10533 INIT_LIST_HEAD(&pring->iocb_continueq);
10534 spin_lock_init(&pring->ring_lock);
10536 pring = phba->sli4_hba.nvmels_wq->pring;
10538 pring->ringno = LPFC_ELS_RING;
10539 pring->txcmplq_cnt = 0;
10540 INIT_LIST_HEAD(&pring->txq);
10541 INIT_LIST_HEAD(&pring->txcmplq);
10542 INIT_LIST_HEAD(&pring->iocb_continueq);
10543 spin_lock_init(&pring->ring_lock);
10546 spin_unlock_irq(&phba->hbalock);
10550 * lpfc_sli_queue_init - Queue initialization function
10551 * @phba: Pointer to HBA context object.
10553 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10554 * ring. This function also initializes ring indices of each ring.
10555 * This function is called during the initialization of the SLI
10556 * interface of an HBA.
10557 * This function is called with no lock held and always returns
10561 lpfc_sli_queue_init(struct lpfc_hba *phba)
10563 struct lpfc_sli *psli;
10564 struct lpfc_sli_ring *pring;
10568 spin_lock_irq(&phba->hbalock);
10569 INIT_LIST_HEAD(&psli->mboxq);
10570 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10571 /* Initialize list headers for txq and txcmplq as double linked lists */
10572 for (i = 0; i < psli->num_rings; i++) {
10573 pring = &psli->sli3_ring[i];
10575 pring->sli.sli3.next_cmdidx = 0;
10576 pring->sli.sli3.local_getidx = 0;
10577 pring->sli.sli3.cmdidx = 0;
10578 INIT_LIST_HEAD(&pring->iocb_continueq);
10579 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10580 INIT_LIST_HEAD(&pring->postbufq);
10582 INIT_LIST_HEAD(&pring->txq);
10583 INIT_LIST_HEAD(&pring->txcmplq);
10584 spin_lock_init(&pring->ring_lock);
10586 spin_unlock_irq(&phba->hbalock);
10590 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10591 * @phba: Pointer to HBA context object.
10593 * This routine flushes the mailbox command sub-system. It will unconditionally
10594 * flush all the mailbox commands in the three possible stages of the mailbox
10595 * command sub-system: the pending mailbox command queue; the outstanding mailbox
10596 * command; and the completed mailbox command queue. It is the caller's
10597 * responsibility to make sure that the driver is in the proper state to flush
10598 * the mailbox command sub-system. Namely, the posting of mailbox commands into
10599 * the pending mailbox command queue from the various clients must be stopped;
10600 * either the HBA is in a state in which it will never work on the outstanding
10601 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10602 * mailbox command has been completed.
10605 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10607 LIST_HEAD(completions);
10608 struct lpfc_sli *psli = &phba->sli;
10610 unsigned long iflag;
10612 /* Disable softirqs, including timers from obtaining phba->hbalock */
10613 local_bh_disable();
10615 /* Flush all the mailbox commands in the mbox system */
10616 spin_lock_irqsave(&phba->hbalock, iflag);
10618 /* The pending mailbox command queue */
10619 list_splice_init(&phba->sli.mboxq, &completions);
10620 /* The outstanding active mailbox command */
10621 if (psli->mbox_active) {
10622 list_add_tail(&psli->mbox_active->list, &completions);
10623 psli->mbox_active = NULL;
10624 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10626 /* The completed mailbox command queue */
10627 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10628 spin_unlock_irqrestore(&phba->hbalock, iflag);
10630 /* Enable softirqs again, done with phba->hbalock */
10633 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10634 while (!list_empty(&completions)) {
10635 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10636 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10637 if (pmb->mbox_cmpl)
10638 pmb->mbox_cmpl(phba, pmb);
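/*
 * Example (editor's sketch; "example_mbox_cmpl" is hypothetical, and
 * the use of the driver's mbox mempool is assumed): any mbox_cmpl
 * handler must tolerate the MBX_NOT_FINISHED status that this flush
 * assigns before invoking it:
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba,
 *				      LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		...
 *	}
 */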
10643 * lpfc_sli_host_down - Vport cleanup function
10644 * @vport: Pointer to virtual port object.
10646 * lpfc_sli_host_down is called to clean up the resources
10647 * associated with a vport before destroying virtual
10648 * port data structures.
10649 * This function performs the following operations:
10650 * - Free discovery resources associated with this virtual
10652 * - Free iocbs associated with this virtual port in
10654 * - Send abort for all iocb commands associated with this
10655 * vport in txcmplq.
10657 * This function is called with no lock held and always returns 1.
10660 lpfc_sli_host_down(struct lpfc_vport *vport)
10662 LIST_HEAD(completions);
10663 struct lpfc_hba *phba = vport->phba;
10664 struct lpfc_sli *psli = &phba->sli;
10665 struct lpfc_queue *qp = NULL;
10666 struct lpfc_sli_ring *pring;
10667 struct lpfc_iocbq *iocb, *next_iocb;
10669 unsigned long flags = 0;
10670 uint16_t prev_pring_flag;
10672 lpfc_cleanup_discovery_resources(vport);
10674 spin_lock_irqsave(&phba->hbalock, flags);
10677 * Error everything on the txq since these iocbs
10678 * have not been given to the FW yet.
10679 * Also issue ABTS for everything on the txcmplq
10681 if (phba->sli_rev != LPFC_SLI_REV4) {
10682 for (i = 0; i < psli->num_rings; i++) {
10683 pring = &psli->sli3_ring[i];
10684 prev_pring_flag = pring->flag;
10685 /* Only slow rings */
10686 if (pring->ringno == LPFC_ELS_RING) {
10687 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10688 /* Set the lpfc data pending flag */
10689 set_bit(LPFC_DATA_READY, &phba->data_flags);
10691 list_for_each_entry_safe(iocb, next_iocb,
10692 &pring->txq, list) {
10693 if (iocb->vport != vport)
10695 list_move_tail(&iocb->list, &completions);
10697 list_for_each_entry_safe(iocb, next_iocb,
10698 &pring->txcmplq, list) {
10699 if (iocb->vport != vport)
10701 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10703 pring->flag = prev_pring_flag;
10706 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10710 if (pring == phba->sli4_hba.els_wq->pring) {
10711 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10712 /* Set the lpfc data pending flag */
10713 set_bit(LPFC_DATA_READY, &phba->data_flags);
10715 prev_pring_flag = pring->flag;
10716 spin_lock_irq(&pring->ring_lock);
10717 list_for_each_entry_safe(iocb, next_iocb,
10718 &pring->txq, list) {
10719 if (iocb->vport != vport)
10721 list_move_tail(&iocb->list, &completions);
10723 spin_unlock_irq(&pring->ring_lock);
10724 list_for_each_entry_safe(iocb, next_iocb,
10725 &pring->txcmplq, list) {
10726 if (iocb->vport != vport)
10728 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10730 pring->flag = prev_pring_flag;
10733 spin_unlock_irqrestore(&phba->hbalock, flags);
10735 /* Cancel all the IOCBs from the completions list */
10736 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10742 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10743 * @phba: Pointer to HBA context object.
10745 * This function cleans up all iocb, buffers, mailbox commands
10746 * while shutting down the HBA. This function is called with no
10747 * lock held and always returns 1.
10748 * This function does the following to cleanup driver resources:
10749 * - Free discovery resources for each virtual port
10750 * - Cleanup any pending fabric iocbs
10751 * - Iterate through the iocb txq and free each entry
10753 * - Free up any buffer posted to the HBA
10754 * - Free mailbox commands in the mailbox queue.
10757 lpfc_sli_hba_down(struct lpfc_hba *phba)
10759 LIST_HEAD(completions);
10760 struct lpfc_sli *psli = &phba->sli;
10761 struct lpfc_queue *qp = NULL;
10762 struct lpfc_sli_ring *pring;
10763 struct lpfc_dmabuf *buf_ptr;
10764 unsigned long flags = 0;
10767 /* Shutdown the mailbox command sub-system */
10768 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10770 lpfc_hba_down_prep(phba);
10772 /* Disable softirqs, including timers from obtaining phba->hbalock */
10773 local_bh_disable();
10775 lpfc_fabric_abort_hba(phba);
10777 spin_lock_irqsave(&phba->hbalock, flags);
10780 * Error everything on the txq since these iocbs
10781 * have not been given to the FW yet.
10783 if (phba->sli_rev != LPFC_SLI_REV4) {
10784 for (i = 0; i < psli->num_rings; i++) {
10785 pring = &psli->sli3_ring[i];
10786 /* Only slow rings */
10787 if (pring->ringno == LPFC_ELS_RING) {
10788 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10789 /* Set the lpfc data pending flag */
10790 set_bit(LPFC_DATA_READY, &phba->data_flags);
10792 list_splice_init(&pring->txq, &completions);
10795 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10799 spin_lock_irq(&pring->ring_lock);
10800 list_splice_init(&pring->txq, &completions);
10801 spin_unlock_irq(&pring->ring_lock);
10802 if (pring == phba->sli4_hba.els_wq->pring) {
10803 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10804 /* Set the lpfc data pending flag */
10805 set_bit(LPFC_DATA_READY, &phba->data_flags);
10809 spin_unlock_irqrestore(&phba->hbalock, flags);
10811 /* Cancel all the IOCBs from the completions list */
10812 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10815 spin_lock_irqsave(&phba->hbalock, flags);
10816 list_splice_init(&phba->elsbuf, &completions);
10817 phba->elsbuf_cnt = 0;
10818 phba->elsbuf_prev_cnt = 0;
10819 spin_unlock_irqrestore(&phba->hbalock, flags);
10821 while (!list_empty(&completions)) {
10822 list_remove_head(&completions, buf_ptr,
10823 struct lpfc_dmabuf, list);
10824 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10828 /* Enable softirqs again, done with phba->hbalock */
10831 /* Return any active mbox cmds */
10832 del_timer_sync(&psli->mbox_tmo);
10834 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10835 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10836 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10842 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10843 * @srcp: Source memory pointer.
10844 * @destp: Destination memory pointer.
10845 * @cnt: Number of words required to be copied.
10847 * This function is used for copying data between driver memory
10848 * and the SLI memory. This function also changes the endianness
10849 * of each word if native endianness is different from SLI
10850 * endianness. This function can be called with or without
10854 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10856 uint32_t *src = srcp;
10857 uint32_t *dest = destp;
10861 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10863 ldata = le32_to_cpu(ldata);
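/*
 * Editor's sketch of the elided remainder of the copy loop, assuming
 * the usual word-at-a-time pattern described above:
 *
 *	ldata = *src;
 *	ldata = le32_to_cpu(ldata);
 *	*dest = ldata;
 *	src++;
 *	dest++;
 */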
10872 * lpfc_sli_bemem_bcopy - SLI memory copy function
10873 * @srcp: Source memory pointer.
10874 * @destp: Destination memory pointer.
10875 * @cnt: Number of words required to be copied.
10877 * This function is used for copying data between a data structure
10878 * with big endian representation to local endianness.
10879 * This function can be called with or without lock.
10882 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10884 uint32_t *src = srcp;
10885 uint32_t *dest = destp;
10889 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10891 ldata = be32_to_cpu(ldata);
10899 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10900 * @phba: Pointer to HBA context object.
10901 * @pring: Pointer to driver SLI ring object.
10902 * @mp: Pointer to driver buffer object.
10904 * This function is called with no lock held.
10905 * It always returns zero after adding the buffer to the postbufq
10909 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10910 struct lpfc_dmabuf *mp)
10912 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10914 spin_lock_irq(&phba->hbalock);
10915 list_add_tail(&mp->list, &pring->postbufq);
10916 pring->postbufq_cnt++;
10917 spin_unlock_irq(&phba->hbalock);
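/*
 * Example (editor's sketch): a buffer posted here is later recovered
 * by its DMA address when the unsolicited event arrives:
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
 */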
10922 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10923 * @phba: Pointer to HBA context object.
10925 * When HBQ is enabled, buffers are searched based on tags. This function
10926 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
10927 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10928 * does not conflict with tags of buffers posted for unsolicited events.
10929 * The function returns the allocated tag. The function is called with
10933 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10935 spin_lock_irq(&phba->hbalock);
10936 phba->buffer_tag_count++;
10938 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10939 * from a tag assigned by HBQ.
10941 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10942 spin_unlock_irq(&phba->hbalock);
10943 return phba->buffer_tag_count;
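/*
 * Example (editor's sketch): tag a buffer before posting it with
 * CMD_QUE_XRI64_CX, then look it up when the response iocb returns
 * the tag:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */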
10947 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10948 * @phba: Pointer to HBA context object.
10949 * @pring: Pointer to driver SLI ring object.
10950 * @tag: Buffer tag.
10952 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10953 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10954 * iocb is posted to the response ring with the tag of the buffer.
10955 * This function searches the pring->postbufq list using the tag
10956 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10957 * iocb. If the buffer is found, the lpfc_dmabuf object of the
10958 * buffer is returned to the caller; otherwise NULL is returned.
10959 * This function is called with no lock held.
10961 struct lpfc_dmabuf *
10962 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10965 struct lpfc_dmabuf *mp, *next_mp;
10966 struct list_head *slp = &pring->postbufq;
10968 /* Search postbufq, from the beginning, looking for a match on tag */
10969 spin_lock_irq(&phba->hbalock);
10970 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10971 if (mp->buffer_tag == tag) {
10972 list_del_init(&mp->list);
10973 pring->postbufq_cnt--;
10974 spin_unlock_irq(&phba->hbalock);
10979 spin_unlock_irq(&phba->hbalock);
10980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10981 "0402 Cannot find virtual addr for buffer tag on "
10982 "ring %d Data x%lx x%p x%p x%x\n",
10983 pring->ringno, (unsigned long) tag,
10984 slp->next, slp->prev, pring->postbufq_cnt);
10990 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10991 * @phba: Pointer to HBA context object.
10992 * @pring: Pointer to driver SLI ring object.
10993 * @phys: DMA address of the buffer.
10995 * This function searches the buffer list using the dma_address
10996 * of unsolicited event to find the driver's lpfc_dmabuf object
10997 * corresponding to the dma_address. The function returns the
10998 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
10999 * This function is called by the ct and els unsolicited event
11000 * handlers to get the buffer associated with the unsolicited
11003 * This function is called with no lock held.
11005 struct lpfc_dmabuf *
11006 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11009 struct lpfc_dmabuf *mp, *next_mp;
11010 struct list_head *slp = &pring->postbufq;
11012 /* Search postbufq, from the beginning, looking for a match on phys */
11013 spin_lock_irq(&phba->hbalock);
11014 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11015 if (mp->phys == phys) {
11016 list_del_init(&mp->list);
11017 pring->postbufq_cnt--;
11018 spin_unlock_irq(&phba->hbalock);
11023 spin_unlock_irq(&phba->hbalock);
11024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11025 "0410 Cannot find virtual addr for mapped buf on "
11026 "ring %d Data x%llx x%p x%p x%x\n",
11027 pring->ringno, (unsigned long long)phys,
11028 slp->next, slp->prev, pring->postbufq_cnt);
11033 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11034 * @phba: Pointer to HBA context object.
11035 * @cmdiocb: Pointer to driver command iocb object.
11036 * @rspiocb: Pointer to driver response iocb object.
11038 * This function is the completion handler for the abort iocbs for
11039 * ELS commands. This function is called from the ELS ring event
11040 * handler with no lock held. This function frees memory resources
11041 * associated with the abort iocb.
11044 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11045 struct lpfc_iocbq *rspiocb)
11047 IOCB_t *irsp = &rspiocb->iocb;
11048 uint16_t abort_iotag, abort_context;
11049 struct lpfc_iocbq *abort_iocb = NULL;
11051 if (irsp->ulpStatus) {
11054 * Assume that the port already completed and returned, or
11055 * will return the iocb. Just log the message.
11057 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11058 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11060 spin_lock_irq(&phba->hbalock);
11061 if (phba->sli_rev < LPFC_SLI_REV4) {
11062 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11063 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11064 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11065 spin_unlock_irq(&phba->hbalock);
11068 if (abort_iotag != 0 &&
11069 abort_iotag <= phba->sli.last_iotag)
11071 phba->sli.iocbq_lookup[abort_iotag];
11073 /* For sli4 the abort_tag is the XRI,
11074 * so the abort routine puts the iotag of the iocb
11075 * being aborted in the context field of the abort
11078 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11080 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11081 "0327 Cannot abort els iocb %p "
11082 "with tag %x context %x, abort status %x, "
11084 abort_iocb, abort_iotag, abort_context,
11085 irsp->ulpStatus, irsp->un.ulpWord[4]);
11087 spin_unlock_irq(&phba->hbalock);
11090 lpfc_sli_release_iocbq(phba, cmdiocb);
11095 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11096 * @phba: Pointer to HBA context object.
11097 * @cmdiocb: Pointer to driver command iocb object.
11098 * @rspiocb: Pointer to driver response iocb object.
11100 * The function is called from SLI ring event handler with no
11101 * lock held. This function is the completion handler for ELS commands
11102 * which are aborted. The function frees memory resources used for
11103 * the aborted ELS commands.
11106 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11107 struct lpfc_iocbq *rspiocb)
11109 IOCB_t *irsp = &rspiocb->iocb;
11111 /* ELS cmd tag <ulpIoTag> completes */
11112 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11113 "0139 Ignoring ELS cmd tag x%x completion Data: "
11115 irsp->ulpIoTag, irsp->ulpStatus,
11116 irsp->un.ulpWord[4], irsp->ulpTimeout);
11117 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11118 lpfc_ct_free_iocb(phba, cmdiocb);
11120 lpfc_els_free_iocb(phba, cmdiocb);
11125 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11126 * @phba: Pointer to HBA context object.
11127 * @pring: Pointer to driver SLI ring object.
11128 * @cmdiocb: Pointer to driver command iocb object.
11130 * This function issues an abort iocb for the provided command iocb down to
11131 * the port. Unless the outstanding command iocb is itself an abort
11132 * request, this function issues the abort unconditionally. This function is
11133 * called with hbalock held. The function returns 0 when it fails due to
11134 * memory allocation failure or when the command iocb is an abort request.
11137 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11138 struct lpfc_iocbq *cmdiocb)
11140 struct lpfc_vport *vport = cmdiocb->vport;
11141 struct lpfc_iocbq *abtsiocbp;
11142 IOCB_t *icmd = NULL;
11143 IOCB_t *iabt = NULL;
11145 unsigned long iflags;
11146 struct lpfc_nodelist *ndlp;
11148 lockdep_assert_held(&phba->hbalock);
11151 * There are certain command types we don't want to abort. And we
11152 * don't want to abort commands that are already in the process of
11155 icmd = &cmdiocb->iocb;
11156 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11157 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11158 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11161 /* issue ABTS for this IOCB based on iotag */
11162 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11163 if (abtsiocbp == NULL)
11166 /* This signals the response to set the correct status
11167 * before calling the completion handler
11169 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11171 iabt = &abtsiocbp->iocb;
11172 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11173 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11174 if (phba->sli_rev == LPFC_SLI_REV4) {
11175 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11176 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11178 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11179 if (pring->ringno == LPFC_ELS_RING) {
11180 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11181 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11185 iabt->ulpClass = icmd->ulpClass;
11187 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11188 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11189 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11190 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11191 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11192 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11194 if (phba->link_state >= LPFC_LINK_UP)
11195 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11197 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11199 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11200 abtsiocbp->vport = vport;
11202 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11203 "0339 Abort xri x%x, original iotag x%x, "
11204 "abort cmd iotag x%x\n",
11205 iabt->un.acxri.abortIoTag,
11206 iabt->un.acxri.abortContextTag,
11209 if (phba->sli_rev == LPFC_SLI_REV4) {
11210 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11211 if (unlikely(pring == NULL))
11213 /* Note: both hbalock and ring_lock need to be set here */
11214 spin_lock_irqsave(&pring->ring_lock, iflags);
11215 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11217 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11219 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11224 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11227 * The caller of this routine should check for IOCB_ERROR
11228 * and handle it properly. This routine no longer removes the
11229 * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
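/*
 * Example (editor's sketch): a caller holding hbalock checks the
 * return code itself; on IOCB_ERROR the aborted iocb remains on the
 * txcmplq and no completion is invoked here:
 *
 *	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
 *	if (retval == IOCB_ERROR)
 *		... recover without expecting an abort completion ...
 */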
11235 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11236 * @phba: Pointer to HBA context object.
11237 * @pring: Pointer to driver SLI ring object.
11238 * @cmdiocb: Pointer to driver command iocb object.
11240 * This function issues an abort iocb for the provided command iocb. In case
11241 * of unloading, the abort iocb will not be issued to commands on the ELS
11242 * ring. Instead, the completion callback of those commands shall be changed
11243 * so that nothing happens when they finish. This function is called with
11244 * hbalock held. The function returns 0 when the command iocb is an abort
11248 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11249 struct lpfc_iocbq *cmdiocb)
11251 struct lpfc_vport *vport = cmdiocb->vport;
11252 int retval = IOCB_ERROR;
11253 IOCB_t *icmd = NULL;
11255 lockdep_assert_held(&phba->hbalock);
11258 * There are certain command types we don't want to abort. And we
11259 * don't want to abort commands that are already in the process of
11262 icmd = &cmdiocb->iocb;
11263 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11264 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11265 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11269 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11270 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11272 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11273 goto abort_iotag_exit;
11277 * If we're unloading, don't abort iocb on the ELS ring, but change
11278 * the callback so that nothing happens when it finishes.
11280 if ((vport->load_flag & FC_UNLOADING) &&
11281 (pring->ringno == LPFC_ELS_RING)) {
11282 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11283 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11285 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11286 goto abort_iotag_exit;
11289 /* Now, we try to issue the abort to the cmdiocb out */
11290 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11294 * The caller of this routine should check for IOCB_ERROR
11295 * and handle it properly. This routine no longer removes the
11296 * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
11302 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11303 * @phba: pointer to lpfc HBA data structure.
11305 * This routine will abort all pending and outstanding iocbs to an HBA.
11308 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11310 struct lpfc_sli *psli = &phba->sli;
11311 struct lpfc_sli_ring *pring;
11312 struct lpfc_queue *qp = NULL;
11315 if (phba->sli_rev != LPFC_SLI_REV4) {
11316 for (i = 0; i < psli->num_rings; i++) {
11317 pring = &psli->sli3_ring[i];
11318 lpfc_sli_abort_iocb_ring(phba, pring);
11322 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11326 lpfc_sli_abort_iocb_ring(phba, pring);
11331 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11332 * @iocbq: Pointer to driver iocb object.
11333 * @vport: Pointer to driver virtual port object.
11334 * @tgt_id: SCSI ID of the target.
11335 * @lun_id: LUN ID of the scsi device.
11336 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11338 * This function acts as an iocb filter for functions which abort or count
11339 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11340 * 0 if the filtering criteria are met for the given iocb and will return
11341 * 1 if the filtering criteria are not met.
11342 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11343 * given iocb is for the SCSI device specified by vport, tgt_id and
11344 * lun_id parameter.
11345 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11346 * given iocb is for the SCSI target specified by vport and tgt_id
11348 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11349 * given iocb is for the SCSI host associated with the given vport.
11350 * This function is called with no locks held.
11353 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11354 uint16_t tgt_id, uint64_t lun_id,
11355 lpfc_ctx_cmd ctx_cmd)
11357 struct lpfc_io_buf *lpfc_cmd;
11360 if (iocbq->vport != vport)
11363 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11364 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11367 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11369 if (lpfc_cmd->pCmd == NULL)
11374 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11375 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11376 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11380 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11381 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11384 case LPFC_CTX_HOST:
11388 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11389 __func__, ctx_cmd);
11397 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11398 * @vport: Pointer to virtual port.
11399 * @tgt_id: SCSI ID of the target.
11400 * @lun_id: LUN ID of the scsi device.
11401 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11403 * This function returns the number of FCP commands pending for the vport.
11404 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11405 * commands pending on the vport associated with the SCSI device specified
11406 * by the tgt_id and lun_id parameters.
11407 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11408 * commands pending on the vport associated with the SCSI target specified
11409 * by the tgt_id parameter.
11410 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11411 * commands pending on the vport.
11412 * This function returns the number of iocbs which satisfy the filter.
11413 * This function is called without any lock held.
11416 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11417 lpfc_ctx_cmd ctx_cmd)
11419 struct lpfc_hba *phba = vport->phba;
11420 struct lpfc_iocbq *iocbq;
11423 spin_lock_irq(&phba->hbalock);
11424 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11425 iocbq = phba->sli.iocbq_lookup[i];
11427 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11431 spin_unlock_irq(&phba->hbalock);
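/*
 * Example (editor's sketch): poll the count of FCP I/Os still pending
 * on one LUN, e.g. while waiting for a LUN reset to drain:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 */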
11437 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11438 * @phba: Pointer to HBA context object
11439 * @cmdiocb: Pointer to command iocb object.
11440 * @rspiocb: Pointer to response iocb object.
11442 * This function is called when an aborted FCP iocb completes. This
11443 * function is called by the ring event handler with no lock held.
11444 * This function frees the iocb.
11447 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11448 struct lpfc_iocbq *rspiocb)
11450 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11451 "3096 ABORT_XRI_CN completing on rpi x%x "
11452 "original iotag x%x, abort cmd iotag x%x "
11453 "status 0x%x, reason 0x%x\n",
11454 cmdiocb->iocb.un.acxri.abortContextTag,
11455 cmdiocb->iocb.un.acxri.abortIoTag,
11456 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11457 rspiocb->iocb.un.ulpWord[4]);
11458 lpfc_sli_release_iocbq(phba, cmdiocb);
11463 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11464 * @vport: Pointer to virtual port.
11465 * @pring: Pointer to driver SLI ring object.
11466 * @tgt_id: SCSI ID of the target.
11467 * @lun_id: LUN ID of the scsi device.
11468 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11470 * This function sends an abort command for every SCSI command
11471 * associated with the given virtual port pending on the ring
11472 * filtered by lpfc_sli_validate_fcp_iocb function.
11473 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11474 * FCP iocbs associated with lun specified by tgt_id and lun_id
11476 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11477 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11478 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11479 * FCP iocbs associated with virtual port.
11480 * This function returns the number of iocbs it failed to abort.
11481 * This function is called with no locks held.
11484 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11485 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11487 struct lpfc_hba *phba = vport->phba;
11488 struct lpfc_iocbq *iocbq;
11489 struct lpfc_iocbq *abtsiocb;
11490 struct lpfc_sli_ring *pring_s4;
11491 IOCB_t *cmd = NULL;
11492 int errcnt = 0, ret_val = 0;
11495 /* all I/Os are in the process of being flushed */
11496 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11499 for (i = 1; i <= phba->sli.last_iotag; i++) {
11500 iocbq = phba->sli.iocbq_lookup[i];
11502 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11507 * If the iocbq is already being aborted, don't take a second
11508 * action, but do count it.
11510 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11513 /* issue ABTS for this IOCB based on iotag */
11514 abtsiocb = lpfc_sli_get_iocbq(phba);
11515 if (abtsiocb == NULL) {
11520 /* indicate the IO is being aborted by the driver. */
11521 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11523 cmd = &iocbq->iocb;
11524 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11525 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11526 if (phba->sli_rev == LPFC_SLI_REV4)
11527 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11529 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11530 abtsiocb->iocb.ulpLe = 1;
11531 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11532 abtsiocb->vport = vport;
11534 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11535 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11536 if (iocbq->iocb_flag & LPFC_IO_FCP)
11537 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11538 if (iocbq->iocb_flag & LPFC_IO_FOF)
11539 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11541 if (lpfc_is_link_up(phba))
11542 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11544 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11546 /* Setup callback routine and issue the command. */
11547 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11548 if (phba->sli_rev == LPFC_SLI_REV4) {
11549 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11552 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11555 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11557 if (ret_val == IOCB_ERROR) {
11558 lpfc_sli_release_iocbq(phba, abtsiocb);
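/*
 * Example (editor's sketch): abort every FCP command queued to one
 * target; per the kernel-doc above, lun_id is ignored for
 * LPFC_CTX_TGT, and the (elided) return value counts the iocbs the
 * driver failed to abort, so zero means all aborts were issued:
 *
 *	cnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0,
 *				  LPFC_CTX_TGT);
 */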
11568 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11569 * @vport: Pointer to virtual port.
11570 * @pring: Pointer to driver SLI ring object.
11571 * @tgt_id: SCSI ID of the target.
11572 * @lun_id: LUN ID of the scsi device.
11573 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11575 * This function sends an abort command for every SCSI command
11576 * associated with the given virtual port pending on the ring
11577 * filtered by lpfc_sli_validate_fcp_iocb function.
11578 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11579 * FCP iocbs associated with lun specified by tgt_id and lun_id
11581 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11582 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11583 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11584 * FCP iocbs associated with virtual port.
11585 * This function returns the number of iocbs it aborted.
11586 * This function is called with no locks held right after a taskmgmt
11590 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11591 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11593 struct lpfc_hba *phba = vport->phba;
11594 struct lpfc_io_buf *lpfc_cmd;
11595 struct lpfc_iocbq *abtsiocbq;
11596 struct lpfc_nodelist *ndlp;
11597 struct lpfc_iocbq *iocbq;
11599 int sum, i, ret_val;
11600 unsigned long iflags;
11601 struct lpfc_sli_ring *pring_s4 = NULL;
11603 spin_lock_irqsave(&phba->hbalock, iflags);
11605 /* all I/Os are in the process of being flushed */
11606 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11607 spin_unlock_irqrestore(&phba->hbalock, iflags);
11612 for (i = 1; i <= phba->sli.last_iotag; i++) {
11613 iocbq = phba->sli.iocbq_lookup[i];
11615 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11619 /* Guard against IO completion being called at same time */
11620 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11621 spin_lock(&lpfc_cmd->buf_lock);
11623 if (!lpfc_cmd->pCmd) {
11624 spin_unlock(&lpfc_cmd->buf_lock);
11628 if (phba->sli_rev == LPFC_SLI_REV4) {
11630 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
11632 spin_unlock(&lpfc_cmd->buf_lock);
11635 /* Note: both hbalock and ring_lock must be set here */
11636 spin_lock(&pring_s4->ring_lock);
11640 * If the iocbq is already being aborted, don't take a second
11641 * action, but do count it.
11643 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11644 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11645 if (phba->sli_rev == LPFC_SLI_REV4)
11646 spin_unlock(&pring_s4->ring_lock);
11647 spin_unlock(&lpfc_cmd->buf_lock);
11651 /* issue ABTS for this IOCB based on iotag */
11652 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11654 if (phba->sli_rev == LPFC_SLI_REV4)
11655 spin_unlock(&pring_s4->ring_lock);
11656 spin_unlock(&lpfc_cmd->buf_lock);
11660 icmd = &iocbq->iocb;
11661 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11662 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11663 if (phba->sli_rev == LPFC_SLI_REV4)
11664 abtsiocbq->iocb.un.acxri.abortIoTag =
11665 iocbq->sli4_xritag;
11667 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11668 abtsiocbq->iocb.ulpLe = 1;
11669 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11670 abtsiocbq->vport = vport;
11672 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11673 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11674 if (iocbq->iocb_flag & LPFC_IO_FCP)
11675 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11676 if (iocbq->iocb_flag & LPFC_IO_FOF)
11677 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11679 ndlp = lpfc_cmd->rdata->pnode;
11681 if (lpfc_is_link_up(phba) &&
11682 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11683 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11685 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11687 /* Setup callback routine and issue the command. */
11688 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11691 * Indicate the IO is being aborted by the driver and set
11692 * the caller's flag into the aborted IO.
11694 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11696 if (phba->sli_rev == LPFC_SLI_REV4) {
11697 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11699 spin_unlock(&pring_s4->ring_lock);
11701 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11705 spin_unlock(&lpfc_cmd->buf_lock);
11707 if (ret_val == IOCB_ERROR)
11708 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11712 spin_unlock_irqrestore(&phba->hbalock, iflags);
11717 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11718 * @phba: Pointer to HBA context object.
11719 * @cmdiocbq: Pointer to command iocb.
11720 * @rspiocbq: Pointer to response iocb.
11722 * This function is the completion handler for iocbs issued using
11723 * lpfc_sli_issue_iocb_wait function. This function is called by the
11724 * ring event handler function without any lock held. This function
11725 * can be called from both worker thread context and interrupt
11726 * context. This function can also be called from another thread which
11727 * cleans up the SLI layer objects.
11728 * This function copies the contents of the response iocb to the
11729 * response iocb memory object provided by the caller of
11730 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11731 * sleeps for the iocb completion.
11734 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11735 struct lpfc_iocbq *cmdiocbq,
11736 struct lpfc_iocbq *rspiocbq)
11738 wait_queue_head_t *pdone_q;
11739 unsigned long iflags;
11740 struct lpfc_io_buf *lpfc_cmd;
11742 spin_lock_irqsave(&phba->hbalock, iflags);
11743 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
* A timeout has occurred for the iocb. If a timeout
* completion handler has been supplied, call it. Otherwise,
* just free the iocbq.
11751 spin_unlock_irqrestore(&phba->hbalock, iflags);
11752 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11753 cmdiocbq->wait_iocb_cmpl = NULL;
11754 if (cmdiocbq->iocb_cmpl)
11755 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11757 lpfc_sli_release_iocbq(phba, cmdiocbq);
11761 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11762 if (cmdiocbq->context2 && rspiocbq)
11763 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11764 &rspiocbq->iocb, sizeof(IOCB_t));
11766 /* Set the exchange busy flag for task management commands */
11767 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11768 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11769 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11771 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11774 pdone_q = cmdiocbq->context_un.wait_queue;
11777 spin_unlock_irqrestore(&phba->hbalock, iflags);
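/*
 * Timeout hand-off sketch (illustrative only, not driver code): the waiter
 * in lpfc_sli_issue_iocb_wait and this handler serialize on hbalock, so
 * exactly one side ends up owning cmdiocbq cleanup. The waiter's timeout
 * path reduces to:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	if (!(piocb->iocb_flag & LPFC_IO_WAKE))
 *		piocb->iocb_flag |= LPFC_IO_WAKE_TMO;	(handler cleans up)
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */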
* lpfc_chk_iocb_flg - Test IOCB flag with lock held.
* @phba: Pointer to HBA context object.
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
* This routine grabs the hbalock and then tests the iocb_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
* 0 if flag is not set.
11794 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11795 struct lpfc_iocbq *piocbq, uint32_t flag)
11797 unsigned long iflags;
11800 spin_lock_irqsave(&phba->hbalock, iflags);
11801 ret = piocbq->iocb_flag & flag;
11802 spin_unlock_irqrestore(&phba->hbalock, iflags);
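/*
 * Usage sketch (mirrors the synchronous waiter below): this helper is the
 * condition for wait_event_timeout(), with the hbalock round-trip ordering
 * the flag test against the completion handler that sets LPFC_IO_WAKE:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);
 */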
* lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
* @phba: Pointer to HBA context object.
* @ring_number: Ring number (index) on which to issue the iocb.
* @piocb: Pointer to command iocb.
* @prspiocbq: Pointer to response iocb.
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
* iocb to complete. The iocb_cmpl field of the iocb shall be used
* to handle iocbs which time out. If the field is NULL, the
* function shall free the iocbq structure. If more clean up is
* needed, the caller is expected to provide a completion function
* that will provide the needed clean up. If the iocb command is
* not completed within timeout seconds, the function will either
* free the iocbq structure (if iocb_cmpl == NULL) or execute the
* completion function set in the iocb_cmpl field and then return
* a status of IOCB_TIMEDOUT. The caller should not free the iocb
* resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using a
* non-interruptible wait.
* This function will sleep while waiting for iocb completion.
* Therefore, this function should not be called from any context
* which does not allow sleeping. For the same reason, this
* function cannot be called with interrupts disabled.
* This function assumes that the iocb completions occur while
* this function sleeps. Consequently, this function cannot be
* called from the thread which processes iocb completions for
* this ring.
* This function clears the iocb_flag of the iocb object before
* issuing the iocb, and the iocb completion handler sets this
* flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq
* by the completion handler when the command completes.
* This function returns IOCB_SUCCESS on success.
* This function is called with no lock held.
11844 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11845 uint32_t ring_number,
11846 struct lpfc_iocbq *piocb,
11847 struct lpfc_iocbq *prspiocbq,
11850 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11851 long timeleft, timeout_req = 0;
11852 int retval = IOCB_SUCCESS;
11854 struct lpfc_iocbq *iocb;
11856 int txcmplq_cnt = 0;
11857 struct lpfc_sli_ring *pring;
11858 unsigned long iflags;
11859 bool iocb_completed = true;
11861 if (phba->sli_rev >= LPFC_SLI_REV4)
11862 pring = lpfc_sli4_calc_ring(phba, piocb);
11864 pring = &phba->sli.sli3_ring[ring_number];
* If the caller has provided a response iocbq buffer, then context2
* must be NULL; otherwise it is an error.
11870 if (piocb->context2)
11872 piocb->context2 = prspiocbq;
11875 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11876 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11877 piocb->context_un.wait_queue = &done_q;
11878 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11880 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11881 if (lpfc_readl(phba->HCregaddr, &creg_val))
11883 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11884 writel(creg_val, phba->HCregaddr);
11885 readl(phba->HCregaddr); /* flush */
11888 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11889 SLI_IOCB_RET_IOCB);
11890 if (retval == IOCB_SUCCESS) {
11891 timeout_req = msecs_to_jiffies(timeout * 1000);
11892 timeleft = wait_event_timeout(done_q,
11893 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11895 spin_lock_irqsave(&phba->hbalock, iflags);
11896 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11899 * IOCB timed out. Inform the wake iocb wait
11900 * completion function and set local status
11903 iocb_completed = false;
11904 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11906 spin_unlock_irqrestore(&phba->hbalock, iflags);
11907 if (iocb_completed) {
11908 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11909 "0331 IOCB wake signaled\n");
11910 /* Note: we are not indicating if the IOCB has a success
11911 * status or not - that's for the caller to check.
* IOCB_SUCCESS means just that the command was sent and
* completed, not that it completed successfully.
11915 } else if (timeleft == 0) {
11916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11917 "0338 IOCB wait timeout error - no "
11918 "wake response Data x%x\n", timeout);
11919 retval = IOCB_TIMEDOUT;
11921 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11922 "0330 IOCB wake NOT set, "
11924 timeout, (timeleft / jiffies));
11925 retval = IOCB_TIMEDOUT;
11927 } else if (retval == IOCB_BUSY) {
11928 if (phba->cfg_log_verbose & LOG_SLI) {
11929 list_for_each_entry(iocb, &pring->txq, list) {
11932 list_for_each_entry(iocb, &pring->txcmplq, list) {
11935 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11936 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11937 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11941 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11942 "0332 IOCB wait issue failed, Data x%x\n",
11944 retval = IOCB_ERROR;
11947 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11948 if (lpfc_readl(phba->HCregaddr, &creg_val))
11950 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11951 writel(creg_val, phba->HCregaddr);
11952 readl(phba->HCregaddr); /* flush */
11956 piocb->context2 = NULL;
11958 piocb->context_un.wait_queue = NULL;
11959 piocb->iocb_cmpl = NULL;
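/*
 * Caller sketch (illustrative only, not part of the driver): issuing a
 * command on the ELS ring synchronously. The command build step is
 * elided; the key contract shown is the IOCB_TIMEDOUT rule documented
 * above, i.e. on timeout the completion path owns the command iocb and
 * the caller must not free it:
 *
 *	struct lpfc_iocbq *cmdiocbq = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	(build the command iocb in cmdiocbq here)
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc != IOCB_TIMEDOUT) {
 *		(rspiocbq->iocb now holds the response; check ulpStatus)
 *		lpfc_sli_release_iocbq(phba, cmdiocbq);
 *	}
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 */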
11964 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11965 * @phba: Pointer to HBA context object.
11966 * @pmboxq: Pointer to driver mailbox object.
11967 * @timeout: Timeout in number of seconds.
11969 * This function issues the mailbox to firmware and waits for the
11970 * mailbox command to complete. If the mailbox command is not
11971 * completed within timeout seconds, it returns MBX_TIMEOUT.
* The function waits for the mailbox completion using a
* non-interruptible wait. Caller should not free the mailbox
* resources if this function returns MBX_TIMEOUT.
* This function will sleep while waiting for mailbox completion.
* So, this function should not be called from any context which
* does not allow sleeping. For the same reason, this function
* cannot be called with interrupts disabled.
* This function assumes that the mailbox completion occurs while
* this function sleeps. So, this function cannot be called from
* the worker thread which processes mailbox completions.
* This function is called in the context of HBA management
* applications.
* This function returns MBX_SUCCESS when successful.
11987 * This function is called with no lock held.
11990 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11993 struct completion mbox_done;
11995 unsigned long flag;
11997 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11998 /* setup wake call as IOCB callback */
11999 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12001 /* setup context3 field to pass wait_queue pointer to wake function */
12002 init_completion(&mbox_done);
12003 pmboxq->context3 = &mbox_done;
12004 /* now issue the command */
12005 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12006 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12007 wait_for_completion_timeout(&mbox_done,
12008 msecs_to_jiffies(timeout * 1000));
12010 spin_lock_irqsave(&phba->hbalock, flag);
12011 pmboxq->context3 = NULL;
12013 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12014 * else do not free the resources.
12016 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12017 retval = MBX_SUCCESS;
12019 retval = MBX_TIMEOUT;
12020 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12022 spin_unlock_irqrestore(&phba->hbalock, flag);
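/*
 * Usage sketch (illustrative only): a synchronous mailbox caller. The
 * READ_REV setup via lpfc_read_rev is just an example command; the key
 * contract is that on MBX_TIMEOUT the mailbox is still owned by the
 * completion path and must not be freed here:
 *
 *	LPFC_MBOXQ_t *mboxq;
 *	int rc;
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */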
12028 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12029 * @phba: Pointer to HBA context.
* This function is called to shut down the driver's mailbox sub-system.
* It first marks the mailbox sub-system as blocked to prevent
* asynchronous mailbox commands from being issued off the pending mailbox
* command queue. If the mailbox command sub-system shutdown is due to
* HBA error conditions such as EEH or ERATT, this routine shall invoke
* the mailbox sub-system flush routine to forcefully bring down the
* mailbox sub-system. Otherwise, if it is due to a normal condition (such
* as offline or HBA function reset), this routine will wait for the
* outstanding mailbox commands to complete before invoking the mailbox
* sub-system flush routine to gracefully bring down the mailbox sub-system.
12043 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12045 struct lpfc_sli *psli = &phba->sli;
12046 unsigned long timeout;
12048 if (mbx_action == LPFC_MBX_NO_WAIT) {
12049 /* delay 100ms for port state */
12051 lpfc_sli_mbox_sys_flush(phba);
12054 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12056 /* Disable softirqs, including timers from obtaining phba->hbalock */
12057 local_bh_disable();
12059 spin_lock_irq(&phba->hbalock);
12060 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12062 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12063 /* Determine how long we might wait for the active mailbox
12064 * command to be gracefully completed by firmware.
12066 if (phba->sli.mbox_active)
12067 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12068 phba->sli.mbox_active) *
12070 spin_unlock_irq(&phba->hbalock);
12072 /* Enable softirqs again, done with phba->hbalock */
12075 while (phba->sli.mbox_active) {
12076 /* Check active mailbox complete status every 2ms */
12078 if (time_after(jiffies, timeout))
/* Timeout; let the mailbox flush routine
 * forcefully release the active mailbox command
12085 spin_unlock_irq(&phba->hbalock);
12087 /* Enable softirqs again, done with phba->hbalock */
12091 lpfc_sli_mbox_sys_flush(phba);
12095 * lpfc_sli_eratt_read - read sli-3 error attention events
12096 * @phba: Pointer to HBA context.
12098 * This function is called to read the SLI3 device error attention registers
* for possible error attention events. The caller must hold the hbalock
* with spin_lock_irq().
12102 * This function returns 1 when there is Error Attention in the Host Attention
12103 * Register and returns 0 otherwise.
12106 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12110 /* Read chip Host Attention (HA) register */
12111 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12114 if (ha_copy & HA_ERATT) {
12115 /* Read host status register to retrieve error event */
12116 if (lpfc_sli_read_hs(phba))
/* Check if a deferred error condition is active */
12120 if ((HS_FFER1 & phba->work_hs) &&
12121 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12122 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12123 phba->hba_flag |= DEFER_ERATT;
12124 /* Clear all interrupt enable conditions */
12125 writel(0, phba->HCregaddr);
12126 readl(phba->HCregaddr);
12129 /* Set the driver HA work bitmap */
12130 phba->work_ha |= HA_ERATT;
12131 /* Indicate polling handles this ERATT */
12132 phba->hba_flag |= HBA_ERATT_HANDLED;
12138 /* Set the driver HS work bitmap */
12139 phba->work_hs |= UNPLUG_ERR;
12140 /* Set the driver HA work bitmap */
12141 phba->work_ha |= HA_ERATT;
12142 /* Indicate polling handles this ERATT */
12143 phba->hba_flag |= HBA_ERATT_HANDLED;
12148 * lpfc_sli4_eratt_read - read sli-4 error attention events
12149 * @phba: Pointer to HBA context.
12151 * This function is called to read the SLI4 device error attention registers
* for possible error attention events. The caller must hold the hbalock
* with spin_lock_irq().
12155 * This function returns 1 when there is Error Attention in the Host Attention
12156 * Register and returns 0 otherwise.
12159 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12161 uint32_t uerr_sta_hi, uerr_sta_lo;
12162 uint32_t if_type, portsmphr;
12163 struct lpfc_register portstat_reg;
12166 * For now, use the SLI4 device internal unrecoverable error
12167 * registers for error attention. This can be changed later.
12169 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12171 case LPFC_SLI_INTF_IF_TYPE_0:
12172 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12174 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12176 phba->work_hs |= UNPLUG_ERR;
12177 phba->work_ha |= HA_ERATT;
12178 phba->hba_flag |= HBA_ERATT_HANDLED;
12181 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12182 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12184 "1423 HBA Unrecoverable error: "
12185 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12186 "ue_mask_lo_reg=0x%x, "
12187 "ue_mask_hi_reg=0x%x\n",
12188 uerr_sta_lo, uerr_sta_hi,
12189 phba->sli4_hba.ue_mask_lo,
12190 phba->sli4_hba.ue_mask_hi);
12191 phba->work_status[0] = uerr_sta_lo;
12192 phba->work_status[1] = uerr_sta_hi;
12193 phba->work_ha |= HA_ERATT;
12194 phba->hba_flag |= HBA_ERATT_HANDLED;
12198 case LPFC_SLI_INTF_IF_TYPE_2:
12199 case LPFC_SLI_INTF_IF_TYPE_6:
12200 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12201 &portstat_reg.word0) ||
12202 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12204 phba->work_hs |= UNPLUG_ERR;
12205 phba->work_ha |= HA_ERATT;
12206 phba->hba_flag |= HBA_ERATT_HANDLED;
12209 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12210 phba->work_status[0] =
12211 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12212 phba->work_status[1] =
12213 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12215 "2885 Port Status Event: "
12216 "port status reg 0x%x, "
12217 "port smphr reg 0x%x, "
12218 "error 1=0x%x, error 2=0x%x\n",
12219 portstat_reg.word0,
12221 phba->work_status[0],
12222 phba->work_status[1]);
12223 phba->work_ha |= HA_ERATT;
12224 phba->hba_flag |= HBA_ERATT_HANDLED;
12228 case LPFC_SLI_INTF_IF_TYPE_1:
12230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12231 "2886 HBA Error Attention on unsupported "
12232 "if type %d.", if_type);
12240 * lpfc_sli_check_eratt - check error attention events
12241 * @phba: Pointer to HBA context.
12243 * This function is called from timer soft interrupt context to check HBA's
12244 * error attention register bit for error attention events.
12246 * This function returns 1 when there is Error Attention in the Host Attention
12247 * Register and returns 0 otherwise.
12250 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12254 /* If somebody is waiting to handle an eratt, don't process it
12255 * here. The brdkill function will do this.
12257 if (phba->link_flag & LS_IGNORE_ERATT)
12260 /* Check if interrupt handler handles this ERATT */
12261 spin_lock_irq(&phba->hbalock);
12262 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12263 /* Interrupt handler has handled ERATT */
12264 spin_unlock_irq(&phba->hbalock);
12269 * If there is deferred error attention, do not check for error
12272 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12273 spin_unlock_irq(&phba->hbalock);
12277 /* If PCI channel is offline, don't process it */
12278 if (unlikely(pci_channel_offline(phba->pcidev))) {
12279 spin_unlock_irq(&phba->hbalock);
12283 switch (phba->sli_rev) {
12284 case LPFC_SLI_REV2:
12285 case LPFC_SLI_REV3:
12286 /* Read chip Host Attention (HA) register */
12287 ha_copy = lpfc_sli_eratt_read(phba);
12289 case LPFC_SLI_REV4:
/* Read device Unrecoverable Error (UERR) registers */
12291 ha_copy = lpfc_sli4_eratt_read(phba);
12294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12295 "0299 Invalid SLI revision (%d)\n",
12300 spin_unlock_irq(&phba->hbalock);
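/*
 * Usage sketch (condensed from the driver's error-attention poll timer):
 * this routine is meant for timer (softirq) context, with the worker
 * thread performing the actual recovery:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */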
12306 * lpfc_intr_state_check - Check device state for interrupt handling
12307 * @phba: Pointer to HBA context.
* This inline routine checks whether a device or its PCI slot is in a state
* in which the interrupt should be handled.
*
* This function returns 0 if the device or the PCI slot is in a state in
* which the interrupt should be handled, otherwise -EIO.
12316 lpfc_intr_state_check(struct lpfc_hba *phba)
12318 /* If the pci channel is offline, ignore all the interrupts */
12319 if (unlikely(pci_channel_offline(phba->pcidev)))
12322 /* Update device level interrupt statistics */
12323 phba->sli.slistat.sli_intr++;
12325 /* Ignore all interrupts during initialization. */
12326 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12333 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12334 * @irq: Interrupt number.
12335 * @dev_id: The device context pointer.
12337 * This function is directly called from the PCI layer as an interrupt
12338 * service routine when device with SLI-3 interface spec is enabled with
12339 * MSI-X multi-message interrupt mode and there are slow-path events in
12340 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12341 * interrupt mode, this function is called as part of the device-level
12342 * interrupt handler. When the PCI slot is in error recovery or the HBA
12343 * is undergoing initialization, the interrupt handler will not process
12344 * the interrupt. The link attention and ELS ring attention events are
12345 * handled by the worker thread. The interrupt handler signals the worker
12346 * thread and returns for these events. This function is called without
* any lock held. It gets the hbalock to access and update SLI data
* structures.
*
12350 * This function returns IRQ_HANDLED when interrupt is handled else it
12351 * returns IRQ_NONE.
12354 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12356 struct lpfc_hba *phba;
12357 uint32_t ha_copy, hc_copy;
12358 uint32_t work_ha_copy;
12359 unsigned long status;
12360 unsigned long iflag;
12363 MAILBOX_t *mbox, *pmbox;
12364 struct lpfc_vport *vport;
12365 struct lpfc_nodelist *ndlp;
12366 struct lpfc_dmabuf *mp;
12371 * Get the driver's phba structure from the dev_id and
12372 * assume the HBA is not interrupting.
12374 phba = (struct lpfc_hba *)dev_id;
12376 if (unlikely(!phba))
* Stuff needs to be attended to when this function is invoked as an
* individual interrupt handler in MSI-X multi-message interrupt mode
12383 if (phba->intr_type == MSIX) {
12384 /* Check device state for handling interrupt */
12385 if (lpfc_intr_state_check(phba))
12387 /* Need to read HA REG for slow-path events */
12388 spin_lock_irqsave(&phba->hbalock, iflag);
12389 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12391 /* If somebody is waiting to handle an eratt don't process it
12392 * here. The brdkill function will do this.
12394 if (phba->link_flag & LS_IGNORE_ERATT)
12395 ha_copy &= ~HA_ERATT;
12396 /* Check the need for handling ERATT in interrupt handler */
12397 if (ha_copy & HA_ERATT) {
12398 if (phba->hba_flag & HBA_ERATT_HANDLED)
12399 /* ERATT polling has handled ERATT */
12400 ha_copy &= ~HA_ERATT;
12402 /* Indicate interrupt handler handles ERATT */
12403 phba->hba_flag |= HBA_ERATT_HANDLED;
12407 * If there is deferred error attention, do not check for any
12410 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12411 spin_unlock_irqrestore(&phba->hbalock, iflag);
12415 /* Clear up only attention source related to slow-path */
12416 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12419 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12420 HC_LAINT_ENA | HC_ERINT_ENA),
12422 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12424 writel(hc_copy, phba->HCregaddr);
12425 readl(phba->HAregaddr); /* flush */
12426 spin_unlock_irqrestore(&phba->hbalock, iflag);
12428 ha_copy = phba->ha_copy;
12430 work_ha_copy = ha_copy & phba->work_ha_mask;
12432 if (work_ha_copy) {
12433 if (work_ha_copy & HA_LATT) {
12434 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12436 * Turn off Link Attention interrupts
12437 * until CLEAR_LA done
12439 spin_lock_irqsave(&phba->hbalock, iflag);
12440 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12441 if (lpfc_readl(phba->HCregaddr, &control))
12443 control &= ~HC_LAINT_ENA;
12444 writel(control, phba->HCregaddr);
12445 readl(phba->HCregaddr); /* flush */
12446 spin_unlock_irqrestore(&phba->hbalock, iflag);
12449 work_ha_copy &= ~HA_LATT;
12452 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12454 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12455 * the only slow ring.
12457 status = (work_ha_copy &
12458 (HA_RXMASK << (4*LPFC_ELS_RING)));
12459 status >>= (4*LPFC_ELS_RING);
12460 if (status & HA_RXMASK) {
12461 spin_lock_irqsave(&phba->hbalock, iflag);
12462 if (lpfc_readl(phba->HCregaddr, &control))
12465 lpfc_debugfs_slow_ring_trc(phba,
12466 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12468 (uint32_t)phba->sli.slistat.sli_intr);
12470 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12471 lpfc_debugfs_slow_ring_trc(phba,
12472 "ISR Disable ring:"
12473 "pwork:x%x hawork:x%x wait:x%x",
12474 phba->work_ha, work_ha_copy,
12475 (uint32_t)((unsigned long)
12476 &phba->work_waitq));
12479 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12480 writel(control, phba->HCregaddr);
12481 readl(phba->HCregaddr); /* flush */
12484 lpfc_debugfs_slow_ring_trc(phba,
12485 "ISR slow ring: pwork:"
12486 "x%x hawork:x%x wait:x%x",
12487 phba->work_ha, work_ha_copy,
12488 (uint32_t)((unsigned long)
12489 &phba->work_waitq));
12491 spin_unlock_irqrestore(&phba->hbalock, iflag);
12494 spin_lock_irqsave(&phba->hbalock, iflag);
12495 if (work_ha_copy & HA_ERATT) {
12496 if (lpfc_sli_read_hs(phba))
12499 * Check if there is a deferred error condition
12502 if ((HS_FFER1 & phba->work_hs) &&
12503 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12504 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12506 phba->hba_flag |= DEFER_ERATT;
12507 /* Clear all interrupt enable conditions */
12508 writel(0, phba->HCregaddr);
12509 readl(phba->HCregaddr);
12513 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12514 pmb = phba->sli.mbox_active;
12515 pmbox = &pmb->u.mb;
12517 vport = pmb->vport;
12519 /* First check out the status word */
12520 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12521 if (pmbox->mbxOwner != OWN_HOST) {
12522 spin_unlock_irqrestore(&phba->hbalock, iflag);
12524 * Stray Mailbox Interrupt, mbxCommand <cmd>
12525 * mbxStatus <status>
12527 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12529 "(%d):0304 Stray Mailbox "
12530 "Interrupt mbxCommand x%x "
12532 (vport ? vport->vpi : 0),
12535 /* clear mailbox attention bit */
12536 work_ha_copy &= ~HA_MBATT;
12538 phba->sli.mbox_active = NULL;
12539 spin_unlock_irqrestore(&phba->hbalock, iflag);
12540 phba->last_completion_time = jiffies;
12541 del_timer(&phba->sli.mbox_tmo);
12542 if (pmb->mbox_cmpl) {
12543 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12545 if (pmb->out_ext_byte_len &&
12547 lpfc_sli_pcimem_bcopy(
12550 pmb->out_ext_byte_len);
12552 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12553 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12555 lpfc_debugfs_disc_trc(vport,
12556 LPFC_DISC_TRC_MBOX_VPORT,
12557 "MBOX dflt rpi: : "
12558 "status:x%x rpi:x%x",
12559 (uint32_t)pmbox->mbxStatus,
12560 pmbox->un.varWords[0], 0);
12562 if (!pmbox->mbxStatus) {
12563 mp = (struct lpfc_dmabuf *)
12565 ndlp = (struct lpfc_nodelist *)
/* Reg_LOGIN of dflt RPI was
 * successful. Now let's get
 * rid of the RPI using the
 * same mbox buffer.
12573 lpfc_unreg_login(phba,
12575 pmbox->un.varWords[0],
12578 lpfc_mbx_cmpl_dflt_rpi;
12580 pmb->ctx_ndlp = ndlp;
12581 pmb->vport = vport;
12582 rc = lpfc_sli_issue_mbox(phba,
12585 if (rc != MBX_BUSY)
12586 lpfc_printf_log(phba,
12588 LOG_MBOX | LOG_SLI,
12589 "0350 rc should have"
12590 "been MBX_BUSY\n");
12591 if (rc != MBX_NOT_FINISHED)
12592 goto send_current_mbox;
12596 &phba->pport->work_port_lock,
12598 phba->pport->work_port_events &=
12600 spin_unlock_irqrestore(
12601 &phba->pport->work_port_lock,
12603 lpfc_mbox_cmpl_put(phba, pmb);
12606 spin_unlock_irqrestore(&phba->hbalock, iflag);
12608 if ((work_ha_copy & HA_MBATT) &&
12609 (phba->sli.mbox_active == NULL)) {
12611 /* Process next mailbox command if there is one */
12613 rc = lpfc_sli_issue_mbox(phba, NULL,
12615 } while (rc == MBX_NOT_FINISHED);
12616 if (rc != MBX_SUCCESS)
12617 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12618 LOG_SLI, "0349 rc should be "
12622 spin_lock_irqsave(&phba->hbalock, iflag);
12623 phba->work_ha |= work_ha_copy;
12624 spin_unlock_irqrestore(&phba->hbalock, iflag);
12625 lpfc_worker_wake_up(phba);
12627 return IRQ_HANDLED;
12629 spin_unlock_irqrestore(&phba->hbalock, iflag);
12630 return IRQ_HANDLED;
12632 } /* lpfc_sli_sp_intr_handler */
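/*
 * A note on the HA_RXMASK arithmetic used throughout these handlers
 * (a sketch of the SLI-3 layout this file assumes): the Host Attention
 * register carries a 4-bit attention nibble per ring, so the bits for
 * ring N are isolated with:
 *
 *	status = ha_copy & (HA_RXMASK << (4 * N));
 *	status >>= (4 * N);
 *	if (status & HA_RXMASK)
 *		(ring N has receive/response attention pending)
 */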
12635 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12636 * @irq: Interrupt number.
12637 * @dev_id: The device context pointer.
12639 * This function is directly called from the PCI layer as an interrupt
12640 * service routine when device with SLI-3 interface spec is enabled with
12641 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12642 * ring event in the HBA. However, when the device is enabled with either
12643 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12644 * device-level interrupt handler. When the PCI slot is in error recovery
12645 * or the HBA is undergoing initialization, the interrupt handler will not
* process the interrupt. The SCSI FCP fast-path ring events are handled in
* the interrupt context. This function is called without any lock held.
12648 * It gets the hbalock to access and update SLI data structures.
12650 * This function returns IRQ_HANDLED when interrupt is handled else it
12651 * returns IRQ_NONE.
12654 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12656 struct lpfc_hba *phba;
12658 unsigned long status;
12659 unsigned long iflag;
12660 struct lpfc_sli_ring *pring;
12662 /* Get the driver's phba structure from the dev_id and
12663 * assume the HBA is not interrupting.
12665 phba = (struct lpfc_hba *) dev_id;
12667 if (unlikely(!phba))
* Stuff needs to be attended to when this function is invoked as an
* individual interrupt handler in MSI-X multi-message interrupt mode
12674 if (phba->intr_type == MSIX) {
12675 /* Check device state for handling interrupt */
12676 if (lpfc_intr_state_check(phba))
12678 /* Need to read HA REG for FCP ring and other ring events */
12679 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12680 return IRQ_HANDLED;
12681 /* Clear up only attention source related to fast-path */
12682 spin_lock_irqsave(&phba->hbalock, iflag);
12684 * If there is deferred error attention, do not check for
12687 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12688 spin_unlock_irqrestore(&phba->hbalock, iflag);
12691 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12693 readl(phba->HAregaddr); /* flush */
12694 spin_unlock_irqrestore(&phba->hbalock, iflag);
12696 ha_copy = phba->ha_copy;
12699 * Process all events on FCP ring. Take the optimized path for FCP IO.
12701 ha_copy &= ~(phba->work_ha_mask);
12703 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12704 status >>= (4*LPFC_FCP_RING);
12705 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12706 if (status & HA_RXMASK)
12707 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12709 if (phba->cfg_multi_ring_support == 2) {
12711 * Process all events on extra ring. Take the optimized path
12712 * for extra ring IO.
12714 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12715 status >>= (4*LPFC_EXTRA_RING);
12716 if (status & HA_RXMASK) {
12717 lpfc_sli_handle_fast_ring_event(phba,
12718 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12722 return IRQ_HANDLED;
12723 } /* lpfc_sli_fp_intr_handler */
12726 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12727 * @irq: Interrupt number.
12728 * @dev_id: The device context pointer.
12730 * This function is the HBA device-level interrupt handler to device with
12731 * SLI-3 interface spec, called from the PCI layer when either MSI or
12732 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12733 * requires driver attention. This function invokes the slow-path interrupt
12734 * attention handling function and fast-path interrupt attention handling
12735 * function in turn to process the relevant HBA attention events. This
12736 * function is called without any lock held. It gets the hbalock to access
12737 * and update SLI data structures.
12739 * This function returns IRQ_HANDLED when interrupt is handled, else it
12740 * returns IRQ_NONE.
12743 lpfc_sli_intr_handler(int irq, void *dev_id)
12745 struct lpfc_hba *phba;
12746 irqreturn_t sp_irq_rc, fp_irq_rc;
12747 unsigned long status1, status2;
12751 * Get the driver's phba structure from the dev_id and
12752 * assume the HBA is not interrupting.
12754 phba = (struct lpfc_hba *) dev_id;
12756 if (unlikely(!phba))
12759 /* Check device state for handling interrupt */
12760 if (lpfc_intr_state_check(phba))
12763 spin_lock(&phba->hbalock);
12764 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12765 spin_unlock(&phba->hbalock);
12766 return IRQ_HANDLED;
12769 if (unlikely(!phba->ha_copy)) {
12770 spin_unlock(&phba->hbalock);
12772 } else if (phba->ha_copy & HA_ERATT) {
12773 if (phba->hba_flag & HBA_ERATT_HANDLED)
12774 /* ERATT polling has handled ERATT */
12775 phba->ha_copy &= ~HA_ERATT;
12777 /* Indicate interrupt handler handles ERATT */
12778 phba->hba_flag |= HBA_ERATT_HANDLED;
12782 * If there is deferred error attention, do not check for any interrupt.
12784 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12785 spin_unlock(&phba->hbalock);
12789 /* Clear attention sources except link and error attentions */
12790 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12791 spin_unlock(&phba->hbalock);
12792 return IRQ_HANDLED;
12794 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12795 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12797 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12798 writel(hc_copy, phba->HCregaddr);
12799 readl(phba->HAregaddr); /* flush */
12800 spin_unlock(&phba->hbalock);
12803 * Invokes slow-path host attention interrupt handling as appropriate.
12806 /* status of events with mailbox and link attention */
12807 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12809 /* status of events with ELS ring */
12810 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12811 status2 >>= (4*LPFC_ELS_RING);
12813 if (status1 || (status2 & HA_RXMASK))
12814 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12816 sp_irq_rc = IRQ_NONE;
12819 * Invoke fast-path host attention interrupt handling as appropriate.
12822 /* status of events with FCP ring */
12823 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12824 status1 >>= (4*LPFC_FCP_RING);
12826 /* status of events with extra ring */
12827 if (phba->cfg_multi_ring_support == 2) {
12828 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12829 status2 >>= (4*LPFC_EXTRA_RING);
12833 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12834 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12836 fp_irq_rc = IRQ_NONE;
12838 /* Return device-level interrupt handling status */
12839 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12840 } /* lpfc_sli_intr_handler */
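/*
 * Registration sketch (simplified; the authoritative wiring lives in
 * lpfc_init.c and the vector variable names here are illustrative): with
 * MSI-X the two sub-handlers own separate vectors, while MSI and INTx
 * register the combined device-level handler:
 *
 *	request_irq(sp_vector, lpfc_sli_sp_intr_handler, 0, "lpfc-sp", phba);
 *	request_irq(fp_vector, lpfc_sli_fp_intr_handler, 0, "lpfc-fp", phba);
 *
 *	request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, IRQF_SHARED,
 *		    "lpfc", phba);
 */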
12843 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12844 * @phba: pointer to lpfc hba data structure.
12846 * This routine is invoked by the worker thread to process all the pending
12847 * SLI4 els abort xri events.
12849 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12851 struct lpfc_cq_event *cq_event;
12853 /* First, declare the els xri abort event has been handled */
12854 spin_lock_irq(&phba->hbalock);
12855 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12856 spin_unlock_irq(&phba->hbalock);
12857 /* Now, handle all the els xri abort events */
12858 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12859 /* Get the first event from the head of the event queue */
12860 spin_lock_irq(&phba->hbalock);
12861 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12862 cq_event, struct lpfc_cq_event, list);
12863 spin_unlock_irq(&phba->hbalock);
12864 /* Notify aborted XRI for ELS work queue */
12865 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12866 /* Free the event processed back to the free pool */
12867 lpfc_sli4_cq_event_release(phba, cq_event);
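/*
 * Note on the drain ordering above (sketch): ELS_XRI_ABORT_EVENT is
 * cleared before the list is drained, so an event posted concurrently
 * re-arms the worker instead of being lost, and each removal happens
 * under hbalock because producers run in interrupt context:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_remove_head(&queue, cq_event, struct lpfc_cq_event, list);
 *	spin_unlock_irq(&phba->hbalock);
 */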
12872 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12873 * @phba: pointer to lpfc hba data structure
12874 * @pIocbIn: pointer to the rspiocbq
12875 * @pIocbOut: pointer to the cmdiocbq
12876 * @wcqe: pointer to the complete wcqe
12878 * This routine transfers the fields of a command iocbq to a response iocbq
12879 * by copying all the IOCB fields from command iocbq and transferring the
12880 * completion status information from the complete wcqe.
12883 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12884 struct lpfc_iocbq *pIocbIn,
12885 struct lpfc_iocbq *pIocbOut,
12886 struct lpfc_wcqe_complete *wcqe)
12889 unsigned long iflags;
12890 uint32_t status, max_response;
12891 struct lpfc_dmabuf *dmabuf;
12892 struct ulp_bde64 *bpl, bde;
12893 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12895 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12896 sizeof(struct lpfc_iocbq) - offset);
12897 /* Map WCQE parameters into irspiocb parameters */
12898 status = bf_get(lpfc_wcqe_c_status, wcqe);
12899 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12900 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12901 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12902 pIocbIn->iocb.un.fcpi.fcpi_parm =
12903 pIocbOut->iocb.un.fcpi.fcpi_parm -
12904 wcqe->total_data_placed;
12906 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12908 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12909 switch (pIocbOut->iocb.ulpCommand) {
12910 case CMD_ELS_REQUEST64_CR:
12911 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12912 bpl = (struct ulp_bde64 *)dmabuf->virt;
12913 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12914 max_response = bde.tus.f.bdeSize;
12916 case CMD_GEN_REQUEST64_CR:
12918 if (!pIocbOut->context3)
12920 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12921 sizeof(struct ulp_bde64);
12922 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12923 bpl = (struct ulp_bde64 *)dmabuf->virt;
12924 for (i = 0; i < numBdes; i++) {
12925 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12926 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12927 max_response += bde.tus.f.bdeSize;
12931 max_response = wcqe->total_data_placed;
12934 if (max_response < wcqe->total_data_placed)
12935 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12937 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12938 wcqe->total_data_placed;
12941 /* Convert BG errors for completion status */
12942 if (status == CQE_STATUS_DI_ERROR) {
12943 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12945 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12946 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12948 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12950 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12951 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12952 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12953 BGS_GUARD_ERR_MASK;
12954 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12955 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12956 BGS_APPTAG_ERR_MASK;
12957 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12958 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12959 BGS_REFTAG_ERR_MASK;
12961 /* Check to see if there was any good data before the error */
12962 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12963 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12964 BGS_HI_WATER_MARK_PRESENT_MASK;
12965 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12966 wcqe->total_data_placed;
12970 * Set ALL the error bits to indicate we don't know what
12971 * type of error it is.
12973 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12974 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12975 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12976 BGS_GUARD_ERR_MASK);
12979 /* Pick up HBA exchange busy condition */
12980 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12981 spin_lock_irqsave(&phba->hbalock, iflags);
12982 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12983 spin_unlock_irqrestore(&phba->hbalock, iflags);
12988 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12989 * @phba: Pointer to HBA context object.
12990 * @wcqe: Pointer to work-queue completion queue entry.
* This routine handles an ELS work-queue completion event and constructs
* a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
* discovery engine to handle.
*
* Return: Pointer to the receive IOCBQ, NULL otherwise.
12998 static struct lpfc_iocbq *
12999 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13000 struct lpfc_iocbq *irspiocbq)
13002 struct lpfc_sli_ring *pring;
13003 struct lpfc_iocbq *cmdiocbq;
13004 struct lpfc_wcqe_complete *wcqe;
13005 unsigned long iflags;
13007 pring = lpfc_phba_elsring(phba);
13008 if (unlikely(!pring))
13011 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13012 pring->stats.iocb_event++;
13013 /* Look up the ELS command IOCB and create pseudo response IOCB */
13014 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13015 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13016 if (unlikely(!cmdiocbq)) {
13017 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13018 "0386 ELS complete with no corresponding "
13019 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13020 wcqe->word0, wcqe->total_data_placed,
13021 wcqe->parameter, wcqe->word3);
13022 lpfc_sli_release_iocbq(phba, irspiocbq);
13026 spin_lock_irqsave(&pring->ring_lock, iflags);
13027 /* Put the iocb back on the txcmplq */
13028 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13029 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13031 /* Fake the irspiocbq and copy necessary response information */
13032 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13037 inline struct lpfc_cq_event *
13038 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13040 struct lpfc_cq_event *cq_event;
13042 /* Allocate a new internal CQ_EVENT entry */
13043 cq_event = lpfc_sli4_cq_event_alloc(phba);
13045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13046 "0602 Failed to alloc CQ_EVENT entry\n");
13050 /* Move the CQE into the event */
13051 memcpy(&cq_event->cqe, entry, size);
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
* @cqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry with an
* asynchronous event.
*
* Return: true if work posted to worker thread, otherwise false.
13066 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13068 struct lpfc_cq_event *cq_event;
13069 unsigned long iflags;
13071 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13072 "0392 Async Event: word0:x%x, word1:x%x, "
13073 "word2:x%x, word3:x%x\n", mcqe->word0,
13074 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13076 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13079 spin_lock_irqsave(&phba->hbalock, iflags);
13080 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13081 /* Set the async event flag */
13082 phba->hba_flag |= ASYNC_EVENT;
13083 spin_unlock_irqrestore(&phba->hbalock, iflags);
13089 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13090 * @phba: Pointer to HBA context object.
13091 * @cqe: Pointer to mailbox completion queue entry.
* This routine processes a mailbox completion queue entry with a mailbox
* completion event.
13096 * Return: true if work posted to worker thread, otherwise false.
13099 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13101 uint32_t mcqe_status;
13102 MAILBOX_t *mbox, *pmbox;
13103 struct lpfc_mqe *mqe;
13104 struct lpfc_vport *vport;
13105 struct lpfc_nodelist *ndlp;
13106 struct lpfc_dmabuf *mp;
13107 unsigned long iflags;
13109 bool workposted = false;
/* If not a mailbox complete MCQE, bail out after checking mailbox consumption */
13113 if (!bf_get(lpfc_trailer_completed, mcqe))
13114 goto out_no_mqe_complete;
13116 /* Get the reference to the active mbox command */
13117 spin_lock_irqsave(&phba->hbalock, iflags);
13118 pmb = phba->sli.mbox_active;
13119 if (unlikely(!pmb)) {
13120 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13121 "1832 No pending MBOX command to handle\n");
13122 spin_unlock_irqrestore(&phba->hbalock, iflags);
13123 goto out_no_mqe_complete;
13125 spin_unlock_irqrestore(&phba->hbalock, iflags);
13127 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13129 vport = pmb->vport;
13131 /* Reset heartbeat timer */
13132 phba->last_completion_time = jiffies;
13133 del_timer(&phba->sli.mbox_tmo);
13135 /* Move mbox data to caller's mailbox region, do endian swapping */
13136 if (pmb->mbox_cmpl && mbox)
13137 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13140 * For mcqe errors, conditionally move a modified error code to
13141 * the mbox so that the error will not be missed.
13143 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13144 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13145 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13146 bf_set(lpfc_mqe_status, mqe,
13147 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13149 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13150 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13151 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13152 "MBOX dflt rpi: status:x%x rpi:x%x",
13154 pmbox->un.varWords[0], 0);
13155 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13156 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13157 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was successful. Now let's get
 * rid of the RPI using the same mbox buffer.
13161 lpfc_unreg_login(phba, vport->vpi,
13162 pmbox->un.varWords[0], pmb);
13163 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13165 pmb->ctx_ndlp = ndlp;
13166 pmb->vport = vport;
13167 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13168 if (rc != MBX_BUSY)
13169 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13170 LOG_SLI, "0385 rc should "
13171 "have been MBX_BUSY\n");
13172 if (rc != MBX_NOT_FINISHED)
13173 goto send_current_mbox;
13176 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13177 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13178 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13180 /* There is mailbox completion work to do */
13181 spin_lock_irqsave(&phba->hbalock, iflags);
13182 __lpfc_mbox_cmpl_put(phba, pmb);
13183 phba->work_ha |= HA_MBATT;
13184 spin_unlock_irqrestore(&phba->hbalock, iflags);
13188 spin_lock_irqsave(&phba->hbalock, iflags);
13189 /* Release the mailbox command posting token */
13190 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting the active mailbox pointer needs to be in sync with the flag clear */
13192 phba->sli.mbox_active = NULL;
13193 spin_unlock_irqrestore(&phba->hbalock, iflags);
13194 /* Wake up worker thread to post the next pending mailbox command */
13195 lpfc_worker_wake_up(phba);
13196 out_no_mqe_complete:
13197 if (bf_get(lpfc_trailer_consumed, mcqe))
13198 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13203 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13204 * @phba: Pointer to HBA context object.
13205 * @cqe: Pointer to mailbox completion queue entry.
* This routine processes a mailbox completion queue entry; it invokes the
* proper mailbox-completion or asynchronous-event handling routine
* according to the MCQE's async bit.
13211 * Return: true if work posted to worker thread, otherwise false.
13214 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13215 struct lpfc_cqe *cqe)
13217 struct lpfc_mcqe mcqe;
13222 /* Copy the mailbox MCQE and convert endian order as needed */
13223 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13225 /* Invoke the proper event handling routine */
13226 if (!bf_get(lpfc_trailer_async, &mcqe))
13227 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13229 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13234 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13235 * @phba: Pointer to HBA context object.
13236 * @cq: Pointer to associated CQ
13237 * @wcqe: Pointer to work-queue completion queue entry.
13239 * This routine handles an ELS work-queue completion event.
13241 * Return: true if work posted to worker thread, otherwise false.
13244 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13245 struct lpfc_wcqe_complete *wcqe)
13247 struct lpfc_iocbq *irspiocbq;
13248 unsigned long iflags;
13249 struct lpfc_sli_ring *pring = cq->pring;
13251 int txcmplq_cnt = 0;
13252 int fcp_txcmplq_cnt = 0;
13254 /* Check for response status */
13255 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13256 /* Log the error status */
13257 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13258 "0357 ELS CQE error: status=x%x: "
13259 "CQE: %08x %08x %08x %08x\n",
13260 bf_get(lpfc_wcqe_c_status, wcqe),
13261 wcqe->word0, wcqe->total_data_placed,
13262 wcqe->parameter, wcqe->word3);
13265 /* Get an irspiocbq for later ELS response processing use */
13266 irspiocbq = lpfc_sli_get_iocbq(phba);
13268 if (!list_empty(&pring->txq))
13270 if (!list_empty(&pring->txcmplq))
13272 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13273 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13274 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13275 txq_cnt, phba->iocb_cnt,
13281 /* Save off the slow-path queue event for work thread to process */
13282 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13283 spin_lock_irqsave(&phba->hbalock, iflags);
13284 list_add_tail(&irspiocbq->cq_event.list,
13285 &phba->sli4_hba.sp_queue_event);
13286 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13287 spin_unlock_irqrestore(&phba->hbalock, iflags);
13293 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13294 * @phba: Pointer to HBA context object.
13295 * @wcqe: Pointer to work-queue completion queue entry.
* This routine handles a slow-path WQ entry consumed event by invoking the
* proper WQ release routine for the slow-path WQ.
13301 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13302 struct lpfc_wcqe_release *wcqe)
13304 /* sanity check on queue memory */
13305 if (unlikely(!phba->sli4_hba.els_wq))
13307 /* Check for the slow-path ELS work queue */
13308 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13309 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13310 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13312 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13313 "2579 Slow-path wqe consume event carries "
13314 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13315 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13316 phba->sli4_hba.els_wq->queue_id);
13320 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13321 * @phba: Pointer to HBA context object.
13322 * @cq: Pointer to a WQ completion queue.
13323 * @wcqe: Pointer to work-queue completion queue entry.
13325 * This routine handles an XRI abort event.
13327 * Return: true if work posted to worker thread, otherwise false.
13330 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13331 struct lpfc_queue *cq,
13332 struct sli4_wcqe_xri_aborted *wcqe)
13334 bool workposted = false;
13335 struct lpfc_cq_event *cq_event;
13336 unsigned long iflags;
13338 switch (cq->subtype) {
13340 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
13341 workposted = false;
13343 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13345 cq_event = lpfc_cq_event_setup(
13346 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13349 cq_event->hdwq = cq->hdwq;
13350 spin_lock_irqsave(&phba->hbalock, iflags);
13351 list_add_tail(&cq_event->list,
13352 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13353 /* Set the els xri abort event flag */
13354 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13355 spin_unlock_irqrestore(&phba->hbalock, iflags);
13359 /* Notify aborted XRI for NVME work queue */
13360 if (phba->nvmet_support)
13361 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13363 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
13365 workposted = false;
13368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13369 "0603 Invalid CQ subtype %d: "
13370 "%08x %08x %08x %08x\n",
13371 cq->subtype, wcqe->word0, wcqe->parameter,
13372 wcqe->word2, wcqe->word3);
13373 workposted = false;
13379 #define FC_RCTL_MDS_DIAGS 0xF4
13382 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13383 * @phba: Pointer to HBA context object.
13384 * @rcqe: Pointer to receive-queue completion queue entry.
* This routine processes a receive-queue completion queue entry.
13388 * Return: true if work posted to worker thread, otherwise false.
13391 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13393 bool workposted = false;
13394 struct fc_frame_header *fc_hdr;
13395 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13396 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13397 struct lpfc_nvmet_tgtport *tgtp;
13398 struct hbq_dmabuf *dma_buf;
13399 uint32_t status, rq_id;
13400 unsigned long iflags;
13402 /* sanity check on queue memory */
13403 if (unlikely(!hrq) || unlikely(!drq))
13406 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13407 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13409 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13410 if (rq_id != hrq->queue_id)
13413 status = bf_get(lpfc_rcqe_status, rcqe);
13415 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13416 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13417 "2537 Receive Frame Truncated!!\n");
13419 case FC_STATUS_RQ_SUCCESS:
13420 spin_lock_irqsave(&phba->hbalock, iflags);
13421 lpfc_sli4_rq_release(hrq, drq);
13422 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13424 hrq->RQ_no_buf_found++;
13425 spin_unlock_irqrestore(&phba->hbalock, iflags);
13429 hrq->RQ_buf_posted--;
13430 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13432 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13434 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13435 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13436 spin_unlock_irqrestore(&phba->hbalock, iflags);
13437 /* Handle MDS Loopback frames */
13438 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13442 /* save off the frame for the work thread to process */
13443 list_add_tail(&dma_buf->cq_event.list,
13444 &phba->sli4_hba.sp_queue_event);
13445 /* Frame received */
13446 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13447 spin_unlock_irqrestore(&phba->hbalock, iflags);
13450 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13451 if (phba->nvmet_support) {
13452 tgtp = phba->targetport->private;
13453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13454 "6402 RQE Error x%x, posted %d err_cnt "
13456 status, hrq->RQ_buf_posted,
13457 hrq->RQ_no_posted_buf,
13458 atomic_read(&tgtp->rcv_fcp_cmd_in),
13459 atomic_read(&tgtp->rcv_fcp_cmd_out),
13460 atomic_read(&tgtp->xmt_fcp_release));
13464 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13465 hrq->RQ_no_posted_buf++;
13466 /* Post more buffers if possible */
13467 spin_lock_irqsave(&phba->hbalock, iflags);
13468 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13469 spin_unlock_irqrestore(&phba->hbalock, iflags);
13478 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13479 * @phba: Pointer to HBA context object.
13480 * @cq: Pointer to the completion queue.
13481 * @cqe: Pointer to a completion queue entry.
* This routine processes a slow-path work-queue or receive-queue
* completion queue entry.
*
13486 * Return: true if work posted to worker thread, otherwise false.
13489 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13490 struct lpfc_cqe *cqe)
13492 struct lpfc_cqe cqevt;
13493 bool workposted = false;
13495 /* Copy the work queue CQE and convert endian order if needed */
13496 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
/* Check and process the different types of WCQE and dispatch */
13499 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13500 case CQE_CODE_COMPL_WQE:
13501 /* Process the WQ/RQ complete event */
13502 phba->last_completion_time = jiffies;
13503 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13504 (struct lpfc_wcqe_complete *)&cqevt);
13506 case CQE_CODE_RELEASE_WQE:
13507 /* Process the WQ release event */
13508 lpfc_sli4_sp_handle_rel_wcqe(phba,
13509 (struct lpfc_wcqe_release *)&cqevt);
13511 case CQE_CODE_XRI_ABORTED:
13512 /* Process the WQ XRI abort event */
13513 phba->last_completion_time = jiffies;
13514 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13515 (struct sli4_wcqe_xri_aborted *)&cqevt);
13517 case CQE_CODE_RECEIVE:
13518 case CQE_CODE_RECEIVE_V1:
13519 /* Process the RQ event */
13520 phba->last_completion_time = jiffies;
13521 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13522 (struct lpfc_rcqe *)&cqevt);
13525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13526 "0388 Not a valid WCQE code: x%x\n",
13527 bf_get(lpfc_cqe_code, &cqevt));
13534 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13535 * @phba: Pointer to HBA context object.
13536 * @eqe: Pointer to fast-path event queue entry.
* This routine processes an event queue entry from the slow-path event
* queue. It will check the MajorCode and MinorCode to determine whether
* this is a completion event on a completion queue; if not, an error
* shall be logged and the routine will just return. Otherwise, it will
* get the corresponding completion queue and process all the entries on
* that completion queue, rearm the completion queue, and then return.
13547 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13548 struct lpfc_queue *speq)
13550 struct lpfc_queue *cq = NULL, *childq;
13553 /* Get the reference to the corresponding CQ */
13554 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13556 list_for_each_entry(childq, &speq->child_list, list) {
13557 if (childq->queue_id == cqid) {
13562 if (unlikely(!cq)) {
13563 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13564 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13565 "0365 Slow-path CQ identifier "
13566 "(%d) does not exist\n", cqid);
13570 /* Save EQ associated with this CQ */
13571 cq->assoc_qp = speq;
13573 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13575 "0390 Cannot schedule soft IRQ "
13576 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13577 cqid, cq->queue_id, raw_smp_processor_id());
13581 * __lpfc_sli4_process_cq - Process elements of a CQ
13582 * @phba: Pointer to HBA context object.
13583 * @cq: Pointer to CQ to be processed
13584 * @handler: Routine to process each cqe
13585 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13587 * This routine processes completion queue entries in a CQ. While a valid
13588 * queue element is found, the handler is called. During processing checks
13589 * are made for periodic doorbell writes to let the hardware know of
13590 * element consumption.
13592 * If the max limit on cqes to process is hit, or there are no more valid
13593 * entries, the loop stops. If we processed a sufficient number of elements,
13594 * meaning there is sufficient load, rather than rearming and generating
13595 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13596 * indicates no rescheduling.
13598 * Returns true if work was scheduled, false otherwise.
13601 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13602 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13603 struct lpfc_cqe *), unsigned long *delay)
13605 struct lpfc_cqe *cqe;
13606 bool workposted = false;
13607 int count = 0, consumed = 0;
13610 /* default - no reschedule */
13613 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13614 goto rearm_and_exit;
13616 /* Process all the entries to the CQ */
13618 cqe = lpfc_sli4_cq_get(cq);
13620 workposted |= handler(phba, cq, cqe);
13621 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13624 if (!(++count % cq->max_proc_limit))
13627 if (!(count % cq->notify_interval)) {
13628 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13633 if (count == LPFC_NVMET_CQ_NOTIFY)
13634 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13636 cqe = lpfc_sli4_cq_get(cq);
13638 if (count >= phba->cfg_cq_poll_threshold) {
13643 /* Track the max number of CQEs processed in 1 EQ */
13644 if (count > cq->CQ_max_cqe)
13645 cq->CQ_max_cqe = count;
13647 cq->assoc_qp->EQ_cqe_cnt += count;
13649 /* Catch the no cq entry condition */
13650 if (unlikely(count == 0))
13651 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13652 "0369 No entry from completion queue "
13653 "qid=%d\n", cq->queue_id);
13655 cq->queue_claimed = 0;
13658 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13659 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
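/*
 * Editor's sketch: the single-consumer gate used by __lpfc_sli4_process_cq.
 * cmpxchg() lets exactly one context claim the CQ (the 0 -> 1 transition);
 * a second caller sees a nonzero old value, skips processing, and only
 * rearms. Helper names are hypothetical.
 */
static inline bool lpfc_sketch_claim_cq(struct lpfc_queue *cq)
{
	return cmpxchg(&cq->queue_claimed, 0, 1) == 0;
}

static inline void lpfc_sketch_release_cq(struct lpfc_queue *cq)
{
	cq->queue_claimed = 0;	/* open the gate for the next pass */
}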
13665 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13666 * @cq: pointer to CQ to process
13668 * This routine calls the cq processing routine with a handler specific
13669 * to the type of queue bound to it.
13671 * The CQ routine returns two values: the first is the calling status,
13672 * which indicates whether work was queued to the background discovery
13673 * thread. If true, the routine should wake up the discovery thread;
13674 * the second is the delay parameter. If non-zero, rather than rearming
13675 * the CQ and yet another interrupt, the CQ handler should be queued so
13676 * that it is processed in a subsequent polling action. The value of
13677 * the delay indicates when to reschedule it.
13680 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13682 struct lpfc_hba *phba = cq->phba;
13683 unsigned long delay;
13684 bool workposted = false;
13686 /* Process and rearm the CQ */
13687 switch (cq->type) {
13689 workposted |= __lpfc_sli4_process_cq(phba, cq,
13690 lpfc_sli4_sp_handle_mcqe,
13694 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
13695 workposted |= __lpfc_sli4_process_cq(phba, cq,
13696 lpfc_sli4_fp_handle_cqe,
13699 workposted |= __lpfc_sli4_process_cq(phba, cq,
13700 lpfc_sli4_sp_handle_cqe,
13704 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13705 "0370 Invalid completion queue type (%d)\n",
13711 if (!queue_delayed_work_on(cq->chann, phba->wq,
13712 &cq->sched_spwork, delay))
13713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13714 "0394 Cannot schedule soft IRQ "
13715 "for cqid=%d on CPU %d\n",
13716 cq->queue_id, cq->chann);
13719 /* wake up worker thread if there is work to be done */
13721 lpfc_worker_wake_up(phba);
13725 * lpfc_sli4_sp_process_cq - slow-path work handler when started by the
13726 * worker thread
13727 * @work: pointer to work element
13729 * translates from the work handler and calls the slow-path handler.
13732 lpfc_sli4_sp_process_cq(struct work_struct *work)
13734 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13736 __lpfc_sli4_sp_process_cq(cq);
13740 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13741 * @work: pointer to work element
13743 * translates from the work handler and calls the slow-path handler.
13746 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13748 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13749 struct lpfc_queue, sched_spwork);
13751 __lpfc_sli4_sp_process_cq(cq);
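/*
 * Editor's sketch: how CQ servicing is deferred. Immediate work uses
 * queue_work_on() against the CPU the CQ is bound to; when the handler
 * reported a rescheduling delay instead of rearming, the same handler is
 * requeued with queue_delayed_work_on(). Hypothetical helper name;
 * 'delay' is in jiffies, as the delayed-work API expects.
 */
static inline void lpfc_sketch_kick_sp_cq(struct lpfc_hba *phba,
					  struct lpfc_queue *cq,
					  unsigned long delay)
{
	if (!delay)
		queue_work_on(cq->chann, phba->wq, &cq->spwork);
	else
		queue_delayed_work_on(cq->chann, phba->wq,
				      &cq->sched_spwork, delay);
}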
13755 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13756 * @phba: Pointer to HBA context object.
13757 * @cq: Pointer to associated CQ
13758 * @wcqe: Pointer to work-queue completion queue entry.
13760 * This routine processes a fast-path work queue completion entry from a
13761 * fast-path event queue for FCP command response completion.
13764 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13765 struct lpfc_wcqe_complete *wcqe)
13767 struct lpfc_sli_ring *pring = cq->pring;
13768 struct lpfc_iocbq *cmdiocbq;
13769 struct lpfc_iocbq irspiocbq;
13770 unsigned long iflags;
13772 /* Check for response status */
13773 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13774 /* If resource errors reported from HBA, reduce queue
13775 * depth of the SCSI device.
13777 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13778 IOSTAT_LOCAL_REJECT)) &&
13779 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13780 IOERR_NO_RESOURCES))
13781 phba->lpfc_rampdown_queue_depth(phba);
13783 /* Log the error status */
13784 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13785 "0373 FCP CQE error: status=x%x: "
13786 "CQE: %08x %08x %08x %08x\n",
13787 bf_get(lpfc_wcqe_c_status, wcqe),
13788 wcqe->word0, wcqe->total_data_placed,
13789 wcqe->parameter, wcqe->word3);
13792 /* Look up the FCP command IOCB and create pseudo response IOCB */
13793 spin_lock_irqsave(&pring->ring_lock, iflags);
13794 pring->stats.iocb_event++;
13795 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13796 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13797 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13798 if (unlikely(!cmdiocbq)) {
13799 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13800 "0374 FCP complete with no corresponding "
13801 "cmdiocb: iotag (%d)\n",
13802 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13805 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13806 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13808 if (cmdiocbq->iocb_cmpl == NULL) {
13809 if (cmdiocbq->wqe_cmpl) {
13810 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13811 spin_lock_irqsave(&phba->hbalock, iflags);
13812 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13813 spin_unlock_irqrestore(&phba->hbalock, iflags);
13816 /* Pass the cmd_iocb and the wcqe to the upper layer */
13817 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13820 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13821 "0375 FCP cmdiocb not callback function "
13822 "iotag: (%d)\n",
13823 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13827 /* Fake the irspiocb and copy necessary response information */
13828 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13830 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13831 spin_lock_irqsave(&phba->hbalock, iflags);
13832 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13833 spin_unlock_irqrestore(&phba->hbalock, iflags);
13836 /* Pass the cmd_iocb and the rsp state to the upper layer */
13837 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
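/*
 * Editor's sketch: the flag handling both completion branches above share.
 * LPFC_DRIVER_ABORTED is cleared under hbalock before the callback runs so
 * the abort path and the completion path cannot race on iocb_flag.
 * Hypothetical helper name.
 */
static inline void lpfc_sketch_clear_aborted(struct lpfc_hba *phba,
					     struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}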
13841 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13842 * @phba: Pointer to HBA context object.
13843 * @cq: Pointer to completion queue.
13844 * @wcqe: Pointer to work-queue completion queue entry.
13846 * This routine handles a fast-path WQ entry consumed event by invoking the
13847 * proper WQ release routine to the slow-path WQ.
13850 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13851 struct lpfc_wcqe_release *wcqe)
13853 struct lpfc_queue *childwq;
13854 bool wqid_matched = false;
13857 /* Check for fast-path FCP work queue release */
13858 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13859 list_for_each_entry(childwq, &cq->child_list, list) {
13860 if (childwq->queue_id == hba_wqid) {
13861 lpfc_sli4_wq_release(childwq,
13862 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13863 if (childwq->q_flag & HBA_NVMET_WQFULL)
13864 lpfc_nvmet_wqfull_process(phba, childwq);
13865 wqid_matched = true;
13869 /* Report warning log message if no match found */
13870 if (wqid_matched != true)
13871 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13872 "2580 Fast-path wqe consume event carries "
13873 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13877 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13878 * @phba: Pointer to HBA context object.
13879 * @cq: Pointer to the completion queue.
13880 * @rcqe: Pointer to receive-queue completion queue entry.
13881 * This routine processes a receive-queue completion queue entry.
13883 * Return: true if work posted to worker thread, otherwise false.
13886 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13887 struct lpfc_rcqe *rcqe)
13889 bool workposted = false;
13890 struct lpfc_queue *hrq;
13891 struct lpfc_queue *drq;
13892 struct rqb_dmabuf *dma_buf;
13893 struct fc_frame_header *fc_hdr;
13894 struct lpfc_nvmet_tgtport *tgtp;
13895 uint32_t status, rq_id;
13896 unsigned long iflags;
13897 uint32_t fctl, idx;
13899 if ((phba->nvmet_support == 0) ||
13900 (phba->sli4_hba.nvmet_cqset == NULL))
13903 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13904 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13905 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13907 /* sanity check on queue memory */
13908 if (unlikely(!hrq) || unlikely(!drq))
13911 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13912 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13914 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13916 if ((phba->nvmet_support == 0) ||
13917 (rq_id != hrq->queue_id))
13920 status = bf_get(lpfc_rcqe_status, rcqe);
13922 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13924 "6126 Receive Frame Truncated!!\n");
13926 case FC_STATUS_RQ_SUCCESS:
13927 spin_lock_irqsave(&phba->hbalock, iflags);
13928 lpfc_sli4_rq_release(hrq, drq);
13929 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13931 hrq->RQ_no_buf_found++;
13932 spin_unlock_irqrestore(&phba->hbalock, iflags);
13935 spin_unlock_irqrestore(&phba->hbalock, iflags);
13937 hrq->RQ_buf_posted--;
13938 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13940 /* Just some basic sanity checks on FCP Command frame */
13941 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13942 fc_hdr->fh_f_ctl[1] << 8 |
13943 fc_hdr->fh_f_ctl[2]);
13944 if (((fctl &
13945 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13946 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13947 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13948 goto drop;
13950 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13951 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13952 lpfc_nvmet_unsol_fcp_event(
13953 phba, idx, dma_buf, cq->isr_timestamp,
13954 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
13955 return false;
13956 }
13957 drop:
13958 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
13960 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13961 if (phba->nvmet_support) {
13962 tgtp = phba->targetport->private;
13963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13964 "6401 RQE Error x%x, posted %d err_cnt "
13966 status, hrq->RQ_buf_posted,
13967 hrq->RQ_no_posted_buf,
13968 atomic_read(&tgtp->rcv_fcp_cmd_in),
13969 atomic_read(&tgtp->rcv_fcp_cmd_out),
13970 atomic_read(&tgtp->xmt_fcp_release));
13974 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13975 hrq->RQ_no_posted_buf++;
13976 /* Post more buffers if possible */
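/*
 * Editor's sketch: the F_CTL check above. The 24-bit F_CTL field arrives
 * as three bytes in the FC frame header; folding them into one integer
 * lets the FIRST_SEQ/END_SEQ/SEQ_INIT bits be tested with a single mask
 * compare. Hypothetical helper name.
 */
static inline uint32_t lpfc_sketch_fctl(const struct fc_frame_header *fc_hdr)
{
	return (fc_hdr->fh_f_ctl[0] << 16) |
	       (fc_hdr->fh_f_ctl[1] << 8) |
		fc_hdr->fh_f_ctl[2];
}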
13984 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13985 * @phba: adapter with cq
13986 * @cq: Pointer to the completion queue.
13987 * @cqe: Pointer to fast-path completion queue entry.
13989 * This routine processes a fast-path work queue completion entry from a
13990 * fast-path event queue for FCP command response completion.
13992 * Return: true if work posted to worker thread, otherwise false.
13995 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13996 struct lpfc_cqe *cqe)
13998 struct lpfc_wcqe_release wcqe;
13999 bool workposted = false;
14001 /* Copy the work queue CQE and convert endian order if needed */
14002 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14004 /* Check and process for different type of WCQE and dispatch */
14005 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14006 case CQE_CODE_COMPL_WQE:
14007 case CQE_CODE_NVME_ERSP:
14009 /* Process the WQ complete event */
14010 phba->last_completion_time = jiffies;
14011 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
14012 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14013 (struct lpfc_wcqe_complete *)&wcqe);
14014 if (cq->subtype == LPFC_NVME_LS)
14015 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14016 (struct lpfc_wcqe_complete *)&wcqe);
14018 case CQE_CODE_RELEASE_WQE:
14019 cq->CQ_release_wqe++;
14020 /* Process the WQ release event */
14021 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14022 (struct lpfc_wcqe_release *)&wcqe);
14024 case CQE_CODE_XRI_ABORTED:
14025 cq->CQ_xri_aborted++;
14026 /* Process the WQ XRI abort event */
14027 phba->last_completion_time = jiffies;
14028 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14029 (struct sli4_wcqe_xri_aborted *)&wcqe);
14031 case CQE_CODE_RECEIVE_V1:
14032 case CQE_CODE_RECEIVE:
14033 phba->last_completion_time = jiffies;
14034 if (cq->subtype == LPFC_NVMET) {
14035 workposted = lpfc_sli4_nvmet_handle_rcqe(
14036 phba, cq, (struct lpfc_rcqe *)&wcqe);
14040 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14041 "0144 Not a valid CQE code: x%x\n",
14042 bf_get(lpfc_wcqe_c_code, &wcqe));
14049 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14050 * @phba: Pointer to HBA context object.
14051 * @eqe: Pointer to fast-path event queue entry.
14053 * This routine processes an event queue entry from the fast-path event
14054 * queue. It checks the MajorCode and MinorCode to determine whether this
14055 * is a completion event on a completion queue; if not, an error is
14056 * logged and the routine returns. Otherwise, it finds the corresponding
14057 * completion queue, processes all the entries on that completion queue,
14058 * rearms the completion queue, and then returns.
14061 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14062 struct lpfc_eqe *eqe)
14064 struct lpfc_queue *cq = NULL;
14065 uint32_t qidx = eq->hdwq;
14068 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14070 "0366 Not a valid completion "
14071 "event: majorcode=x%x, minorcode=x%x\n",
14072 bf_get_le32(lpfc_eqe_major_code, eqe),
14073 bf_get_le32(lpfc_eqe_minor_code, eqe));
14077 /* Get the reference to the corresponding CQ */
14078 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14080 /* Use the fast lookup method first */
14081 if (cqid <= phba->sli4_hba.cq_max) {
14082 cq = phba->sli4_hba.cq_lookup[cqid];
14087 /* Next check for NVMET completion */
14088 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14089 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14090 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14091 /* Process NVMET unsol rcv */
14092 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14097 if (phba->sli4_hba.nvmels_cq &&
14098 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14099 /* Process NVME unsol rcv */
14100 cq = phba->sli4_hba.nvmels_cq;
14103 /* Otherwise this is a Slow path event */
14105 lpfc_sli4_sp_handle_eqe(phba, eqe,
14106 phba->sli4_hba.hdwq[qidx].hba_eq);
14111 if (unlikely(cqid != cq->queue_id)) {
14112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14113 "0368 Miss-matched fast-path completion "
14114 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14115 cqid, cq->queue_id);
14120 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14121 if (phba->ktime_on)
14122 cq->isr_timestamp = ktime_get_ns();
14123 else
14124 cq->isr_timestamp = 0;
14125 #endif
14126 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14127 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14128 "0363 Cannot schedule soft IRQ "
14129 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14130 cqid, cq->queue_id, raw_smp_processor_id());
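/*
 * Editor's sketch: the O(1) fast-path lookup above. cq_lookup is a flat
 * array indexed by queue id, sized to cq_max at init time, so the hot
 * path avoids any list walk; ids outside the array fall back to the
 * slower NVMET and NVME LS checks. Hypothetical helper name.
 */
static inline struct lpfc_queue *
lpfc_sketch_cq_fast_lookup(struct lpfc_hba *phba, uint16_t cqid)
{
	if (cqid <= phba->sli4_hba.cq_max)
		return phba->sli4_hba.cq_lookup[cqid];	/* may be NULL */
	return NULL;
}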
14134 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14135 * @cq: Pointer to CQ to be processed
14137 * This routine calls the cq processing routine with the handler for
14138 * fast-path CQs.
14140 * The CQ routine returns two values: the first is the calling status,
14141 * which indicates whether work was queued to the background discovery
14142 * thread. If true, the routine should wake up the discovery thread;
14143 * the second is the delay parameter. If non-zero, rather than rearming
14144 * the CQ and yet another interrupt, the CQ handler should be queued so
14145 * that it is processed in a subsequent polling action. The value of
14146 * the delay indicates when to reschedule it.
14149 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14151 struct lpfc_hba *phba = cq->phba;
14152 unsigned long delay;
14153 bool workposted = false;
14155 /* process and rearm the CQ */
14156 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14160 if (!queue_delayed_work_on(cq->chann, phba->wq,
14161 &cq->sched_irqwork, delay))
14162 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14163 "0367 Cannot schedule soft IRQ "
14164 "for cqid=%d on CPU %d\n",
14165 cq->queue_id, cq->chann);
14168 /* wake up worker thread if there is work to be done */
14170 lpfc_worker_wake_up(phba);
14174 * lpfc_sli4_hba_process_cq - fast-path work handler when started by the
14175 * worker thread
14176 * @work: pointer to work element
14178 * translates from the work handler and calls the fast-path handler.
14181 lpfc_sli4_hba_process_cq(struct work_struct *work)
14183 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14185 __lpfc_sli4_hba_process_cq(cq);
14189 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14190 * @work: pointer to work element
14192 * translates from the work handler and calls the fast-path handler.
14195 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14197 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14198 struct lpfc_queue, sched_irqwork);
14200 __lpfc_sli4_hba_process_cq(cq);
14204 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14205 * @irq: Interrupt number.
14206 * @dev_id: The device context pointer.
14208 * This function is directly called from the PCI layer as an interrupt
14209 * service routine when device with SLI-4 interface spec is enabled with
14210 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14211 * ring event in the HBA. However, when the device is enabled with either
14212 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14213 * device-level interrupt handler. When the PCI slot is in error recovery
14214 * or the HBA is undergoing initialization, the interrupt handler will not
14215 * process the interrupt. The SCSI FCP fast-path ring events are handled
14216 * in the interrupt context. This function is called without any lock held.
14217 * It gets the hbalock to access and update SLI data structures. Note that,
14218 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
14219 * equal to the FCP CQ index.
14221 * The link attention and ELS ring attention events are handled
14222 * by the worker thread. The interrupt handler signals the worker thread
14223 * and returns for these events. This function is called without any lock
14224 * held. It gets the hbalock to access and update SLI data structures.
14226 * This function returns IRQ_HANDLED when interrupt is handled else it
14227 * returns IRQ_NONE.
14230 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14232 struct lpfc_hba *phba;
14233 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14234 struct lpfc_queue *fpeq;
14235 unsigned long iflag;
14238 struct lpfc_eq_intr_info *eqi;
14241 /* Get the driver's phba structure from the dev_id */
14242 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14243 phba = hba_eq_hdl->phba;
14244 hba_eqidx = hba_eq_hdl->idx;
14246 if (unlikely(!phba))
14248 if (unlikely(!phba->sli4_hba.hdwq))
14251 /* Get to the EQ struct associated with this vector */
14252 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14253 if (unlikely(!fpeq))
14256 /* Check device state for handling interrupt */
14257 if (unlikely(lpfc_intr_state_check(phba))) {
14258 /* Check again for link_state with lock held */
14259 spin_lock_irqsave(&phba->hbalock, iflag);
14260 if (phba->link_state < LPFC_LINK_DOWN)
14261 /* Flush, clear interrupt, and rearm the EQ */
14262 lpfc_sli4_eq_flush(phba, fpeq);
14263 spin_unlock_irqrestore(&phba->hbalock, iflag);
14267 eqi = phba->sli4_hba.eq_info;
14268 icnt = this_cpu_inc_return(eqi->icnt);
14269 fpeq->last_cpu = raw_smp_processor_id();
14271 if (icnt > LPFC_EQD_ISR_TRIGGER &&
14272 phba->cfg_irq_chann == 1 &&
14273 phba->cfg_auto_imax &&
14274 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14275 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14276 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14278 /* process and rearm the EQ */
14279 ecount = lpfc_sli4_process_eq(phba, fpeq);
14281 if (unlikely(ecount == 0)) {
14282 fpeq->EQ_no_entry++;
14283 if (phba->intr_type == MSIX)
14284 /* MSI-X treated interrupt served as no EQ share INT */
14285 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14286 "0358 MSI-X interrupt with no EQE\n");
14288 /* Non MSI-X treated on interrupt as EQ share INT */
14292 return IRQ_HANDLED;
14293 } /* lpfc_sli4_hba_intr_handler */
14296 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14297 * @irq: Interrupt number.
14298 * @dev_id: The device context pointer.
14300 * This function is the device-level interrupt handler to device with SLI-4
14301 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14302 * interrupt mode is enabled and there is an event in the HBA which requires
14303 * driver attention. This function invokes the slow-path interrupt attention
14304 * handling function and fast-path interrupt attention handling function in
14305 * turn to process the relevant HBA attention events. This function is called
14306 * without any lock held. It gets the hbalock to access and update SLI data
14309 * This function returns IRQ_HANDLED when interrupt is handled, else it
14310 * returns IRQ_NONE.
14313 lpfc_sli4_intr_handler(int irq, void *dev_id)
14315 struct lpfc_hba *phba;
14316 irqreturn_t hba_irq_rc;
14317 bool hba_handled = false;
14320 /* Get the driver's phba structure from the dev_id */
14321 phba = (struct lpfc_hba *)dev_id;
14323 if (unlikely(!phba))
14327 * Invoke fast-path host attention interrupt handling as appropriate.
14329 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14330 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14331 &phba->sli4_hba.hba_eq_hdl[qidx]);
14332 if (hba_irq_rc == IRQ_HANDLED)
14333 hba_handled |= true;
14336 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14337 } /* lpfc_sli4_intr_handler */
14340 * lpfc_sli4_queue_free - free a queue structure and associated memory
14341 * @queue: The queue structure to free.
14343 * This function frees a queue structure and the DMAable memory used for
14344 * the host resident queue. This function must be called after destroying the
14345 * queue on the HBA.
14348 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14350 struct lpfc_dmabuf *dmabuf;
14355 if (!list_empty(&queue->wq_list))
14356 list_del(&queue->wq_list);
14358 while (!list_empty(&queue->page_list)) {
14359 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14361 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14362 dmabuf->virt, dmabuf->phys);
14366 lpfc_free_rq_buffer(queue->phba, queue);
14367 kfree(queue->rqbp);
14370 if (!list_empty(&queue->cpu_list))
14371 list_del(&queue->cpu_list);
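/*
 * Editor's sketch: the teardown pattern used above. Pages are popped off
 * page_list one at a time, and each DMA buffer is released before its
 * bookkeeping struct, so a partially built queue (from a failed alloc)
 * can be freed with the same routine. list_remove_head() is the driver's
 * own pop-front helper; the sketch name is hypothetical.
 */
static inline void lpfc_sketch_drain_pages(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf,
				 struct lpfc_dmabuf, list);
		dma_free_coherent(&queue->phba->pcidev->dev,
				  queue->page_size, dmabuf->virt,
				  dmabuf->phys);
		kfree(dmabuf);
	}
}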
14378 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14379 * @phba: The HBA that this queue is being created on.
14380 * @page_size: The size of a queue page
14381 * @entry_size: The size of each queue entry for this queue.
14382 * @entry_count: The number of entries that this queue will handle.
14383 * @cpu: The cpu that will primarily utilize this queue.
14385 * This function allocates a queue structure and the DMAable memory used for
14386 * the host resident queue. This function must be called before creating the
14387 * queue on the HBA.
14389 struct lpfc_queue *
14390 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14391 uint32_t entry_size, uint32_t entry_count, int cpu)
14393 struct lpfc_queue *queue;
14394 struct lpfc_dmabuf *dmabuf;
14395 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14398 if (!phba->sli4_hba.pc_sli4_params.supported)
14399 hw_page_size = page_size;
14401 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14403 /* If needed, adjust page count to match the max the adapter supports */
14404 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14405 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14407 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14408 GFP_KERNEL, cpu_to_node(cpu));
14412 INIT_LIST_HEAD(&queue->list);
14413 INIT_LIST_HEAD(&queue->wq_list);
14414 INIT_LIST_HEAD(&queue->wqfull_list);
14415 INIT_LIST_HEAD(&queue->page_list);
14416 INIT_LIST_HEAD(&queue->child_list);
14417 INIT_LIST_HEAD(&queue->cpu_list);
14419 /* Set queue parameters now. If the system cannot provide memory
14420 * resources, the free routine needs to know what was allocated.
14422 queue->page_count = pgcnt;
14423 queue->q_pgs = (void **)&queue[1];
14424 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14425 queue->entry_size = entry_size;
14426 queue->entry_count = entry_count;
14427 queue->page_size = hw_page_size;
14428 queue->phba = phba;
14430 for (x = 0; x < queue->page_count; x++) {
14431 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14432 dev_to_node(&phba->pcidev->dev));
14435 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14436 hw_page_size, &dmabuf->phys,
14438 if (!dmabuf->virt) {
14442 dmabuf->buffer_tag = x;
14443 list_add_tail(&dmabuf->list, &queue->page_list);
14444 /* use lpfc_sli4_qe to index a particular entry in this page */
14445 queue->q_pgs[x] = dmabuf->virt;
14447 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14448 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14449 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14450 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14452 /* notify_interval will be set during q creation */
14456 lpfc_sli4_queue_free(queue);
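/*
 * Editor's sketch: the sizing rule in lpfc_sli4_queue_alloc. For example,
 * 256 EQEs of 16 bytes fit in one 4KB page (ALIGN(4096, 4096) / 4096 = 1),
 * while 1024 WQEs of 128 bytes need 32 pages. Hypothetical helper name.
 */
static inline uint32_t lpfc_sketch_q_pages(uint32_t entry_size,
					   uint32_t entry_count,
					   uint32_t hw_page_size)
{
	return ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
}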
14461 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14462 * @phba: HBA structure that indicates port to create a queue on.
14463 * @pci_barset: PCI BAR set flag.
14465 * This function performs an iomap of the specified PCI BAR address to a host
14466 * memory address, if not already done, and returns it. The returned host
14467 * memory address can be NULL.
14469 static void __iomem *
14470 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14475 switch (pci_barset) {
14476 case WQ_PCI_BAR_0_AND_1:
14477 return phba->pci_bar0_memmap_p;
14478 case WQ_PCI_BAR_2_AND_3:
14479 return phba->pci_bar2_memmap_p;
14480 case WQ_PCI_BAR_4_AND_5:
14481 return phba->pci_bar4_memmap_p;
14489 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14490 * @phba: HBA structure that EQs are on.
14491 * @startq: The starting EQ index to modify
14492 * @numq: The number of EQs (consecutive indexes) to modify
14493 * @usdelay: amount of delay
14495 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14496 * is set either by writing to a register (if supported by the SLI Port)
14497 * or by mailbox command. The mailbox command allows several EQs to be
14500 * The @phba struct is used to send a mailbox command to HBA. The @startq
14501 * is used to get the starting EQ index to change. The @numq value is
14502 * used to specify how many consecutive EQ indexes, starting at EQ index,
14503 * are to be changed. This function is synchronous and will wait for any
14504 * mailbox commands to finish before returning.
14506 * On success this function will return a zero. If unable to allocate
14507 * enough memory this function will return -ENOMEM. If a mailbox command
14508 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14509 * have had their delay multiplier changed.
14512 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14513 uint32_t numq, uint32_t usdelay)
14515 struct lpfc_mbx_modify_eq_delay *eq_delay;
14516 LPFC_MBOXQ_t *mbox;
14517 struct lpfc_queue *eq;
14518 int cnt = 0, rc, length;
14519 uint32_t shdr_status, shdr_add_status;
14522 union lpfc_sli4_cfg_shdr *shdr;
14524 if (startq >= phba->cfg_irq_chann)
14527 if (usdelay > 0xFFFF) {
14528 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14529 "6429 usdelay %d too large. Scaled down to "
14530 "0xFFFF.\n", usdelay);
14534 /* set values by EQ_DELAY register if supported */
14535 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14536 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14537 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14541 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14549 /* Otherwise, set values by mailbox cmd */
14551 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14554 "6428 Failed allocating mailbox cmd buffer."
14555 " EQ delay was not set.\n");
14558 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14559 sizeof(struct lpfc_sli4_cfg_mhdr));
14560 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14561 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14562 length, LPFC_SLI4_MBX_EMBED);
14563 eq_delay = &mbox->u.mqe.un.eq_delay;
14565 /* Calculate delay multiplier from maximum interrupts per second */
14566 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14569 if (dmult > LPFC_DMULT_MAX)
14570 dmult = LPFC_DMULT_MAX;
14572 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14573 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14576 eq->q_mode = usdelay;
14577 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14578 eq_delay->u.request.eq[cnt].phase = 0;
14579 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14584 eq_delay->u.request.num_eq = cnt;
14586 mbox->vport = phba->pport;
14587 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14588 mbox->ctx_buf = NULL;
14589 mbox->ctx_ndlp = NULL;
14590 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14591 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14592 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14593 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14594 if (shdr_status || shdr_add_status || rc) {
14595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14596 "2512 MODIFY_EQ_DELAY mailbox failed with "
14597 "status x%x add_status x%x, mbx status x%x\n",
14598 shdr_status, shdr_add_status, rc);
14600 mempool_free(mbox, phba->mbox_mem_pool);
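/*
 * Editor's sketch: the delay-to-multiplier conversion used by the mailbox
 * path above. A requested delay in microseconds is scaled by
 * LPFC_DMULT_CONST over LPFC_SEC_TO_USEC and clamped to LPFC_DMULT_MAX,
 * matching the dmult computation in lpfc_modify_hba_eq_delay.
 * Hypothetical helper name.
 */
static inline uint32_t lpfc_sketch_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;

	return dmult > LPFC_DMULT_MAX ? LPFC_DMULT_MAX : dmult;
}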
14605 * lpfc_eq_create - Create an Event Queue on the HBA
14606 * @phba: HBA structure that indicates port to create a queue on.
14607 * @eq: The queue structure to use to create the event queue.
14608 * @imax: The maximum interrupt per second limit.
14610 * This function creates an event queue, as detailed in @eq, on a port,
14611 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14613 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14614 * is used to get the entry count and entry size that are necessary to
14615 * determine the number of pages to allocate and use for this queue. This
14616 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14617 * event queue. This function is synchronous and will wait for the mailbox
14618 * command to finish before continuing.
14620 * On success this function will return a zero. If unable to allocate enough
14621 * memory this function will return -ENOMEM. If the queue create mailbox command
14622 * fails this function will return -ENXIO.
14625 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14627 struct lpfc_mbx_eq_create *eq_create;
14628 LPFC_MBOXQ_t *mbox;
14629 int rc, length, status = 0;
14630 struct lpfc_dmabuf *dmabuf;
14631 uint32_t shdr_status, shdr_add_status;
14632 union lpfc_sli4_cfg_shdr *shdr;
14634 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14636 /* sanity check on queue memory */
14639 if (!phba->sli4_hba.pc_sli4_params.supported)
14640 hw_page_size = SLI4_PAGE_SIZE;
14642 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14645 length = (sizeof(struct lpfc_mbx_eq_create) -
14646 sizeof(struct lpfc_sli4_cfg_mhdr));
14647 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14648 LPFC_MBOX_OPCODE_EQ_CREATE,
14649 length, LPFC_SLI4_MBX_EMBED);
14650 eq_create = &mbox->u.mqe.un.eq_create;
14651 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14652 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14654 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14656 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14658 /* Use version 2 of CREATE_EQ if eqav is set */
14659 if (phba->sli4_hba.pc_sli4_params.eqav) {
14660 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14661 LPFC_Q_CREATE_VERSION_2);
14662 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14663 phba->sli4_hba.pc_sli4_params.eqav);
14666 /* don't setup delay multiplier using EQ_CREATE */
14668 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14670 switch (eq->entry_count) {
14672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14673 "0360 Unsupported EQ count. (%d)\n",
14675 if (eq->entry_count < 256) {
14679 /* fall through - otherwise default to smallest count */
14681 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14685 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14689 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14693 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14697 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14701 list_for_each_entry(dmabuf, &eq->page_list, list) {
14702 memset(dmabuf->virt, 0, hw_page_size);
14703 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14704 putPaddrLow(dmabuf->phys);
14705 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14706 putPaddrHigh(dmabuf->phys);
14708 mbox->vport = phba->pport;
14709 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14710 mbox->ctx_buf = NULL;
14711 mbox->ctx_ndlp = NULL;
14712 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14713 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14714 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14715 if (shdr_status || shdr_add_status || rc) {
14716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14717 "2500 EQ_CREATE mailbox failed with "
14718 "status x%x add_status x%x, mbx status x%x\n",
14719 shdr_status, shdr_add_status, rc);
14722 eq->type = LPFC_EQ;
14723 eq->subtype = LPFC_NONE;
14724 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14725 if (eq->queue_id == 0xFFFF)
14727 eq->host_index = 0;
14728 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14729 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14731 mempool_free(mbox, phba->mbox_mem_pool);
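/*
 * Editor's sketch: the result check every queue-create mailbox in this
 * file repeats. The IOCTL status lives in the config-header response
 * words, so success requires the mailbox return code and both shdr
 * status fields to be clean. Hypothetical helper name.
 */
static inline bool lpfc_sketch_mbx_ok(union lpfc_sli4_cfg_shdr *shdr, int rc)
{
	uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status,
				     &shdr->response);

	return !rc && !status && !add_status;
}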
14736 * lpfc_cq_create - Create a Completion Queue on the HBA
14737 * @phba: HBA structure that indicates port to create a queue on.
14738 * @cq: The queue structure to use to create the completion queue.
14739 * @eq: The event queue to bind this completion queue to.
14741 * This function creates a completion queue, as detailed in @cq, on a port,
14742 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14744 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14745 * is used to get the entry count and entry size that are necessary to
14746 * determine the number of pages to allocate and use for this queue. The @eq
14747 * is used to indicate which event queue to bind this completion queue to. This
14748 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14749 * completion queue. This function is synchronous and will wait for the mailbox
14750 * command to finish before continuing.
14752 * On success this function will return a zero. If unable to allocate enough
14753 * memory this function will return -ENOMEM. If the queue create mailbox command
14754 * fails this function will return -ENXIO.
14757 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14758 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14760 struct lpfc_mbx_cq_create *cq_create;
14761 struct lpfc_dmabuf *dmabuf;
14762 LPFC_MBOXQ_t *mbox;
14763 int rc, length, status = 0;
14764 uint32_t shdr_status, shdr_add_status;
14765 union lpfc_sli4_cfg_shdr *shdr;
14767 /* sanity check on queue memory */
14771 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14774 length = (sizeof(struct lpfc_mbx_cq_create) -
14775 sizeof(struct lpfc_sli4_cfg_mhdr));
14776 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14777 LPFC_MBOX_OPCODE_CQ_CREATE,
14778 length, LPFC_SLI4_MBX_EMBED);
14779 cq_create = &mbox->u.mqe.un.cq_create;
14780 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14781 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14783 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14784 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14785 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14786 phba->sli4_hba.pc_sli4_params.cqv);
14787 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14788 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14789 (cq->page_size / SLI4_PAGE_SIZE));
14790 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14792 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14793 phba->sli4_hba.pc_sli4_params.cqav);
14795 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14798 switch (cq->entry_count) {
14801 if (phba->sli4_hba.pc_sli4_params.cqv ==
14802 LPFC_Q_CREATE_VERSION_2) {
14803 cq_create->u.request.context.lpfc_cq_context_count =
14805 bf_set(lpfc_cq_context_count,
14806 &cq_create->u.request.context,
14807 LPFC_CQ_CNT_WORD7);
14812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14813 "0361 Unsupported CQ count: "
14814 "entry cnt %d sz %d pg cnt %d\n",
14815 cq->entry_count, cq->entry_size,
14817 if (cq->entry_count < 256) {
14821 /* fall through - otherwise default to smallest count */
14823 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14827 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14831 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14835 list_for_each_entry(dmabuf, &cq->page_list, list) {
14836 memset(dmabuf->virt, 0, cq->page_size);
14837 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14838 putPaddrLow(dmabuf->phys);
14839 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14840 putPaddrHigh(dmabuf->phys);
14842 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14844 /* The IOCTL status is embedded in the mailbox subheader. */
14845 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14846 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14847 if (shdr_status || shdr_add_status || rc) {
14848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14849 "2501 CQ_CREATE mailbox failed with "
14850 "status x%x add_status x%x, mbx status x%x\n",
14851 shdr_status, shdr_add_status, rc);
14855 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14856 if (cq->queue_id == 0xFFFF) {
14860 /* link the cq onto the parent eq child list */
14861 list_add_tail(&cq->list, &eq->child_list);
14862 /* Set up completion queue's type and subtype */
14864 cq->subtype = subtype;
14865 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14866 cq->assoc_qid = eq->queue_id;
14868 cq->host_index = 0;
14869 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14870 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14872 if (cq->queue_id > phba->sli4_hba.cq_max)
14873 phba->sli4_hba.cq_max = cq->queue_id;
14875 mempool_free(mbox, phba->mbox_mem_pool);
14880 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14881 * @phba: HBA structure that indicates port to create a queue on.
14882 * @cqp: The queue structure array to use to create the completion queues.
14883 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
14885 * This function creates a set of completion queues to support MRQ,
14886 * as detailed in @cqp, on a port,
14887 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
14889 * The @phba struct is used to send mailbox command to HBA. The @cqp array
14890 * is used to get the entry count and entry size that are necessary to
14891 * determine the number of pages to allocate and use for these queues. The
14892 * EQs in @hdwq indicate which event queue to bind each completion queue to. This
14893 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14894 * completion queue. This function is synchronous and will wait for the mailbox
14895 * command to finish before continuing.
14897 * On success this function will return a zero. If unable to allocate enough
14898 * memory this function will return -ENOMEM. If the queue create mailbox command
14899 * fails this function will return -ENXIO.
14902 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14903 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
14906 struct lpfc_queue *cq;
14907 struct lpfc_queue *eq;
14908 struct lpfc_mbx_cq_create_set *cq_set;
14909 struct lpfc_dmabuf *dmabuf;
14910 LPFC_MBOXQ_t *mbox;
14911 int rc, length, alloclen, status = 0;
14912 int cnt, idx, numcq, page_idx = 0;
14913 uint32_t shdr_status, shdr_add_status;
14914 union lpfc_sli4_cfg_shdr *shdr;
14915 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14917 /* sanity check on queue memory */
14918 numcq = phba->cfg_nvmet_mrq;
14919 if (!cqp || !hdwq || !numcq)
14922 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14926 length = sizeof(struct lpfc_mbx_cq_create_set);
14927 length += ((numcq * cqp[0]->page_count) *
14928 sizeof(struct dma_address));
14929 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14930 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14931 LPFC_SLI4_MBX_NEMBED);
14932 if (alloclen < length) {
14933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14934 "3098 Allocated DMA memory size (%d) is "
14935 "less than the requested DMA memory size "
14936 "(%d)\n", alloclen, length);
14940 cq_set = mbox->sge_array->addr[0];
14941 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14942 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14944 for (idx = 0; idx < numcq; idx++) {
14946 eq = hdwq[idx].hba_eq;
14951 if (!phba->sli4_hba.pc_sli4_params.supported)
14952 hw_page_size = cq->page_size;
14956 bf_set(lpfc_mbx_cq_create_set_page_size,
14957 &cq_set->u.request,
14958 (hw_page_size / SLI4_PAGE_SIZE));
14959 bf_set(lpfc_mbx_cq_create_set_num_pages,
14960 &cq_set->u.request, cq->page_count);
14961 bf_set(lpfc_mbx_cq_create_set_evt,
14962 &cq_set->u.request, 1);
14963 bf_set(lpfc_mbx_cq_create_set_valid,
14964 &cq_set->u.request, 1);
14965 bf_set(lpfc_mbx_cq_create_set_cqe_size,
14966 &cq_set->u.request, 0);
14967 bf_set(lpfc_mbx_cq_create_set_num_cq,
14968 &cq_set->u.request, numcq);
14969 bf_set(lpfc_mbx_cq_create_set_autovalid,
14970 &cq_set->u.request,
14971 phba->sli4_hba.pc_sli4_params.cqav);
14972 switch (cq->entry_count) {
14975 if (phba->sli4_hba.pc_sli4_params.cqv ==
14976 LPFC_Q_CREATE_VERSION_2) {
14977 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14978 &cq_set->u.request,
14980 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14981 &cq_set->u.request,
14982 LPFC_CQ_CNT_WORD7);
14987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14988 "3118 Bad CQ count. (%d)\n",
14990 if (cq->entry_count < 256) {
14994 /* fall through - otherwise default to smallest */
14996 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14997 &cq_set->u.request, LPFC_CQ_CNT_256);
15000 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15001 &cq_set->u.request, LPFC_CQ_CNT_512);
15004 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15005 &cq_set->u.request, LPFC_CQ_CNT_1024);
15008 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15009 &cq_set->u.request, eq->queue_id);
15012 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15013 &cq_set->u.request, eq->queue_id);
15016 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15017 &cq_set->u.request, eq->queue_id);
15020 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15021 &cq_set->u.request, eq->queue_id);
15024 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15025 &cq_set->u.request, eq->queue_id);
15028 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15029 &cq_set->u.request, eq->queue_id);
15032 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15033 &cq_set->u.request, eq->queue_id);
15036 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15037 &cq_set->u.request, eq->queue_id);
15040 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15041 &cq_set->u.request, eq->queue_id);
15044 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15045 &cq_set->u.request, eq->queue_id);
15048 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15049 &cq_set->u.request, eq->queue_id);
15052 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15053 &cq_set->u.request, eq->queue_id);
15056 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15057 &cq_set->u.request, eq->queue_id);
15060 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15061 &cq_set->u.request, eq->queue_id);
15064 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15065 &cq_set->u.request, eq->queue_id);
15068 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15069 &cq_set->u.request, eq->queue_id);
15073 /* link the cq onto the parent eq child list */
15074 list_add_tail(&cq->list, &eq->child_list);
15075 /* Set up completion queue's type and subtype */
15077 cq->subtype = subtype;
15078 cq->assoc_qid = eq->queue_id;
15080 cq->host_index = 0;
15081 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15082 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15087 list_for_each_entry(dmabuf, &cq->page_list, list) {
15088 memset(dmabuf->virt, 0, hw_page_size);
15089 cnt = page_idx + dmabuf->buffer_tag;
15090 cq_set->u.request.page[cnt].addr_lo =
15091 putPaddrLow(dmabuf->phys);
15092 cq_set->u.request.page[cnt].addr_hi =
15093 putPaddrHigh(dmabuf->phys);
15099 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15101 /* The IOCTL status is embedded in the mailbox subheader. */
15102 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15103 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15104 if (shdr_status || shdr_add_status || rc) {
15105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15106 "3119 CQ_CREATE_SET mailbox failed with "
15107 "status x%x add_status x%x, mbx status x%x\n",
15108 shdr_status, shdr_add_status, rc);
15112 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15113 if (rc == 0xFFFF) {
15118 for (idx = 0; idx < numcq; idx++) {
15120 cq->queue_id = rc + idx;
15121 if (cq->queue_id > phba->sli4_hba.cq_max)
15122 phba->sli4_hba.cq_max = cq->queue_id;
15126 lpfc_sli4_mbox_cmd_free(phba, mbox);
15131 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15132 * @phba: HBA structure that indicates port to create a queue on.
15133 * @mq: The queue structure to use to create the mailbox queue.
15134 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15135 * @cq: The completion queue to associate with this mq.
15137 * This function provides failback (fb) functionality when the
15138 * mq_create_ext fails on older FW generations. Its purpose is identical
15139 * to mq_create_ext otherwise.
15141 * This routine cannot fail as all attributes were previously accessed and
15142 * initialized in mq_create_ext.
15145 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15146 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15148 struct lpfc_mbx_mq_create *mq_create;
15149 struct lpfc_dmabuf *dmabuf;
15152 length = (sizeof(struct lpfc_mbx_mq_create) -
15153 sizeof(struct lpfc_sli4_cfg_mhdr));
15154 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15155 LPFC_MBOX_OPCODE_MQ_CREATE,
15156 length, LPFC_SLI4_MBX_EMBED);
15157 mq_create = &mbox->u.mqe.un.mq_create;
15158 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15160 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15162 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15163 switch (mq->entry_count) {
15165 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15166 LPFC_MQ_RING_SIZE_16);
15169 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15170 LPFC_MQ_RING_SIZE_32);
15173 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15174 LPFC_MQ_RING_SIZE_64);
15177 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15178 LPFC_MQ_RING_SIZE_128);
15181 list_for_each_entry(dmabuf, &mq->page_list, list) {
15182 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15183 putPaddrLow(dmabuf->phys);
15184 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15185 putPaddrHigh(dmabuf->phys);
15190 * lpfc_mq_create - Create a mailbox Queue on the HBA
15191 * @phba: HBA structure that indicates port to create a queue on.
15192 * @mq: The queue structure to use to create the mailbox queue.
15193 * @cq: The completion queue to associate with this cq.
15194 * @subtype: The queue's subtype.
15196 * This function creates a mailbox queue, as detailed in @mq, on a port,
15197 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15199 * The @phba struct is used to send mailbox command to HBA. The @mq struct
15200 * is used to get the entry count and entry size that are necessary to
15201 * determine the number of pages to allocate and use for this queue. This
15202 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15203 * mailbox queue. This function is synchronous and will wait for the mailbox
15204 * command to finish before continuing.
15206 * On success this function will return a zero. If unable to allocate enough
15207 * memory this function will return -ENOMEM. If the queue create mailbox command
15208 * fails this function will return -ENXIO.
15211 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15212 struct lpfc_queue *cq, uint32_t subtype)
15214 struct lpfc_mbx_mq_create *mq_create;
15215 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15216 struct lpfc_dmabuf *dmabuf;
15217 LPFC_MBOXQ_t *mbox;
15218 int rc, length, status = 0;
15219 uint32_t shdr_status, shdr_add_status;
15220 union lpfc_sli4_cfg_shdr *shdr;
15221 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15223 /* sanity check on queue memory */
15226 if (!phba->sli4_hba.pc_sli4_params.supported)
15227 hw_page_size = SLI4_PAGE_SIZE;
15229 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15232 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15233 sizeof(struct lpfc_sli4_cfg_mhdr));
15234 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15235 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15236 length, LPFC_SLI4_MBX_EMBED);
15238 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15239 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15240 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15241 &mq_create_ext->u.request, mq->page_count);
15242 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15243 &mq_create_ext->u.request, 1);
15244 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15245 &mq_create_ext->u.request, 1);
15246 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15247 &mq_create_ext->u.request, 1);
15248 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15249 &mq_create_ext->u.request, 1);
15250 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15251 &mq_create_ext->u.request, 1);
15252 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15253 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15254 phba->sli4_hba.pc_sli4_params.mqv);
15255 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15256 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15259 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15261 switch (mq->entry_count) {
15263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15264 "0362 Unsupported MQ count. (%d)\n",
15266 if (mq->entry_count < 16) {
15270 /* fall through - otherwise default to smallest count */
15272 bf_set(lpfc_mq_context_ring_size,
15273 &mq_create_ext->u.request.context,
15274 LPFC_MQ_RING_SIZE_16);
15277 bf_set(lpfc_mq_context_ring_size,
15278 &mq_create_ext->u.request.context,
15279 LPFC_MQ_RING_SIZE_32);
15282 bf_set(lpfc_mq_context_ring_size,
15283 &mq_create_ext->u.request.context,
15284 LPFC_MQ_RING_SIZE_64);
15287 bf_set(lpfc_mq_context_ring_size,
15288 &mq_create_ext->u.request.context,
15289 LPFC_MQ_RING_SIZE_128);
15292 list_for_each_entry(dmabuf, &mq->page_list, list) {
15293 memset(dmabuf->virt, 0, hw_page_size);
15294 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15295 putPaddrLow(dmabuf->phys);
15296 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15297 putPaddrHigh(dmabuf->phys);
15299 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15300 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15301 &mq_create_ext->u.response);
15302 if (rc != MBX_SUCCESS) {
15303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15304 "2795 MQ_CREATE_EXT failed with "
15305 "status x%x. Failback to MQ_CREATE.\n",
15307 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15308 mq_create = &mbox->u.mqe.un.mq_create;
15309 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15310 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15311 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15312 &mq_create->u.response);
15315 /* The IOCTL status is embedded in the mailbox subheader. */
15316 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15317 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15318 if (shdr_status || shdr_add_status || rc) {
15319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15320 "2502 MQ_CREATE mailbox failed with "
15321 "status x%x add_status x%x, mbx status x%x\n",
15322 shdr_status, shdr_add_status, rc);
15326 if (mq->queue_id == 0xFFFF) {
15330 mq->type = LPFC_MQ;
15331 mq->assoc_qid = cq->queue_id;
15332 mq->subtype = subtype;
15333 mq->host_index = 0;
15336 /* link the mq onto the parent cq child list */
15337 list_add_tail(&mq->list, &cq->child_list);
15339 mempool_free(mbox, phba->mbox_mem_pool);
15344 * lpfc_wq_create - Create a Work Queue on the HBA
15345 * @phba: HBA structure that indicates port to create a queue on.
15346 * @wq: The queue structure to use to create the work queue.
15347 * @cq: The completion queue to bind this work queue to.
15348 * @subtype: The subtype of the work queue indicating its functionality.
15350 * This function creates a work queue, as detailed in @wq, on a port, described
15351 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15353 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15354 * is used to get the entry count and entry size that are necessary to
15355 * determine the number of pages to allocate and use for this queue. The @cq
15356 * is used to indicate which completion queue to bind this work queue to. This
15357 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15358 * work queue. This function is synchronous and will wait for the mailbox
15359 * command to finish before continuing.
15361 * On success this function will return a zero. If unable to allocate enough
15362 * memory this function will return -ENOMEM. If the queue create mailbox command
15363 * fails this function will return -ENXIO.
15366 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15367 struct lpfc_queue *cq, uint32_t subtype)
15369 struct lpfc_mbx_wq_create *wq_create;
15370 struct lpfc_dmabuf *dmabuf;
15371 LPFC_MBOXQ_t *mbox;
15372 int rc, length, status = 0;
15373 uint32_t shdr_status, shdr_add_status;
15374 union lpfc_sli4_cfg_shdr *shdr;
15375 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15376 struct dma_address *page;
15377 void __iomem *bar_memmap_p;
15378 uint32_t db_offset;
15379 uint16_t pci_barset;
15380 uint8_t dpp_barset;
15381 uint32_t dpp_offset;
15382 unsigned long pg_addr;
15383 uint8_t wq_create_version;
15385 /* sanity check on queue memory */
15388 if (!phba->sli4_hba.pc_sli4_params.supported)
15389 hw_page_size = wq->page_size;
15391 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15394 length = (sizeof(struct lpfc_mbx_wq_create) -
15395 sizeof(struct lpfc_sli4_cfg_mhdr));
15396 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15397 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15398 length, LPFC_SLI4_MBX_EMBED);
15399 wq_create = &mbox->u.mqe.un.wq_create;
15400 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15401 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15403 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15406 /* wqv is the earliest version supported, NOT the latest */
15407 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15408 phba->sli4_hba.pc_sli4_params.wqv);
15410 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15411 (wq->page_size > SLI4_PAGE_SIZE))
15412 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15413 else
15414 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15422 switch (wq_create_version) {
15423 case LPFC_Q_CREATE_VERSION_1:
15424 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15426 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15427 LPFC_Q_CREATE_VERSION_1);
15429 switch (wq->entry_size) {
15432 bf_set(lpfc_mbx_wq_create_wqe_size,
15433 &wq_create->u.request_1,
15434 LPFC_WQ_WQE_SIZE_64);
15437 bf_set(lpfc_mbx_wq_create_wqe_size,
15438 &wq_create->u.request_1,
15439 LPFC_WQ_WQE_SIZE_128);
15442 /* Request DPP by default */
15443 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15444 bf_set(lpfc_mbx_wq_create_page_size,
15445 &wq_create->u.request_1,
15446 (wq->page_size / SLI4_PAGE_SIZE));
15447 page = wq_create->u.request_1.page;
15450 page = wq_create->u.request.page;
15454 list_for_each_entry(dmabuf, &wq->page_list, list) {
15455 memset(dmabuf->virt, 0, hw_page_size);
15456 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15457 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15460 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15461 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15463 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15464 /* The IOCTL status is embedded in the mailbox subheader. */
15465 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15466 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15467 if (shdr_status || shdr_add_status || rc) {
15468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15469 "2503 WQ_CREATE mailbox failed with "
15470 "status x%x add_status x%x, mbx status x%x\n",
15471 shdr_status, shdr_add_status, rc);
15476 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15477 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15478 &wq_create->u.response);
15480 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15481 &wq_create->u.response_1);
15483 if (wq->queue_id == 0xFFFF) {
15488 wq->db_format = LPFC_DB_LIST_FORMAT;
15489 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15490 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15491 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15492 &wq_create->u.response);
15493 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15494 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15496 "3265 WQ[%d] doorbell format "
15497 "not supported: x%x\n",
15498 wq->queue_id, wq->db_format);
15502 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15503 &wq_create->u.response);
15504 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15506 if (!bar_memmap_p) {
15507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15508 "3263 WQ[%d] failed to memmap "
15509 "pci barset:x%x\n",
15510 wq->queue_id, pci_barset);
15514 db_offset = wq_create->u.response.doorbell_offset;
15515 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15516 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15518 "3252 WQ[%d] doorbell offset "
15519 "not supported: x%x\n",
15520 wq->queue_id, db_offset);
15524 wq->db_regaddr = bar_memmap_p + db_offset;
15525 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15526 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15527 "format:x%x\n", wq->queue_id,
15528 pci_barset, db_offset, wq->db_format);
15530 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15532 /* Check if DPP was honored by the firmware */
15533 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15534 &wq_create->u.response_1);
15535 if (wq->dpp_enable) {
15536 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15537 &wq_create->u.response_1);
15538 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15540 if (!bar_memmap_p) {
15541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15542 "3267 WQ[%d] failed to memmap "
15543 "pci barset:x%x\n",
15544 wq->queue_id, pci_barset);
15548 db_offset = wq_create->u.response_1.doorbell_offset;
15549 wq->db_regaddr = bar_memmap_p + db_offset;
15550 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15551 &wq_create->u.response_1);
15552 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15553 &wq_create->u.response_1);
15554 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15556 if (!bar_memmap_p) {
15557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15558 "3268 WQ[%d] failed to memmap "
15559 "pci barset:x%x\n",
15560 wq->queue_id, dpp_barset);
15564 dpp_offset = wq_create->u.response_1.dpp_offset;
15565 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15566 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15567 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15568 "dpp_id:x%x dpp_barset:x%x "
15569 "dpp_offset:x%x\n",
15570 wq->queue_id, pci_barset, db_offset,
15571 wq->dpp_id, dpp_barset, dpp_offset);
15573 /* Enable combined writes for DPP aperture */
15574 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15576 rc = set_memory_wc(pg_addr, 1);
15578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15579 "3272 Cannot setup Combined "
15580 "Write on WQ[%d] - disable DPP\n",
15582 phba->cfg_enable_dpp = 0;
15585 phba->cfg_enable_dpp = 0;
15588 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15590 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15591 if (wq->pring == NULL) {
15595 wq->type = LPFC_WQ;
15596 wq->assoc_qid = cq->queue_id;
15597 wq->subtype = subtype;
15598 wq->host_index = 0;
15600 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15602 /* link the wq onto the parent cq child list */
15603 list_add_tail(&wq->list, &cq->child_list);
15605 mempool_free(mbox, phba->mbox_mem_pool);
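/*
 * Editor's illustrative sketch (not driver code): a minimal caller pairing
 * lpfc_wq_create() with an existing CQ. The lpfc_sli4_queue_alloc()
 * signature, the 64-byte/256-entry geometry, and LPFC_DEFAULT_PAGE_SIZE are
 * assumptions for illustration; the real callers live in lpfc_init.c.
 */
static int example_create_wq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_queue *wq;

	wq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 64, 256);
	if (!wq)
		return -ENOMEM;

	/* Bind the new WQ to @cq; the subtype selects its ELS role */
	if (lpfc_wq_create(phba, wq, cq, LPFC_ELS)) {
		lpfc_sli4_queue_free(wq);
		return -ENXIO;
	}
	return 0;
}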
15610 * lpfc_rq_create - Create a Receive Queue on the HBA
15611 * @phba: HBA structure that indicates port to create a queue on.
15612 * @hrq: The queue structure to use to create the header receive queue.
15613 * @drq: The queue structure to use to create the data receive queue.
15614 * @cq: The completion queue to bind these receive queues to.
15616 * This function creates a receive buffer queue pair, as detailed in @hrq and
15617 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15620 * The @phba struct is used to send the mailbox command to the HBA. The @drq
15621 * and @hrq structs are used to get the entry count that is necessary to
15622 * number of pages to use for this queue. The @cq is used to indicate which
15623 * completion queue to bind received buffers that are posted to these queues to.
15624 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15625 * receive queue pair. This function is synchronous and will wait for the
15626 * mailbox command to finish before returning.
15628 * On success this function will return a zero. If unable to allocate enough
15629 * memory this function will return -ENOMEM. If the queue create mailbox command
15630 * fails this function will return -ENXIO.
15633 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15634 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15636 struct lpfc_mbx_rq_create *rq_create;
15637 struct lpfc_dmabuf *dmabuf;
15638 LPFC_MBOXQ_t *mbox;
15639 int rc, length, status = 0;
15640 uint32_t shdr_status, shdr_add_status;
15641 union lpfc_sli4_cfg_shdr *shdr;
15642 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15643 void __iomem *bar_memmap_p;
15644 uint32_t db_offset;
15645 uint16_t pci_barset;
15647 /* sanity check on queue memory */
15648 if (!hrq || !drq || !cq)
15650 if (!phba->sli4_hba.pc_sli4_params.supported)
15651 hw_page_size = SLI4_PAGE_SIZE;
15653 if (hrq->entry_count != drq->entry_count)
15655 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15658 length = (sizeof(struct lpfc_mbx_rq_create) -
15659 sizeof(struct lpfc_sli4_cfg_mhdr));
15660 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15661 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15662 length, LPFC_SLI4_MBX_EMBED);
15663 rq_create = &mbox->u.mqe.un.rq_create;
15664 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15665 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15666 phba->sli4_hba.pc_sli4_params.rqv);
15667 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15668 bf_set(lpfc_rq_context_rqe_count_1,
15669 &rq_create->u.request.context,
15671 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15672 bf_set(lpfc_rq_context_rqe_size,
15673 &rq_create->u.request.context,
15675 bf_set(lpfc_rq_context_page_size,
15676 &rq_create->u.request.context,
15677 LPFC_RQ_PAGE_SIZE_4096);
15679 switch (hrq->entry_count) {
15681 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15682 "2535 Unsupported RQ count. (%d)\n",
15684 if (hrq->entry_count < 512) {
15688 /* fall through - otherwise default to smallest count */
15690 bf_set(lpfc_rq_context_rqe_count,
15691 &rq_create->u.request.context,
15692 LPFC_RQ_RING_SIZE_512);
15695 bf_set(lpfc_rq_context_rqe_count,
15696 &rq_create->u.request.context,
15697 LPFC_RQ_RING_SIZE_1024);
15700 bf_set(lpfc_rq_context_rqe_count,
15701 &rq_create->u.request.context,
15702 LPFC_RQ_RING_SIZE_2048);
15705 bf_set(lpfc_rq_context_rqe_count,
15706 &rq_create->u.request.context,
15707 LPFC_RQ_RING_SIZE_4096);
15710 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15711 LPFC_HDR_BUF_SIZE);
15713 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15715 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15717 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15718 memset(dmabuf->virt, 0, hw_page_size);
15719 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15720 putPaddrLow(dmabuf->phys);
15721 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15722 putPaddrHigh(dmabuf->phys);
15724 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15725 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15727 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15728 /* The IOCTL status is embedded in the mailbox subheader. */
15729 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15730 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15731 if (shdr_status || shdr_add_status || rc) {
15732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15733 "2504 RQ_CREATE mailbox failed with "
15734 "status x%x add_status x%x, mbx status x%x\n",
15735 shdr_status, shdr_add_status, rc);
15739 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15740 if (hrq->queue_id == 0xFFFF) {
15745 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15746 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15747 &rq_create->u.response);
15748 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15749 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15751 "3262 RQ [%d] doorbell format not "
15752 "supported: x%x\n", hrq->queue_id,
15758 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15759 &rq_create->u.response);
15760 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15761 if (!bar_memmap_p) {
15762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15763 "3269 RQ[%d] failed to memmap pci "
15764 "barset:x%x\n", hrq->queue_id,
15770 db_offset = rq_create->u.response.doorbell_offset;
15771 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15772 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15774 "3270 RQ[%d] doorbell offset not "
15775 "supported: x%x\n", hrq->queue_id,
15780 hrq->db_regaddr = bar_memmap_p + db_offset;
15781 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15782 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15783 "format:x%x\n", hrq->queue_id, pci_barset,
15784 db_offset, hrq->db_format);
15786 hrq->db_format = LPFC_DB_RING_FORMAT;
15787 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15789 hrq->type = LPFC_HRQ;
15790 hrq->assoc_qid = cq->queue_id;
15791 hrq->subtype = subtype;
15792 hrq->host_index = 0;
15793 hrq->hba_index = 0;
15794 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15796 /* now create the data queue */
15797 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15798 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15799 length, LPFC_SLI4_MBX_EMBED);
15800 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15801 phba->sli4_hba.pc_sli4_params.rqv);
15802 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15803 bf_set(lpfc_rq_context_rqe_count_1,
15804 &rq_create->u.request.context, hrq->entry_count);
15805 if (subtype == LPFC_NVMET)
15806 rq_create->u.request.context.buffer_size =
15807 LPFC_NVMET_DATA_BUF_SIZE;
15809 rq_create->u.request.context.buffer_size =
15810 LPFC_DATA_BUF_SIZE;
15811 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15813 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15814 (PAGE_SIZE/SLI4_PAGE_SIZE));
15816 switch (drq->entry_count) {
15818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15819 "2536 Unsupported RQ count. (%d)\n",
15821 if (drq->entry_count < 512) {
15825 /* fall through - otherwise default to smallest count */
15827 bf_set(lpfc_rq_context_rqe_count,
15828 &rq_create->u.request.context,
15829 LPFC_RQ_RING_SIZE_512);
15832 bf_set(lpfc_rq_context_rqe_count,
15833 &rq_create->u.request.context,
15834 LPFC_RQ_RING_SIZE_1024);
15837 bf_set(lpfc_rq_context_rqe_count,
15838 &rq_create->u.request.context,
15839 LPFC_RQ_RING_SIZE_2048);
15842 bf_set(lpfc_rq_context_rqe_count,
15843 &rq_create->u.request.context,
15844 LPFC_RQ_RING_SIZE_4096);
15847 if (subtype == LPFC_NVMET)
15848 bf_set(lpfc_rq_context_buf_size,
15849 &rq_create->u.request.context,
15850 LPFC_NVMET_DATA_BUF_SIZE);
15852 bf_set(lpfc_rq_context_buf_size,
15853 &rq_create->u.request.context,
15854 LPFC_DATA_BUF_SIZE);
15856 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15858 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15860 list_for_each_entry(dmabuf, &drq->page_list, list) {
15861 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15862 putPaddrLow(dmabuf->phys);
15863 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15864 putPaddrHigh(dmabuf->phys);
15866 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15867 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15869 /* The IOCTL status is embedded in the mailbox subheader. */
15870 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15871 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15872 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15873 if (shdr_status || shdr_add_status || rc) {
15877 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15878 if (drq->queue_id == 0xFFFF) {
15882 drq->type = LPFC_DRQ;
15883 drq->assoc_qid = cq->queue_id;
15884 drq->subtype = subtype;
15885 drq->host_index = 0;
15886 drq->hba_index = 0;
15887 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15889 /* link the header and data RQs onto the parent cq child list */
15890 list_add_tail(&hrq->list, &cq->child_list);
15891 list_add_tail(&drq->list, &cq->child_list);
15894 mempool_free(mbox, phba->mbox_mem_pool);
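/*
 * Editor's illustrative sketch (not driver code): header and data RQs are
 * created as a pair bound to one CQ, and lpfc_rq_create() rejects the pair
 * if their entry counts differ. The LPFC_USOL subtype is an assumption.
 */
static int example_create_rq_pair(struct lpfc_hba *phba,
				  struct lpfc_queue *cq,
				  struct lpfc_queue *hrq,
				  struct lpfc_queue *drq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}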
15899 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15900 * @phba: HBA structure that indicates port to create a queue on.
15901 * @hrqp: The queue structure array to use to create the header receive queues.
15902 * @drqp: The queue structure array to use to create the data receive queues.
15903 * @cqp: The completion queue array to bind these receive queues to.
15905 * This function creates a set of receive buffer queue pairs, as detailed in
15906 * @hrqp and @drqp, on a port described by @phba, by sending one RQ_CREATE (v2) mailbox command to the HBA.
15909 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
15910 * and @hrqp arrays are used to get the entry counts needed to determine the
15911 * number of pages to use for each queue. The @cqp array indicates which
15912 * completion queue the buffers posted to each queue pair are bound to.
15913 * This function will send the RQ_CREATE mailbox command to the HBA to set up
15914 * the receive queue pairs. This function is synchronous and will wait for
15915 * the mailbox command to finish before returning.
15917 * On success this function will return a zero. If unable to allocate enough
15918 * memory this function will return -ENOMEM. If the queue create mailbox command
15919 * fails this function will return -ENXIO.
15922 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15923 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15926 struct lpfc_queue *hrq, *drq, *cq;
15927 struct lpfc_mbx_rq_create_v2 *rq_create;
15928 struct lpfc_dmabuf *dmabuf;
15929 LPFC_MBOXQ_t *mbox;
15930 int rc, length, alloclen, status = 0;
15931 int cnt, idx, numrq, page_idx = 0;
15932 uint32_t shdr_status, shdr_add_status;
15933 union lpfc_sli4_cfg_shdr *shdr;
15934 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15936 numrq = phba->cfg_nvmet_mrq;
15937 /* sanity check on array memory */
15938 if (!hrqp || !drqp || !cqp || !numrq)
15940 if (!phba->sli4_hba.pc_sli4_params.supported)
15941 hw_page_size = SLI4_PAGE_SIZE;
15943 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15947 length = sizeof(struct lpfc_mbx_rq_create_v2);
15948 length += ((2 * numrq * hrqp[0]->page_count) *
15949 sizeof(struct dma_address));
15951 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15952 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15953 LPFC_SLI4_MBX_NEMBED);
15954 if (alloclen < length) {
15955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15956 "3099 Allocated DMA memory size (%d) is "
15957 "less than the requested DMA memory size "
15958 "(%d)\n", alloclen, length);
15965 rq_create = mbox->sge_array->addr[0];
15966 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15968 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15971 for (idx = 0; idx < numrq; idx++) {
15976 /* sanity check on queue memory */
15977 if (!hrq || !drq || !cq) {
15982 if (hrq->entry_count != drq->entry_count) {
15988 bf_set(lpfc_mbx_rq_create_num_pages,
15989 &rq_create->u.request,
15991 bf_set(lpfc_mbx_rq_create_rq_cnt,
15992 &rq_create->u.request, (numrq * 2));
15993 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15995 bf_set(lpfc_rq_context_base_cq,
15996 &rq_create->u.request.context,
15998 bf_set(lpfc_rq_context_data_size,
15999 &rq_create->u.request.context,
16000 LPFC_NVMET_DATA_BUF_SIZE);
16001 bf_set(lpfc_rq_context_hdr_size,
16002 &rq_create->u.request.context,
16003 LPFC_HDR_BUF_SIZE);
16004 bf_set(lpfc_rq_context_rqe_count_1,
16005 &rq_create->u.request.context,
16007 bf_set(lpfc_rq_context_rqe_size,
16008 &rq_create->u.request.context,
16010 bf_set(lpfc_rq_context_page_size,
16011 &rq_create->u.request.context,
16012 (PAGE_SIZE/SLI4_PAGE_SIZE));
16015 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16016 memset(dmabuf->virt, 0, hw_page_size);
16017 cnt = page_idx + dmabuf->buffer_tag;
16018 rq_create->u.request.page[cnt].addr_lo =
16019 putPaddrLow(dmabuf->phys);
16020 rq_create->u.request.page[cnt].addr_hi =
16021 putPaddrHigh(dmabuf->phys);
16027 list_for_each_entry(dmabuf, &drq->page_list, list) {
16028 memset(dmabuf->virt, 0, hw_page_size);
16029 cnt = page_idx + dmabuf->buffer_tag;
16030 rq_create->u.request.page[cnt].addr_lo =
16031 putPaddrLow(dmabuf->phys);
16032 rq_create->u.request.page[cnt].addr_hi =
16033 putPaddrHigh(dmabuf->phys);
16038 hrq->db_format = LPFC_DB_RING_FORMAT;
16039 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16040 hrq->type = LPFC_HRQ;
16041 hrq->assoc_qid = cq->queue_id;
16042 hrq->subtype = subtype;
16043 hrq->host_index = 0;
16044 hrq->hba_index = 0;
16045 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16047 drq->db_format = LPFC_DB_RING_FORMAT;
16048 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16049 drq->type = LPFC_DRQ;
16050 drq->assoc_qid = cq->queue_id;
16051 drq->subtype = subtype;
16052 drq->host_index = 0;
16053 drq->hba_index = 0;
16054 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16056 list_add_tail(&hrq->list, &cq->child_list);
16057 list_add_tail(&drq->list, &cq->child_list);
16060 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16061 /* The IOCTL status is embedded in the mailbox subheader. */
16062 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16063 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16064 if (shdr_status || shdr_add_status || rc) {
16065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16066 "3120 RQ_CREATE mailbox failed with "
16067 "status x%x add_status x%x, mbx status x%x\n",
16068 shdr_status, shdr_add_status, rc);
16072 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16073 if (rc == 0xFFFF) {
16078 /* Initialize all RQs with associated queue id */
16079 for (idx = 0; idx < numrq; idx++) {
16081 hrq->queue_id = rc + (2 * idx);
16083 drq->queue_id = rc + (2 * idx) + 1;
16087 lpfc_sli4_mbox_cmd_free(phba, mbox);
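/*
 * Editor's illustrative sketch (not driver code): RQ_CREATE v2 returns one
 * base queue id for the whole set; pair i then owns ids base + 2*i (header)
 * and base + 2*i + 1 (data), exactly as assigned in the loop above.
 */
static void example_mrq_id_layout(uint32_t base, int numrq)
{
	int idx;

	for (idx = 0; idx < numrq; idx++)
		pr_info("MRQ pair %d: hrq id %u, drq id %u\n",
			idx, base + (2 * idx), base + (2 * idx) + 1);
}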
16092 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16093 * @eq: The queue structure associated with the queue to destroy.
16095 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16096 * command, specific to the type of queue, to the HBA.
16098 * The @eq struct is used to get the queue ID of the queue to destroy.
16100 * On success this function will return a zero. If the queue destroy mailbox
16101 * command fails this function will return -ENXIO.
16104 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16106 LPFC_MBOXQ_t *mbox;
16107 int rc, length, status = 0;
16108 uint32_t shdr_status, shdr_add_status;
16109 union lpfc_sli4_cfg_shdr *shdr;
16111 /* sanity check on queue memory */
16115 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16118 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16119 sizeof(struct lpfc_sli4_cfg_mhdr));
16120 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16121 LPFC_MBOX_OPCODE_EQ_DESTROY,
16122 length, LPFC_SLI4_MBX_EMBED);
16123 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16125 mbox->vport = eq->phba->pport;
16126 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16128 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16129 /* The IOCTL status is embedded in the mailbox subheader. */
16130 shdr = (union lpfc_sli4_cfg_shdr *)
16131 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16132 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16133 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16134 if (shdr_status || shdr_add_status || rc) {
16135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16136 "2505 EQ_DESTROY mailbox failed with "
16137 "status x%x add_status x%x, mbx status x%x\n",
16138 shdr_status, shdr_add_status, rc);
16142 /* Remove eq from any list */
16143 list_del_init(&eq->list);
16144 mempool_free(mbox, eq->phba->mbox_mem_pool);
16149 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16150 * @cq: The queue structure associated with the queue to destroy.
16152 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16153 * command, specific to the type of queue, to the HBA.
16155 * The @cq struct is used to get the queue ID of the queue to destroy.
16157 * On success this function will return a zero. If the queue destroy mailbox
16158 * command fails this function will return -ENXIO.
16161 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16163 LPFC_MBOXQ_t *mbox;
16164 int rc, length, status = 0;
16165 uint32_t shdr_status, shdr_add_status;
16166 union lpfc_sli4_cfg_shdr *shdr;
16168 /* sanity check on queue memory */
16171 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16174 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16175 sizeof(struct lpfc_sli4_cfg_mhdr));
16176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16177 LPFC_MBOX_OPCODE_CQ_DESTROY,
16178 length, LPFC_SLI4_MBX_EMBED);
16179 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16181 mbox->vport = cq->phba->pport;
16182 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16183 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16184 /* The IOCTL status is embedded in the mailbox subheader. */
16185 shdr = (union lpfc_sli4_cfg_shdr *)
16186 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16187 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16188 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16189 if (shdr_status || shdr_add_status || rc) {
16190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16191 "2506 CQ_DESTROY mailbox failed with "
16192 "status x%x add_status x%x, mbx status x%x\n",
16193 shdr_status, shdr_add_status, rc);
16196 /* Remove cq from any list */
16197 list_del_init(&cq->list);
16198 mempool_free(mbox, cq->phba->mbox_mem_pool);
16203 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16204 * @mq: The queue structure associated with the queue to destroy.
16206 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16207 * command, specific to the type of queue, to the HBA.
16209 * The @mq struct is used to get the queue ID of the queue to destroy.
16211 * On success this function will return a zero. If the queue destroy mailbox
16212 * command fails this function will return -ENXIO.
16215 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16217 LPFC_MBOXQ_t *mbox;
16218 int rc, length, status = 0;
16219 uint32_t shdr_status, shdr_add_status;
16220 union lpfc_sli4_cfg_shdr *shdr;
16222 /* sanity check on queue memory */
16225 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16228 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16229 sizeof(struct lpfc_sli4_cfg_mhdr));
16230 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16231 LPFC_MBOX_OPCODE_MQ_DESTROY,
16232 length, LPFC_SLI4_MBX_EMBED);
16233 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16235 mbox->vport = mq->phba->pport;
16236 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16237 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16238 /* The IOCTL status is embedded in the mailbox subheader. */
16239 shdr = (union lpfc_sli4_cfg_shdr *)
16240 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16241 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16242 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16243 if (shdr_status || shdr_add_status || rc) {
16244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16245 "2507 MQ_DESTROY mailbox failed with "
16246 "status x%x add_status x%x, mbx status x%x\n",
16247 shdr_status, shdr_add_status, rc);
16250 /* Remove mq from any list */
16251 list_del_init(&mq->list);
16252 mempool_free(mbox, mq->phba->mbox_mem_pool);
16257 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16258 * @wq: The queue structure associated with the queue to destroy.
16260 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16261 * command, specific to the type of queue, to the HBA.
16263 * The @wq struct is used to get the queue ID of the queue to destroy.
16265 * On success this function will return a zero. If the queue destroy mailbox
16266 * command fails this function will return -ENXIO.
16269 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16271 LPFC_MBOXQ_t *mbox;
16272 int rc, length, status = 0;
16273 uint32_t shdr_status, shdr_add_status;
16274 union lpfc_sli4_cfg_shdr *shdr;
16276 /* sanity check on queue memory */
16279 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16282 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16283 sizeof(struct lpfc_sli4_cfg_mhdr));
16284 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16285 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16286 length, LPFC_SLI4_MBX_EMBED);
16287 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16289 mbox->vport = wq->phba->pport;
16290 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16291 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16292 shdr = (union lpfc_sli4_cfg_shdr *)
16293 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16294 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16295 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16296 if (shdr_status || shdr_add_status || rc) {
16297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16298 "2508 WQ_DESTROY mailbox failed with "
16299 "status x%x add_status x%x, mbx status x%x\n",
16300 shdr_status, shdr_add_status, rc);
16303 /* Remove wq from any list */
16304 list_del_init(&wq->list);
16307 mempool_free(mbox, wq->phba->mbox_mem_pool);
16312 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16313 * @hrq: The header receive queue associated with the pair to destroy.
16314 * @drq: The data receive queue associated with the pair to destroy.
16315 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
16316 * by sending RQ_DESTROY mailbox commands, one for each queue, to the HBA.
16318 * The @hrq and @drq structs are used to get the queue IDs to destroy.
16320 * On success this function will return a zero. If the queue destroy mailbox
16321 * command fails this function will return -ENXIO.
16324 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16325 struct lpfc_queue *drq)
16327 LPFC_MBOXQ_t *mbox;
16328 int rc, length, status = 0;
16329 uint32_t shdr_status, shdr_add_status;
16330 union lpfc_sli4_cfg_shdr *shdr;
16332 /* sanity check on queue memory */
16335 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16338 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16339 sizeof(struct lpfc_sli4_cfg_mhdr));
16340 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16341 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16342 length, LPFC_SLI4_MBX_EMBED);
16343 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16345 mbox->vport = hrq->phba->pport;
16346 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16347 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16348 /* The IOCTL status is embedded in the mailbox subheader. */
16349 shdr = (union lpfc_sli4_cfg_shdr *)
16350 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16351 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16352 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16353 if (shdr_status || shdr_add_status || rc) {
16354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16355 "2509 RQ_DESTROY mailbox failed with "
16356 "status x%x add_status x%x, mbx status x%x\n",
16357 shdr_status, shdr_add_status, rc);
16358 if (rc != MBX_TIMEOUT)
16359 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16362 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16364 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16365 shdr = (union lpfc_sli4_cfg_shdr *)
16366 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16367 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16368 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16369 if (shdr_status || shdr_add_status || rc) {
16370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16371 "2510 RQ_DESTROY mailbox failed with "
16372 "status x%x add_status x%x, mbx status x%x\n",
16373 shdr_status, shdr_add_status, rc);
16376 list_del_init(&hrq->list);
16377 list_del_init(&drq->list);
16378 mempool_free(mbox, hrq->phba->mbox_mem_pool);
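/*
 * Editor's illustrative sketch (not driver code): queues are torn down
 * child-first, mirroring the cq->child_list linkage built at create time --
 * WQs and RQ pairs before their CQ, and the CQ before its EQ.
 */
static void example_teardown(struct lpfc_hba *phba, struct lpfc_queue *wq,
			     struct lpfc_queue *hrq, struct lpfc_queue *drq,
			     struct lpfc_queue *cq, struct lpfc_queue *eq)
{
	lpfc_wq_destroy(phba, wq);
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}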
16383 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16384 * @phba: pointer to lpfc hba data structure.
16385 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16386 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16387 * @xritag: the xritag that ties this io to the SGL pages.
16389 * This routine will post the sgl pages for the IO that has the xritag
16390 * that is in the iocbq structure. The xritag is assigned during iocbq
16391 * creation and persists for as long as the driver is loaded.
16392 * If the caller has fewer than 256 scatter gather segments to map, then
16393 * pdma_phys_addr1 should be 0.
16394 * If the caller needs to map more than 256 scatter gather segments, then
16395 * pdma_phys_addr1 should be a valid physical address.
16396 * Physical addresses for SGLs must be 64 byte aligned.
16397 * If two SGL pages are mapped, the first one must have 256 entries and
16398 * the second can have between 1 and 256 entries.
16402 * -ENXIO, -ENOMEM - Failure
16405 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16406 dma_addr_t pdma_phys_addr0,
16407 dma_addr_t pdma_phys_addr1,
16410 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16411 LPFC_MBOXQ_t *mbox;
16413 uint32_t shdr_status, shdr_add_status;
16415 union lpfc_sli4_cfg_shdr *shdr;
16417 if (xritag == NO_XRI) {
16418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16419 "0364 Invalid param:\n");
16423 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16427 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16428 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16429 sizeof(struct lpfc_mbx_post_sgl_pages) -
16430 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16432 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16433 &mbox->u.mqe.un.post_sgl_pages;
16434 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16435 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16437 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16438 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16439 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16440 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16442 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16443 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16444 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16445 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16446 if (!phba->sli4_hba.intr_enable)
16447 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16449 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16450 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16452 /* The IOCTL status is embedded in the mailbox subheader. */
16453 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16454 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16455 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16456 if (rc != MBX_TIMEOUT)
16457 mempool_free(mbox, phba->mbox_mem_pool);
16458 if (shdr_status || shdr_add_status || rc) {
16459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16460 "2511 POST_SGL mailbox failed with "
16461 "status x%x add_status x%x, mbx status x%x\n",
16462 shdr_status, shdr_add_status, rc);
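/*
 * Editor's illustrative sketch (not driver code): posting a one-page SGL
 * for an xri. Per the rules documented above, pdma_phys_addr1 is 0 when 256
 * or fewer segments are mapped, and the address must be 64-byte aligned.
 */
static int example_post_one_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys,
				uint16_t xritag)
{
	if (sgl_phys & 0x3f)	/* enforce 64-byte alignment */
		return -EINVAL;
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}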
16468 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16469 * @phba: pointer to lpfc hba data structure.
16471 * This routine is invoked to allocate the next available xri from the
16472 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16473 * xri returned is a logical index, so the search starts at 0 on each
16474 * call.
16477 * An available xri in the range 0 <= xri < max_xri if successful,
16478 * NO_XRI if no xris are available.
16481 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16486 * Fetch the next logical xri. Because this index is logical,
16487 * the driver starts at 0 each time.
16489 spin_lock_irq(&phba->hbalock);
16490 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16491 phba->sli4_hba.max_cfg_param.max_xri, 0);
16492 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16493 spin_unlock_irq(&phba->hbalock);
16496 set_bit(xri, phba->sli4_hba.xri_bmask);
16497 phba->sli4_hba.max_cfg_param.xri_used++;
16499 spin_unlock_irq(&phba->hbalock);
16504 * __lpfc_sli4_free_xri - Release an xri for reuse.
16505 * @phba: pointer to lpfc hba data structure.
16507 * This routine is invoked to release an xri to the pool of
16508 * available xris maintained by the driver. The caller must hold hbalock.
16511 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16513 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16514 phba->sli4_hba.max_cfg_param.xri_used--;
16519 * lpfc_sli4_free_xri - Release an xri for reuse.
16520 * @phba: pointer to lpfc hba data structure.
16522 * This routine is invoked to release an xri to the pool of
16523 * available xris maintained by the driver.
16526 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16528 spin_lock_irq(&phba->hbalock);
16529 __lpfc_sli4_free_xri(phba, xri);
16530 spin_unlock_irq(&phba->hbalock);
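/*
 * Editor's illustrative sketch (not driver code): xri allocation and release
 * are symmetric, and the non-underscore variants take hbalock internally.
 */
static void example_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return;		/* pool exhausted */
	/* ... tie the xri to an io here ... */
	lpfc_sli4_free_xri(phba, xri);
}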
16534 * lpfc_sli4_next_xritag - Get an xritag for the io
16535 * @phba: Pointer to HBA context object.
16537 * This function gets an xritag for the iocb. If there is no unused xritag
16538 * it will return NO_XRI (0xffff) and log a warning.
16539 * The function returns the allocated xritag if successful, else NO_XRI.
16540 * NO_XRI is not a valid xritag.
16541 * The caller is not required to hold any lock.
16544 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16546 uint16_t xri_index;
16548 xri_index = lpfc_sli4_alloc_xri(phba);
16549 if (xri_index == NO_XRI)
16550 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16551 "2004 Failed to allocate XRI.last XRITAG is %d"
16552 " Max XRI is %d, Used XRI is %d\n",
16554 phba->sli4_hba.max_cfg_param.max_xri,
16555 phba->sli4_hba.max_cfg_param.xri_used);
16560 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16561 * @phba: pointer to lpfc hba data structure.
16562 * @post_sgl_list: pointer to els sgl entry list.
16563 * @count: number of els sgl entries on the list.
16565 * This routine is invoked to post a block of the driver's sgl pages to the
16566 * HBA using a non-embedded mailbox command. No lock is held. This routine
16567 * is only called when the driver is loading and after all IO has been
16571 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16572 struct list_head *post_sgl_list,
16575 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16576 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16577 struct sgl_page_pairs *sgl_pg_pairs;
16579 LPFC_MBOXQ_t *mbox;
16580 uint32_t reqlen, alloclen, pg_pairs;
16582 uint16_t xritag_start = 0;
16584 uint32_t shdr_status, shdr_add_status;
16585 union lpfc_sli4_cfg_shdr *shdr;
16587 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16588 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16589 if (reqlen > SLI4_PAGE_SIZE) {
16590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16591 "2559 Block sgl registration required DMA "
16592 "size (%d) great than a page\n", reqlen);
16596 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16600 /* Allocate DMA memory and set up the non-embedded mailbox command */
16601 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16602 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16603 LPFC_SLI4_MBX_NEMBED);
16605 if (alloclen < reqlen) {
16606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16607 "0285 Allocated DMA memory size (%d) is "
16608 "less than the requested DMA memory "
16609 "size (%d)\n", alloclen, reqlen);
16610 lpfc_sli4_mbox_cmd_free(phba, mbox);
16613 /* Set up the SGL pages in the non-embedded DMA pages */
16614 viraddr = mbox->sge_array->addr[0];
16615 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16616 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16619 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16620 /* Set up the sge entry */
16621 sgl_pg_pairs->sgl_pg0_addr_lo =
16622 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16623 sgl_pg_pairs->sgl_pg0_addr_hi =
16624 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16625 sgl_pg_pairs->sgl_pg1_addr_lo =
16626 cpu_to_le32(putPaddrLow(0));
16627 sgl_pg_pairs->sgl_pg1_addr_hi =
16628 cpu_to_le32(putPaddrHigh(0));
16630 /* Keep the first xritag on the list */
16632 xritag_start = sglq_entry->sli4_xritag;
16637 /* Complete initialization and perform endian conversion. */
16638 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16639 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16640 sgl->word0 = cpu_to_le32(sgl->word0);
16642 if (!phba->sli4_hba.intr_enable)
16643 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16645 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16646 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16648 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16649 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16650 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16651 if (rc != MBX_TIMEOUT)
16652 lpfc_sli4_mbox_cmd_free(phba, mbox);
16653 if (shdr_status || shdr_add_status || rc) {
16654 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16655 "2513 POST_SGL_BLOCK mailbox command failed "
16656 "status x%x add_status x%x mbx status x%x\n",
16657 shdr_status, shdr_add_status, rc);
16664 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
16665 * @phba: pointer to lpfc hba data structure.
16666 * @nblist: pointer to nvme buffer list.
16667 * @count: number of nvme buffers on the list.
16669 * This routine is invoked to post a block of @count nvme sgl pages from an
16670 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
16675 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16678 struct lpfc_io_buf *lpfc_ncmd;
16679 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16680 struct sgl_page_pairs *sgl_pg_pairs;
16682 LPFC_MBOXQ_t *mbox;
16683 uint32_t reqlen, alloclen, pg_pairs;
16685 uint16_t xritag_start = 0;
16687 uint32_t shdr_status, shdr_add_status;
16688 dma_addr_t pdma_phys_bpl1;
16689 union lpfc_sli4_cfg_shdr *shdr;
16691 /* Calculate the requested length of the dma memory */
16692 reqlen = count * sizeof(struct sgl_page_pairs) +
16693 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16694 if (reqlen > SLI4_PAGE_SIZE) {
16695 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16696 "6118 Block sgl registration required DMA "
16697 "size (%d) great than a page\n", reqlen);
16700 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16703 "6119 Failed to allocate mbox cmd memory\n");
16707 /* Allocate DMA memory and set up the non-embedded mailbox command */
16708 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16709 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16710 reqlen, LPFC_SLI4_MBX_NEMBED);
16712 if (alloclen < reqlen) {
16713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16714 "6120 Allocated DMA memory size (%d) is "
16715 "less than the requested DMA memory "
16716 "size (%d)\n", alloclen, reqlen);
16717 lpfc_sli4_mbox_cmd_free(phba, mbox);
16721 /* Get the first SGE entry from the non-embedded DMA memory */
16722 viraddr = mbox->sge_array->addr[0];
16724 /* Set up the SGL pages in the non-embedded DMA pages */
16725 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16726 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16729 list_for_each_entry(lpfc_ncmd, nblist, list) {
16730 /* Set up the sge entry */
16731 sgl_pg_pairs->sgl_pg0_addr_lo =
16732 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16733 sgl_pg_pairs->sgl_pg0_addr_hi =
16734 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16735 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16736 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16739 pdma_phys_bpl1 = 0;
16740 sgl_pg_pairs->sgl_pg1_addr_lo =
16741 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16742 sgl_pg_pairs->sgl_pg1_addr_hi =
16743 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16744 /* Keep the first xritag on the list */
16746 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16750 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16751 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16752 /* Perform endian conversion if necessary */
16753 sgl->word0 = cpu_to_le32(sgl->word0);
16755 if (!phba->sli4_hba.intr_enable) {
16756 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16758 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16759 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16761 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16762 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16763 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16764 if (rc != MBX_TIMEOUT)
16765 lpfc_sli4_mbox_cmd_free(phba, mbox);
16766 if (shdr_status || shdr_add_status || rc) {
16767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16768 "6125 POST_SGL_BLOCK mailbox command failed "
16769 "status x%x add_status x%x mbx status x%x\n",
16770 shdr_status, shdr_add_status, rc);
16777 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
16778 * @phba: pointer to lpfc hba data structure.
16779 * @post_nblist: pointer to the nvme buffer list.
16781 * This routine walks a list of nvme buffers that was passed in. It attempts
16782 * to construct blocks of nvme buffer sgls which contain contiguous xris and
16783 * uses the non-embedded SGL block post mailbox commands to post to the port.
16784 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
16785 * the embedded SGL post mailbox command for posting. The @post_nblist passed
16786 * in must be a local list, so no lock is needed when manipulating the list.
16788 * Returns: 0 = failure, non-zero number of successfully posted buffers.
16791 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16792 struct list_head *post_nblist, int sb_count)
16794 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
16795 int status, sgl_size;
16796 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16797 dma_addr_t pdma_phys_sgl1;
16798 int last_xritag = NO_XRI;
16800 LIST_HEAD(prep_nblist);
16801 LIST_HEAD(blck_nblist);
16802 LIST_HEAD(nvme_nblist);
16808 sgl_size = phba->cfg_sg_dma_buf_size;
16809 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16810 list_del_init(&lpfc_ncmd->list);
16812 if ((last_xritag != NO_XRI) &&
16813 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16814 /* a hole in xri block, form a sgl posting block */
16815 list_splice_init(&prep_nblist, &blck_nblist);
16816 post_cnt = block_cnt - 1;
16817 /* prepare list for next posting block */
16818 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16821 /* prepare list for next posting block */
16822 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16823 /* enough sgls for non-embed sgl mbox command */
16824 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16825 list_splice_init(&prep_nblist, &blck_nblist);
16826 post_cnt = block_cnt;
16831 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16833 /* end of repost sgl list condition for NVME buffers */
16834 if (num_posting == sb_count) {
16835 if (post_cnt == 0) {
16836 /* last sgl posting block */
16837 list_splice_init(&prep_nblist, &blck_nblist);
16838 post_cnt = block_cnt;
16839 } else if (block_cnt == 1) {
16840 /* last single sgl with non-contiguous xri */
16841 if (sgl_size > SGL_PAGE_SIZE)
16843 lpfc_ncmd->dma_phys_sgl +
16846 pdma_phys_sgl1 = 0;
16847 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16848 status = lpfc_sli4_post_sgl(
16849 phba, lpfc_ncmd->dma_phys_sgl,
16850 pdma_phys_sgl1, cur_xritag);
16852 /* Post error. Buffer unavailable. */
16853 lpfc_ncmd->flags |=
16854 LPFC_SBUF_NOT_POSTED;
16856 /* Post success. Buffer available. */
16857 lpfc_ncmd->flags &=
16858 ~LPFC_SBUF_NOT_POSTED;
16859 lpfc_ncmd->status = IOSTAT_SUCCESS;
16862 /* success, put on NVME buffer sgl list */
16863 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16867 /* continue until a nembed page worth of sgls */
16871 /* post block of NVME buffer list sgls */
16872 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16875 /* don't reset xritag due to hole in xri block */
16876 if (block_cnt == 0)
16877 last_xritag = NO_XRI;
16879 /* reset NVME buffer post count for next round of posting */
16882 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
16883 while (!list_empty(&blck_nblist)) {
16884 list_remove_head(&blck_nblist, lpfc_ncmd,
16885 struct lpfc_io_buf, list);
16887 /* Post error. Mark buffer unavailable. */
16888 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
16890 /* Post success, Mark buffer available. */
16891 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
16892 lpfc_ncmd->status = IOSTAT_SUCCESS;
16895 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16898 /* Push NVME buffers with sgl posted to the available list */
16899 lpfc_io_buf_replenish(phba, &nvme_nblist);
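/*
 * Editor's illustrative sketch (not driver code): the grouping rule used by
 * lpfc_sli4_post_io_sgl_list() above. A buffer extends the current posting
 * block only while its xri is contiguous with the previous one and the block
 * is below the non-embedded mailbox SGL limit; otherwise the block is
 * flushed with a block post (or an embedded single post).
 */
static bool example_extends_block(int last_xritag, int cur_xritag,
				  int block_cnt)
{
	if (last_xritag != NO_XRI && cur_xritag != last_xritag + 1)
		return false;	/* hole in the xri range: flush the block */
	return block_cnt < LPFC_NEMBED_MBOX_SGL_CNT;
}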
16905 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16906 * @phba: pointer to lpfc_hba struct that the frame was received on
16907 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16909 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16910 * valid type of frame that the LPFC driver will handle. This function will
16911 * return zero if the frame is a valid frame or a nonzero value when the
16912 * frame does not pass the check.
16915 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16918 struct fc_vft_header *fc_vft_hdr;
16919 uint32_t *header = (uint32_t *) fc_hdr;
16921 switch (fc_hdr->fh_r_ctl) {
16922 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16923 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16924 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16925 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16926 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16927 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16928 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16929 case FC_RCTL_DD_CMD_STATUS: /* command status */
16930 case FC_RCTL_ELS_REQ: /* extended link services request */
16931 case FC_RCTL_ELS_REP: /* extended link services reply */
16932 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16933 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16934 case FC_RCTL_BA_NOP: /* basic link service NOP */
16935 case FC_RCTL_BA_ABTS: /* basic link service abort */
16936 case FC_RCTL_BA_RMC: /* remove connection */
16937 case FC_RCTL_BA_ACC: /* basic accept */
16938 case FC_RCTL_BA_RJT: /* basic reject */
16939 case FC_RCTL_BA_PRMT:
16940 case FC_RCTL_ACK_1: /* acknowledge_1 */
16941 case FC_RCTL_ACK_0: /* acknowledge_0 */
16942 case FC_RCTL_P_RJT: /* port reject */
16943 case FC_RCTL_F_RJT: /* fabric reject */
16944 case FC_RCTL_P_BSY: /* port busy */
16945 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16946 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16947 case FC_RCTL_LCR: /* link credit reset */
16948 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16949 case FC_RCTL_END: /* end */
16951 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16952 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16953 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16954 return lpfc_fc_frame_check(phba, fc_hdr);
16959 switch (fc_hdr->fh_type) {
16972 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16973 "2538 Received frame rctl:x%x, type:x%x, "
16974 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16975 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16976 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16977 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16978 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16979 be32_to_cpu(header[6]));
16982 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16983 "2539 Dropped frame rctl:x%x type:x%x\n",
16984 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16989 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16990 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16992 * This function processes the FC header to retrieve the VFI from the VF
16993 * header, if one exists. This function will return the VFI if one exists
16994 * or 0 if no VFT header exists.
16997 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16999 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17001 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17003 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17007 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17008 * @phba: Pointer to the HBA structure to search for the vport on
17009 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17010 * @fcfi: The FC Fabric ID that the frame came from
17012 * This function searches the @phba for a vport that matches the content of the
17013 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17014 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17015 * returns the matching vport pointer or NULL if unable to match frame to a
17018 static struct lpfc_vport *
17019 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17020 uint16_t fcfi, uint32_t did)
17022 struct lpfc_vport **vports;
17023 struct lpfc_vport *vport = NULL;
17026 if (did == Fabric_DID)
17027 return phba->pport;
17028 if ((phba->pport->fc_flag & FC_PT2PT) &&
17029 !(phba->link_state == LPFC_HBA_READY))
17030 return phba->pport;
17032 vports = lpfc_create_vport_work_array(phba);
17033 if (vports != NULL) {
17034 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17035 if (phba->fcf.fcfi == fcfi &&
17036 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17037 vports[i]->fc_myDID == did) {
17043 lpfc_destroy_vport_work_array(phba, vports);
17048 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17049 * @vport: The vport to work on.
17051 * This function updates the receive sequence time stamp for this vport. The
17052 * receive sequence time stamp indicates the time that the last frame of
17053 * the sequence that has been idle for the longest amount of time was received.
17054 * The driver uses this time stamp to determine if any received sequences have
17058 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17060 struct lpfc_dmabuf *h_buf;
17061 struct hbq_dmabuf *dmabuf = NULL;
17063 /* get the oldest sequence on the rcv list */
17064 h_buf = list_get_first(&vport->rcv_buffer_list,
17065 struct lpfc_dmabuf, list);
17068 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17069 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17073 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17074 * @vport: The vport that the received sequences were sent to.
17076 * This function cleans up all outstanding received sequences. This is called
17077 * by the driver when a link event or user action invalidates all the received
17081 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17083 struct lpfc_dmabuf *h_buf, *hnext;
17084 struct lpfc_dmabuf *d_buf, *dnext;
17085 struct hbq_dmabuf *dmabuf = NULL;
17087 /* start with the oldest sequence on the rcv list */
17088 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17089 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17090 list_del_init(&dmabuf->hbuf.list);
17091 list_for_each_entry_safe(d_buf, dnext,
17092 &dmabuf->dbuf.list, list) {
17093 list_del_init(&d_buf->list);
17094 lpfc_in_buf_free(vport->phba, d_buf);
17096 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17101 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17102 * @vport: The vport that the received sequences were sent to.
17104 * This function determines whether any received sequences have timed out by
17105 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17106 * indicates that there is at least one timed out sequence this routine will
17107 * go through the received sequences one at a time from most inactive to most
17108 * active to determine which ones need to be cleaned up. Once it has determined
17109 * that a sequence needs to be cleaned up it will simply free up the resources
17110 * without sending an abort.
17113 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17115 struct lpfc_dmabuf *h_buf, *hnext;
17116 struct lpfc_dmabuf *d_buf, *dnext;
17117 struct hbq_dmabuf *dmabuf = NULL;
17118 unsigned long timeout;
17119 int abort_count = 0;
17121 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17122 vport->rcv_buffer_time_stamp);
17123 if (list_empty(&vport->rcv_buffer_list) ||
17124 time_before(jiffies, timeout))
17126 /* start with the oldest sequence on the rcv list */
17127 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17128 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17129 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17130 dmabuf->time_stamp);
17131 if (time_before(jiffies, timeout))
17134 list_del_init(&dmabuf->hbuf.list);
17135 list_for_each_entry_safe(d_buf, dnext,
17136 &dmabuf->dbuf.list, list) {
17137 list_del_init(&d_buf->list);
17138 lpfc_in_buf_free(vport->phba, d_buf);
17140 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17143 lpfc_update_rcv_time_stamp(vport);
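/*
 * Editor's illustrative sketch (not driver code): the staleness test used
 * above. A sequence has timed out once ED_TOV milliseconds, converted to
 * jiffies, have elapsed since its time_stamp.
 */
static bool example_seq_timed_out(struct lpfc_vport *vport,
				  unsigned long time_stamp)
{
	unsigned long timeout =
		msecs_to_jiffies(vport->phba->fc_edtov) + time_stamp;

	return time_after_eq(jiffies, timeout);
}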
17147 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17148 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17150 * This function searches through the existing incomplete sequences that have
17151 * been sent to this @vport. If the frame matches one of the incomplete
17152 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17153 * make up that sequence. If no sequence is found that matches this frame then
17154 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17155 * This function returns a pointer to the first dmabuf in the sequence list that
17156 * the frame was linked to.
17158 static struct hbq_dmabuf *
17159 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17161 struct fc_frame_header *new_hdr;
17162 struct fc_frame_header *temp_hdr;
17163 struct lpfc_dmabuf *d_buf;
17164 struct lpfc_dmabuf *h_buf;
17165 struct hbq_dmabuf *seq_dmabuf = NULL;
17166 struct hbq_dmabuf *temp_dmabuf = NULL;
17169 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17170 dmabuf->time_stamp = jiffies;
17171 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17173 /* Use the hdr_buf to find the sequence that this frame belongs to */
17174 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17175 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17176 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17177 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17178 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17180 /* found a pending sequence that matches this frame */
17181 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17186 * This indicates the first frame received for this sequence.
17187 * Queue the buffer on the vport's rcv_buffer_list.
17189 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17190 lpfc_update_rcv_time_stamp(vport);
17193 temp_hdr = seq_dmabuf->hbuf.virt;
17194 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17195 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17196 list_del_init(&seq_dmabuf->hbuf.list);
17197 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17198 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17199 lpfc_update_rcv_time_stamp(vport);
17202 /* move this sequence to the tail to indicate a young sequence */
17203 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17204 seq_dmabuf->time_stamp = jiffies;
17205 lpfc_update_rcv_time_stamp(vport);
17206 if (list_empty(&seq_dmabuf->dbuf.list)) {
17207 temp_hdr = dmabuf->hbuf.virt;
17208 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17211 /* find the correct place in the sequence to insert this frame */
17212 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17214 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17215 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17217 * If the frame's sequence count is greater than the frame on
17218 * the list then insert the frame right after this frame
17220 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17221 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17222 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17227 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17229 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
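/*
 * Illustrative sketch (hypothetical helper): the sequence-match test used
 * by the lookup loop above. Two frames belong to the same sequence when
 * the SEQ_ID, OX_ID, and 3-byte S_ID fields of their headers all match.
 */
static inline bool lpfc_example_same_seq(const struct fc_frame_header *a,
					 const struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}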
17238 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17239 * @vport: pointer to a virtual port
17240 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17242 * This function tries to abort the partially assembled sequence described
17243 * by the information from the basic abort @dmabuf. It checks to see whether
17244 * such a partially assembled sequence is held by the driver. If so, it shall
17245 * free up all the frames from the partially assembled sequence.
17248 * true -- if there is a matching partially assembled sequence present and all
17249 * the frames were freed with the sequence;
17250 * false -- if there is no matching partially assembled sequence present so
17251 * nothing got aborted in the lower layer driver
17254 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17255 struct hbq_dmabuf *dmabuf)
17257 struct fc_frame_header *new_hdr;
17258 struct fc_frame_header *temp_hdr;
17259 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17260 struct hbq_dmabuf *seq_dmabuf = NULL;
17262 /* Use the hdr_buf to find the sequence that matches this frame */
17263 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17264 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17265 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17266 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17267 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17268 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17269 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17270 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17272 /* found a pending sequence that matches this frame */
17273 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17277 /* Free up all the frames from the partially assembled sequence */
17279 list_for_each_entry_safe(d_buf, n_buf,
17280 &seq_dmabuf->dbuf.list, list) {
17281 list_del_init(&d_buf->list);
17282 lpfc_in_buf_free(vport->phba, d_buf);
17290 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17291 * @vport: pointer to a virtual port
17292 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17294 * This function tries to abort the assembled sequence at the upper level
17295 * protocol, described by the information from the basic abort @dmabuf. It
17296 * checks to see whether such pending context exists at upper level protocol.
17297 * If so, it shall clean up the pending context.
17300 * true -- if a matching pending context of the sequence was cleaned
17302 * false -- if there is no matching pending context of the sequence present
17306 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17308 struct lpfc_hba *phba = vport->phba;
17311 /* Accepting abort at ulp with SLI4 only */
17312 if (phba->sli_rev < LPFC_SLI_REV4)
17315 /* Give all registered upper level protocols a chance to handle the abort */
17316 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17324 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17325 * @phba: Pointer to HBA context object.
17326 * @cmd_iocbq: pointer to the command iocbq structure.
17327 * @rsp_iocbq: pointer to the response iocbq structure.
17329 * This function handles the sequence abort response iocb command complete
17330 * event. It properly releases the memory allocated to the sequence abort
17334 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17335 struct lpfc_iocbq *cmd_iocbq,
17336 struct lpfc_iocbq *rsp_iocbq)
17338 struct lpfc_nodelist *ndlp;
17341 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17342 lpfc_nlp_put(ndlp);
17343 lpfc_nlp_not_used(ndlp);
17344 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17347 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17348 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17349 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17350 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17351 rsp_iocbq->iocb.ulpStatus,
17352 rsp_iocbq->iocb.un.ulpWord[4]);
17356 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17357 * @phba: Pointer to HBA context object.
17358 * @xri: xri id in transaction.
17360 * This function validates that the xri maps to the known range of XRIs
17361 * allocated and used by the driver.
17364 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17369 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17370 if (xri == phba->sli4_hba.xri_ids[i])
17377 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17378 * @phba: Pointer to HBA context object.
17379 * @fc_hdr: pointer to a FC frame header.
17381 * This function sends a basic response to a previous unsol sequence abort
17382 * event after aborting the sequence handling.
17385 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17386 struct fc_frame_header *fc_hdr, bool aborted)
17388 struct lpfc_hba *phba = vport->phba;
17389 struct lpfc_iocbq *ctiocb = NULL;
17390 struct lpfc_nodelist *ndlp;
17391 uint16_t oxid, rxid, xri, lxri;
17392 uint32_t sid, fctl;
17396 if (!lpfc_is_link_up(phba))
17399 sid = sli4_sid_from_fc_hdr(fc_hdr);
17400 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17401 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17403 ndlp = lpfc_findnode_did(vport, sid);
17405 ndlp = lpfc_nlp_init(vport, sid);
17407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17408 "1268 Failed to allocate ndlp for "
17409 "oxid:x%x SID:x%x\n", oxid, sid);
17412 /* Put ndlp onto pport node list */
17413 lpfc_enqueue_node(vport, ndlp);
17414 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17415 /* re-setup ndlp without removing from node list */
17416 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17419 "3275 Failed to activate ndlp found "
17420 "for oxid:x%x SID:x%x\n", oxid, sid);
17425 /* Allocate buffer for rsp iocb */
17426 ctiocb = lpfc_sli_get_iocbq(phba);
17430 /* Extract the F_CTL field from FC_HDR */
17431 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17433 icmd = &ctiocb->iocb;
17434 icmd->un.xseq64.bdl.bdeSize = 0;
17435 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17436 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17437 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17438 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17440 /* Fill in the rest of iocb fields */
17441 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17442 icmd->ulpBdeCount = 0;
17444 icmd->ulpClass = CLASS3;
17445 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17446 ctiocb->context1 = lpfc_nlp_get(ndlp);
17448 ctiocb->iocb_cmpl = NULL;
17449 ctiocb->vport = phba->pport;
17450 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17451 ctiocb->sli4_lxritag = NO_XRI;
17452 ctiocb->sli4_xritag = NO_XRI;
17454 if (fctl & FC_FC_EX_CTX)
17455 /* Exchange responder sent the abort so we
17461 lxri = lpfc_sli4_xri_inrange(phba, xri);
17462 if (lxri != NO_XRI)
17463 lpfc_set_rrq_active(phba, ndlp, lxri,
17464 (xri == oxid) ? rxid : oxid, 0);
17465 /* For BA_ABTS from exchange responder, if the logical xri with
17466 * the oxid maps to the FCP XRI range, the port no longer has
17467 * that exchange context, send a BLS_RJT. Override the IOCB for
17470 if ((fctl & FC_FC_EX_CTX) &&
17471 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17472 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17473 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17474 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17475 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17478 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17479 * the driver no longer has that exchange, send a BLS_RJT. Override
17480 * the IOCB for a BA_RJT.
17482 if (aborted == false) {
17483 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17484 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17485 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17486 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17489 if (fctl & FC_FC_EX_CTX) {
17490 /* ABTS sent by responder to CT exchange, construction
17491 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17492 * field and RX_ID from ABTS for RX_ID field.
17494 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17496 /* ABTS sent by initiator to CT exchange, construction
17497 * of BA_ACC will need to allocate a new XRI as for the
17500 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17502 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17503 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17505 /* Xmit CT abts response on exchange <xid> */
17506 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17507 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17508 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17510 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17511 if (rc == IOCB_ERROR) {
17512 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17513 "2925 Failed to issue CT ABTS RSP x%x on "
17514 "xri x%x, Data x%x\n",
17515 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17517 lpfc_nlp_put(ndlp);
17518 ctiocb->context1 = NULL;
17519 lpfc_sli_release_iocbq(phba, ctiocb);
17524 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17525 * @vport: Pointer to the vport on which this sequence was received
17526 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17528 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17529 * receive sequence is only partially assembled by the driver, it shall abort
17530 * the partially assembled frames for the sequence. Otherwise, if the
17531 * unsolicited receive sequence has been completely assembled and passed to
17532 * the Upper Layer Protocol (ULP), it then marks the per-oxid status for the
17533 * unsolicited sequence as aborted. After that, it will issue a basic
17534 * accept (BA_ACC) for the abort.
17537 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17538 struct hbq_dmabuf *dmabuf)
17540 struct lpfc_hba *phba = vport->phba;
17541 struct fc_frame_header fc_hdr;
17545 /* Make a copy of fc_hdr before the dmabuf being released */
17546 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17547 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17549 if (fctl & FC_FC_EX_CTX) {
17550 /* ABTS by responder to exchange, no cleanup needed */
17553 /* ABTS by initiator to exchange, need to do cleanup */
17554 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17555 if (aborted == false)
17556 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17558 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17560 if (phba->nvmet_support) {
17561 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17565 /* Respond with BA_ACC or BA_RJT accordingly */
17566 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17570 * lpfc_seq_complete - Indicates if a sequence is complete
17571 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17573 * This function checks the sequence, starting with the frame described by
17574 * @dmabuf, to see if all the frames associated with this sequence are present.
17575 * The frames associated with this sequence are linked to the @dmabuf using
17576 * the dbuf list. This function looks for three major things: 1) the first
17577 * frame has a sequence count of zero; 2) there is a frame with the last
17578 * frame of sequence bit set; 3) there are no holes in the sequence count.
17579 * The function will return 1 when the sequence is complete, otherwise 0.
17582 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17584 struct fc_frame_header *hdr;
17585 struct lpfc_dmabuf *d_buf;
17586 struct hbq_dmabuf *seq_dmabuf;
17590 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17591 /* make sure the first frame of the sequence has a sequence count of zero */
17592 if (hdr->fh_seq_cnt != seq_count)
17594 fctl = (hdr->fh_f_ctl[0] << 16 |
17595 hdr->fh_f_ctl[1] << 8 |
17597 /* If last frame of sequence we can return success. */
17598 if (fctl & FC_FC_END_SEQ)
17600 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17601 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17602 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17603 /* If there is a hole in the sequence count then fail. */
17604 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17606 fctl = (hdr->fh_f_ctl[0] << 16 |
17607 hdr->fh_f_ctl[1] << 8 |
17609 /* If last frame of sequence we can return success. */
17610 if (fctl & FC_FC_END_SEQ)
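/*
 * Illustrative sketch (hypothetical helper): F_CTL is a 24-bit field
 * carried as three bytes in the FC header; the checks above assemble it
 * into a host integer before testing the FC_FC_END_SEQ bit.
 */
static inline uint32_t lpfc_example_fctl(const struct fc_frame_header *hdr)
{
	return hdr->fh_f_ctl[0] << 16 |
	       hdr->fh_f_ctl[1] << 8 |
	       hdr->fh_f_ctl[2];
}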
17617 * lpfc_prep_seq - Prep sequence for ULP processing
17618 * @vport: Pointer to the vport on which this sequence was received
17619 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17621 * This function takes a sequence, described by a list of frames, and creates
17622 * a list of iocbq structures to describe the sequence. This iocbq list will be
17623 * passed to the generic unsolicited sequence handler. This routine
17624 * returns a pointer to the first iocbq in the list. If the function is unable
17625 * to allocate an iocbq then it throws out the received frames that could not
17626 * be described and returns a pointer to the first iocbq. If unable to
17627 * allocate any iocbqs (including the first) this function will return NULL.
17629 static struct lpfc_iocbq *
17630 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17632 struct hbq_dmabuf *hbq_buf;
17633 struct lpfc_dmabuf *d_buf, *n_buf;
17634 struct lpfc_iocbq *first_iocbq, *iocbq;
17635 struct fc_frame_header *fc_hdr;
17637 uint32_t len, tot_len;
17638 struct ulp_bde64 *pbde;
17640 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17641 /* remove from receive buffer list */
17642 list_del_init(&seq_dmabuf->hbuf.list);
17643 lpfc_update_rcv_time_stamp(vport);
17644 /* get the Remote Port's SID */
17645 sid = sli4_sid_from_fc_hdr(fc_hdr);
17647 /* Get an iocbq struct to fill in. */
17648 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17650 /* Initialize the first IOCB. */
17651 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17652 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17653 first_iocbq->vport = vport;
17655 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17656 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17657 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17658 first_iocbq->iocb.un.rcvels.parmRo =
17659 sli4_did_from_fc_hdr(fc_hdr);
17660 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17662 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17663 first_iocbq->iocb.ulpContext = NO_XRI;
17664 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17665 be16_to_cpu(fc_hdr->fh_ox_id);
17666 /* iocbq is prepped for internal consumption. Physical vpi. */
17667 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17668 vport->phba->vpi_ids[vport->vpi];
17669 /* put the first buffer into the first IOCBq */
17670 tot_len = bf_get(lpfc_rcqe_length,
17671 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17673 first_iocbq->context2 = &seq_dmabuf->dbuf;
17674 first_iocbq->context3 = NULL;
17675 first_iocbq->iocb.ulpBdeCount = 1;
17676 if (tot_len > LPFC_DATA_BUF_SIZE)
17677 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17678 LPFC_DATA_BUF_SIZE;
17680 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17682 first_iocbq->iocb.un.rcvels.remoteID = sid;
17684 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17686 iocbq = first_iocbq;
17688 * Each IOCBq can have two Buffers assigned, so go through the list
17689 * of buffers for this sequence and save two buffers in each IOCBq
17691 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17693 lpfc_in_buf_free(vport->phba, d_buf);
17696 if (!iocbq->context3) {
17697 iocbq->context3 = d_buf;
17698 iocbq->iocb.ulpBdeCount++;
17699 /* We need to get the size out of the right CQE */
17700 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17701 len = bf_get(lpfc_rcqe_length,
17702 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17703 pbde = (struct ulp_bde64 *)
17704 &iocbq->iocb.unsli3.sli3Words[4];
17705 if (len > LPFC_DATA_BUF_SIZE)
17706 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17708 pbde->tus.f.bdeSize = len;
17710 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17713 iocbq = lpfc_sli_get_iocbq(vport->phba);
17716 first_iocbq->iocb.ulpStatus =
17717 IOSTAT_FCP_RSP_ERROR;
17718 first_iocbq->iocb.un.ulpWord[4] =
17719 IOERR_NO_RESOURCES;
17721 lpfc_in_buf_free(vport->phba, d_buf);
17724 /* We need to get the size out of the right CQE */
17725 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17726 len = bf_get(lpfc_rcqe_length,
17727 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17728 iocbq->context2 = d_buf;
17729 iocbq->context3 = NULL;
17730 iocbq->iocb.ulpBdeCount = 1;
17731 if (len > LPFC_DATA_BUF_SIZE)
17732 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17733 LPFC_DATA_BUF_SIZE;
17735 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17738 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17740 iocbq->iocb.un.rcvels.remoteID = sid;
17741 list_add_tail(&iocbq->list, &first_iocbq->list);
17744 return first_iocbq;
17748 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17749 struct hbq_dmabuf *seq_dmabuf)
17751 struct fc_frame_header *fc_hdr;
17752 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17753 struct lpfc_hba *phba = vport->phba;
17755 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17756 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17759 "2707 Ring %d handler: Failed to allocate "
17760 "iocb Rctl x%x Type x%x received\n",
17762 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17765 if (!lpfc_complete_unsol_iocb(phba,
17766 phba->sli4_hba.els_wq->pring,
17767 iocbq, fc_hdr->fh_r_ctl,
17769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17770 "2540 Ring %d handler: unexpected Rctl "
17771 "x%x Type x%x received\n",
17773 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17775 /* Free iocb created in lpfc_prep_seq */
17776 list_for_each_entry_safe(curr_iocb, next_iocb,
17777 &iocbq->list, list) {
17778 list_del_init(&curr_iocb->list);
17779 lpfc_sli_release_iocbq(phba, curr_iocb);
17781 lpfc_sli_release_iocbq(phba, iocbq);
17785 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17786 struct lpfc_iocbq *rspiocb)
17788 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17790 if (pcmd && pcmd->virt)
17791 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17793 lpfc_sli_release_iocbq(phba, cmdiocb);
17794 lpfc_drain_txq(phba);
17798 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17799 struct hbq_dmabuf *dmabuf)
17801 struct fc_frame_header *fc_hdr;
17802 struct lpfc_hba *phba = vport->phba;
17803 struct lpfc_iocbq *iocbq = NULL;
17804 union lpfc_wqe *wqe;
17805 struct lpfc_dmabuf *pcmd = NULL;
17806 uint32_t frame_len;
17808 unsigned long iflags;
17810 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17811 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17813 /* Send the received frame back */
17814 iocbq = lpfc_sli_get_iocbq(phba);
17816 /* Queue cq event and wakeup worker thread to process it */
17817 spin_lock_irqsave(&phba->hbalock, iflags);
17818 list_add_tail(&dmabuf->cq_event.list,
17819 &phba->sli4_hba.sp_queue_event);
17820 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17821 spin_unlock_irqrestore(&phba->hbalock, iflags);
17822 lpfc_worker_wake_up(phba);
17826 /* Allocate buffer for command payload */
17827 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17829 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17831 if (!pcmd || !pcmd->virt)
17834 INIT_LIST_HEAD(&pcmd->list);
17836 /* copyin the payload */
17837 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17839 /* fill in BDE's for command */
17840 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17841 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17842 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17843 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17845 iocbq->context2 = pcmd;
17846 iocbq->vport = vport;
17847 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17848 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17851 * Setup rest of the iocb as though it were a WQE
17852 * Build the SEND_FRAME WQE
17854 wqe = (union lpfc_wqe *)&iocbq->iocb;
17856 wqe->send_frame.frame_len = frame_len;
17857 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17858 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17859 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17860 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17861 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17862 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17864 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17865 iocbq->iocb.ulpLe = 1;
17866 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17867 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17868 if (rc == IOCB_ERROR)
17871 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17875 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17876 "2023 Unable to process MDS loopback frame\n");
17877 if (pcmd && pcmd->virt)
17878 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17881 lpfc_sli_release_iocbq(phba, iocbq);
17882 lpfc_in_buf_free(phba, &dmabuf->dbuf);
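/*
 * Illustrative sketch (hypothetical helper): the 24-byte FC header is six
 * 32-bit words, and the SEND_FRAME WQE above wants each word converted
 * from the frame's big-endian layout to CPU endianness.
 */
static inline void lpfc_example_copy_fc_hdr(uint32_t *wqe_words,
					    const struct fc_frame_header *fc_hdr)
{
	const __be32 *src = (const __be32 *)fc_hdr;
	int i;

	for (i = 0; i < 6; i++)
		wqe_words[i] = be32_to_cpu(src[i]);
}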
17886 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17887 * @phba: Pointer to HBA context object.
17889 * This function is called with no lock held. This function processes all
17890 * the received buffers and gives them to the upper layers when a received
17891 * buffer indicates that it is the final frame in the sequence. The interrupt
17892 * service routine processes received buffers in interrupt context. The
17893 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17894 * appropriate receive function when the final frame in a sequence is received.
17897 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17898 struct hbq_dmabuf *dmabuf)
17900 struct hbq_dmabuf *seq_dmabuf;
17901 struct fc_frame_header *fc_hdr;
17902 struct lpfc_vport *vport;
17906 /* Process each received buffer */
17907 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17909 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17910 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17911 vport = phba->pport;
17912 /* Handle MDS Loopback frames */
17913 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17917 /* check to see if this a valid type of frame */
17918 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17919 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17923 if ((bf_get(lpfc_cqe_code,
17924 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17925 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17926 &dmabuf->cq_event.cqe.rcqe_cmpl);
17928 fcfi = bf_get(lpfc_rcqe_fcf_id,
17929 &dmabuf->cq_event.cqe.rcqe_cmpl);
17931 /* d_id this frame is directed to */
17932 did = sli4_did_from_fc_hdr(fc_hdr);
17934 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17936 /* throw out the frame */
17937 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17941 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17942 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17943 (did != Fabric_DID)) {
17945 * Throw out the frame if we are not pt2pt.
17946 * The pt2pt protocol allows for discovery frames
17947 * to be received without a registered VPI.
17949 if (!(vport->fc_flag & FC_PT2PT) ||
17950 (phba->link_state == LPFC_HBA_READY)) {
17951 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17956 /* Handle the basic abort sequence (BA_ABTS) event */
17957 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17958 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17962 /* Link this frame */
17963 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17965 /* unable to add frame to vport - throw it out */
17966 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17969 /* If not last frame in sequence continue processing frames. */
17970 if (!lpfc_seq_complete(seq_dmabuf))
17973 /* Send the complete sequence to the upper layer protocol */
17974 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17978 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17979 * @phba: pointer to lpfc hba data structure.
17981 * This routine is invoked to post rpi header templates to the
17982 * HBA consistent with the SLI-4 interface spec. This routine
17983 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17984 * 64 rpi context headers.
17986 * This routine does not require any locks. Its usage is expected
17987 * to be driver load or reset recovery when the driver is
17992 * -EIO - The mailbox failed to complete successfully.
17993 * When this error occurs, the driver is not guaranteed
17994 * to have any rpi regions posted to the device and
17995 * must either attempt to repost the regions or take a
17999 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18001 struct lpfc_rpi_hdr *rpi_page;
18005 /* SLI4 ports that support extents do not require RPI headers. */
18006 if (!phba->sli4_hba.rpi_hdrs_in_use)
18008 if (phba->sli4_hba.extents_in_use)
18011 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18013 * Assign the rpi headers a physical rpi only if the driver
18014 * has not initialized those resources. A port reset only
18015 * needs the headers posted.
18017 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18019 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18021 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18022 if (rc != MBX_SUCCESS) {
18023 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18024 "2008 Error %d posting all rpi "
18032 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18033 LPFC_RPI_RSRC_RDY);
18038 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18039 * @phba: pointer to lpfc hba data structure.
18040 * @rpi_page: pointer to the rpi memory region.
18042 * This routine is invoked to post a single rpi header to the
18043 * HBA consistent with the SLI-4 interface spec. This memory region
18044 * maps up to 64 rpi context regions.
18048 * -ENOMEM - No available memory
18049 * -EIO - The mailbox failed to complete successfully.
18052 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18054 LPFC_MBOXQ_t *mboxq;
18055 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18057 uint32_t shdr_status, shdr_add_status;
18058 union lpfc_sli4_cfg_shdr *shdr;
18060 /* SLI4 ports that support extents do not require RPI headers. */
18061 if (!phba->sli4_hba.rpi_hdrs_in_use)
18063 if (phba->sli4_hba.extents_in_use)
18066 /* The port is notified of the header region via a mailbox command. */
18067 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18070 "2001 Unable to allocate memory for issuing "
18071 "SLI_CONFIG_SPECIAL mailbox command\n");
18075 /* Post all rpi memory regions to the port. */
18076 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18077 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18078 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18079 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18080 sizeof(struct lpfc_sli4_cfg_mhdr),
18081 LPFC_SLI4_MBX_EMBED);
18084 /* Post the physical rpi to the port for this rpi header. */
18085 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18086 rpi_page->start_rpi);
18087 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18088 hdr_tmpl, rpi_page->page_count);
18090 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18091 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18092 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18093 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18094 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18095 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18096 if (rc != MBX_TIMEOUT)
18097 mempool_free(mboxq, phba->mbox_mem_pool);
18098 if (shdr_status || shdr_add_status || rc) {
18099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18100 "2514 POST_RPI_HDR mailbox failed with "
18101 "status x%x add_status x%x, mbx status x%x\n",
18102 shdr_status, shdr_add_status, rc);
18106 * The next_rpi stores the next logical modulo-64 rpi value used
18107 * to post physical rpis in subsequent rpi postings.
18109 spin_lock_irq(&phba->hbalock);
18110 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18111 spin_unlock_irq(&phba->hbalock);
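/*
 * Illustrative sketch (hypothetical helper): the SLI4 mailbox pattern used
 * above -- a command only counts as successful when the transport return
 * code and both subheader status fields are all clean.
 */
static inline bool lpfc_example_mbox_ok(int rc, uint32_t shdr_status,
					uint32_t shdr_add_status)
{
	return rc == MBX_SUCCESS && !shdr_status && !shdr_add_status;
}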
18117 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18118 * @phba: pointer to lpfc hba data structure.
18120 * This routine is invoked to allocate the next available rpi from the
18121 * driver's rpi bitmask. If the pool of posted rpis is running low, it
18122 * also posts an additional SLI4_PAGE_SIZE rpi header region to the port
18123 * to grow the pool.
18126 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18127 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18130 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18133 uint16_t max_rpi, rpi_limit;
18134 uint16_t rpi_remaining, lrpi = 0;
18135 struct lpfc_rpi_hdr *rpi_hdr;
18136 unsigned long iflag;
18139 * Fetch the next logical rpi. Because this index is logical,
18140 * the driver starts at 0 each time.
18142 spin_lock_irqsave(&phba->hbalock, iflag);
18143 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18144 rpi_limit = phba->sli4_hba.next_rpi;
18146 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18147 if (rpi >= rpi_limit)
18148 rpi = LPFC_RPI_ALLOC_ERROR;
18150 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18151 phba->sli4_hba.max_cfg_param.rpi_used++;
18152 phba->sli4_hba.rpi_count++;
18154 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18155 "0001 rpi:%x max:%x lim:%x\n",
18156 (int) rpi, max_rpi, rpi_limit);
18159 * Don't try to allocate more rpi header regions if the device limit
18160 * has been exhausted.
18162 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18163 (phba->sli4_hba.rpi_count >= max_rpi)) {
18164 spin_unlock_irqrestore(&phba->hbalock, iflag);
18169 * RPI header postings are not required for SLI4 ports capable of
18172 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18173 spin_unlock_irqrestore(&phba->hbalock, iflag);
18178 * If the driver is running low on rpi resources, allocate another
18179 * page now. Note that the next_rpi value is used because
18180 * it represents how many are actually in use whereas max_rpi notes
18181 * the maximum supported by the device.
18183 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18184 spin_unlock_irqrestore(&phba->hbalock, iflag);
18185 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18186 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18189 "2002 Error Could not grow rpi "
18192 lrpi = rpi_hdr->start_rpi;
18193 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18194 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
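/*
 * Illustrative sketch (hypothetical helper): the rpi allocator above is
 * the classic bitmap-id pattern -- find_next_zero_bit() locates the lowest
 * free id below the posted limit and set_bit() claims it; the caller must
 * hold the appropriate lock, as lpfc_sli4_alloc_rpi() does with hbalock.
 */
static inline int lpfc_example_alloc_id(unsigned long *bmask,
					unsigned long limit)
{
	unsigned long id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return -1;	/* pool exhausted */
	set_bit(id, bmask);
	return (int)id;
}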
18202 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18203 * @phba: pointer to lpfc hba data structure.
18205 * This routine is invoked to release an rpi to the pool of
18206 * available rpis maintained by the driver.
18209 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18211 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18212 phba->sli4_hba.rpi_count--;
18213 phba->sli4_hba.max_cfg_param.rpi_used--;
18218 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18219 * @phba: pointer to lpfc hba data structure.
18221 * This routine is invoked to release an rpi to the pool of
18222 * available rpis maintained by the driver.
18225 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18227 spin_lock_irq(&phba->hbalock);
18228 __lpfc_sli4_free_rpi(phba, rpi);
18229 spin_unlock_irq(&phba->hbalock);
18233 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18234 * @phba: pointer to lpfc hba data structure.
18236 * This routine is invoked to remove the memory regions that
18237 * provided rpis via a bitmask.
18240 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18242 kfree(phba->sli4_hba.rpi_bmask);
18243 kfree(phba->sli4_hba.rpi_ids);
18244 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18248 * lpfc_sli4_resume_rpi - Resume an rpi with the port
18249 * @ndlp: pointer to the node whose rpi is being resumed.
18251 * This routine is invoked to issue a RESUME_RPI mailbox command for the
18252 * rpi associated with @ndlp, optionally attaching a completion handler.
18255 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18256 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18258 LPFC_MBOXQ_t *mboxq;
18259 struct lpfc_hba *phba = ndlp->phba;
18262 /* The port is notified of the header region via a mailbox command. */
18263 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18267 /* Post all rpi memory regions to the port. */
18268 lpfc_resume_rpi(mboxq, ndlp);
18270 mboxq->mbox_cmpl = cmpl;
18271 mboxq->ctx_buf = arg;
18272 mboxq->ctx_ndlp = ndlp;
18274 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18275 mboxq->vport = ndlp->vport;
18276 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18277 if (rc == MBX_NOT_FINISHED) {
18278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18279 "2010 Resume RPI Mailbox failed "
18280 "status %d, mbxStatus x%x\n", rc,
18281 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18282 mempool_free(mboxq, phba->mbox_mem_pool);
18289 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18290 * @vport: Pointer to the vport for which the vpi is being initialized
18292 * This routine is invoked to activate a vpi with the port.
18296 * -Evalue otherwise
18299 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18301 LPFC_MBOXQ_t *mboxq;
18303 int retval = MBX_SUCCESS;
18305 struct lpfc_hba *phba = vport->phba;
18306 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18309 lpfc_init_vpi(phba, mboxq, vport->vpi);
18310 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18311 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18312 if (rc != MBX_SUCCESS) {
18313 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18314 "2022 INIT VPI Mailbox failed "
18315 "status %d, mbxStatus x%x\n", rc,
18316 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18319 if (rc != MBX_TIMEOUT)
18320 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18326 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18327 * @phba: pointer to lpfc hba data structure.
18328 * @mboxq: Pointer to mailbox object.
18330 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
18331 * command. It checks the status embedded in the mailbox subheader and
18332 * frees the nonembedded mailbox resources.
18335 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18338 union lpfc_sli4_cfg_shdr *shdr;
18339 uint32_t shdr_status, shdr_add_status;
18341 virt_addr = mboxq->sge_array->addr[0];
18342 /* The IOCTL status is embedded in the mailbox subheader. */
18343 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18344 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18345 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18347 if ((shdr_status || shdr_add_status) &&
18348 (shdr_status != STATUS_FCF_IN_USE))
18349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18350 "2558 ADD_FCF_RECORD mailbox failed with "
18351 "status x%x add_status x%x\n",
18352 shdr_status, shdr_add_status);
18354 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18358 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18359 * @phba: pointer to lpfc hba data structure.
18360 * @fcf_record: pointer to the initialized fcf record to add.
18362 * This routine is invoked to manually add a single FCF record. The caller
18363 * must pass a completely initialized FCF_Record. This routine takes
18364 * care of the nonembedded mailbox operations.
18367 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18370 LPFC_MBOXQ_t *mboxq;
18373 struct lpfc_mbx_sge sge;
18374 uint32_t alloc_len, req_len;
18377 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18380 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18384 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18387 /* Allocate DMA memory and set up the non-embedded mailbox command */
18388 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18389 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18390 req_len, LPFC_SLI4_MBX_NEMBED);
18391 if (alloc_len < req_len) {
18392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18393 "2523 Allocated DMA memory size (x%x) is "
18394 "less than the requested DMA memory "
18395 "size (x%x)\n", alloc_len, req_len);
18396 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18401 * Get the first SGE entry from the non-embedded DMA memory. This
18402 * routine only uses a single SGE.
18404 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18405 virt_addr = mboxq->sge_array->addr[0];
18407 * Configure the FCF record for FCFI 0. This is the driver's
18408 * hardcoded default and gets used in nonFIP mode.
18410 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18411 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18412 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18415 * Copy the fcf_index and the FCF Record Data. The data starts after
18416 * the FCoE header plus word10. The data copy needs to be endian
18419 bytep += sizeof(uint32_t);
18420 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18421 mboxq->vport = phba->pport;
18422 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18423 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18424 if (rc == MBX_NOT_FINISHED) {
18425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18426 "2515 ADD_FCF_RECORD mailbox failed with "
18427 "status 0x%x\n", rc);
18428 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18437 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18438 * @phba: pointer to lpfc hba data structure.
18439 * @fcf_record: pointer to the fcf record to write the default data.
18440 * @fcf_index: FCF table entry index.
18442 * This routine is invoked to build the driver's default FCF record. The
18443 * values used are hardcoded. This routine handles memory initialization.
18447 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18448 struct fcf_record *fcf_record,
18449 uint16_t fcf_index)
18451 memset(fcf_record, 0, sizeof(struct fcf_record));
18452 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18453 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18454 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18455 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18456 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18457 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18458 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18459 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18460 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18461 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18462 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18463 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18464 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18465 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18466 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18467 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18468 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18469 /* Set the VLAN bit map */
18470 if (phba->valid_vlan) {
18471 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18472 = 1 << (phba->vlan_id % 8);
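/*
 * Worked example (illustrative): VLAN id 21 lands in vlan_bitmap[21 / 8],
 * i.e. byte 2, with bit 21 % 8 = 5 set, so vlan_bitmap[2] == 0x20. The
 * assignment above suffices because the driver tracks a single valid
 * VLAN; a general-purpose setter would OR the bit in instead:
 */
static inline void lpfc_example_set_vlan(uint8_t *vlan_bitmap,
					 uint16_t vlan_id)
{
	vlan_bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}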
18477 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18478 * @phba: pointer to lpfc hba data structure.
18479 * @fcf_index: FCF table entry offset.
18481 * This routine is invoked to scan the entire FCF table by reading FCF
18482 * record and processing it one at a time starting from the @fcf_index
18483 * for initial FCF discovery or fast FCF failover rediscovery.
18485 * Return 0 if the mailbox command is submitted successfully, nonzero
18489 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18492 LPFC_MBOXQ_t *mboxq;
18494 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18495 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18496 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18499 "2000 Failed to allocate mbox for "
18502 goto fail_fcf_scan;
18504 /* Construct the read FCF record mailbox command */
18505 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18508 goto fail_fcf_scan;
18510 /* Issue the mailbox command asynchronously */
18511 mboxq->vport = phba->pport;
18512 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18514 spin_lock_irq(&phba->hbalock);
18515 phba->hba_flag |= FCF_TS_INPROG;
18516 spin_unlock_irq(&phba->hbalock);
18518 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18519 if (rc == MBX_NOT_FINISHED)
18522 /* Reset eligible FCF count for new scan */
18523 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18524 phba->fcf.eligible_fcf_cnt = 0;
18530 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18531 /* FCF scan failed, clear FCF_TS_INPROG flag */
18532 spin_lock_irq(&phba->hbalock);
18533 phba->hba_flag &= ~FCF_TS_INPROG;
18534 spin_unlock_irq(&phba->hbalock);
18540 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18541 * @phba: pointer to lpfc hba data structure.
18542 * @fcf_index: FCF table entry offset.
18544 * This routine is invoked to read an FCF record indicated by @fcf_index
18545 * and to use it for FLOGI roundrobin FCF failover.
18547 * Return 0 if the mailbox command is submitted successfully, nonzero
18551 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18554 LPFC_MBOXQ_t *mboxq;
18556 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18558 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18559 "2763 Failed to allocate mbox for "
18562 goto fail_fcf_read;
18564 /* Construct the read FCF record mailbox command */
18565 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18568 goto fail_fcf_read;
18570 /* Issue the mailbox command asynchronously */
18571 mboxq->vport = phba->pport;
18572 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18573 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18574 if (rc == MBX_NOT_FINISHED)
18580 if (error && mboxq)
18581 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18586 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18587 * @phba: pointer to lpfc hba data structure.
18588 * @fcf_index: FCF table entry offset.
18590 * This routine is invoked to read an FCF record indicated by @fcf_index to
18591 * determine whether it's eligible for FLOGI roundrobin failover list.
18593 * Return 0 if the mailbox command is submitted successfully, nonzero
18597 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18600 LPFC_MBOXQ_t *mboxq;
18602 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18604 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18605 "2758 Failed to allocate mbox for "
18608 goto fail_fcf_read;
18610 /* Construct the read FCF record mailbox command */
18611 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18614 goto fail_fcf_read;
18616 /* Issue the mailbox command asynchronously */
18617 mboxq->vport = phba->pport;
18618 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18619 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18620 if (rc == MBX_NOT_FINISHED)
18626 if (error && mboxq)
18627 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18632 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask at the next priority
18633 * @phba: pointer to the lpfc_hba struct for this port.
18634 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18635 * routine when the rr_bmask is empty. The FCF indices are put into the
18636 * rr_bmask based on their priority level, starting from the highest priority
18637 * to the lowest. The most likely FCF candidate will be in the highest
18638 * priority group. When this routine is called it searches the fcf_pri list
18639 * for the next lowest priority group and repopulates the rr_bmask with only those
18642 * 1=success 0=failure
18645 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18647 uint16_t next_fcf_pri;
18648 uint16_t last_index;
18649 struct lpfc_fcf_pri *fcf_pri;
18653 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18654 LPFC_SLI4_FCF_TBL_INDX_MAX);
18655 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18656 "3060 Last IDX %d\n", last_index);
18658 /* Verify the priority list has 2 or more entries */
18659 spin_lock_irq(&phba->hbalock);
18660 if (list_empty(&phba->fcf.fcf_pri_list) ||
18661 list_is_singular(&phba->fcf.fcf_pri_list)) {
18662 spin_unlock_irq(&phba->hbalock);
18663 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18664 "3061 Last IDX %d\n", last_index);
18665 return 0; /* Empty rr list */
18667 spin_unlock_irq(&phba->hbalock);
18671 * Clear the rr_bmask and set all of the bits that are at this
18674 memset(phba->fcf.fcf_rr_bmask, 0,
18675 sizeof(*phba->fcf.fcf_rr_bmask));
18676 spin_lock_irq(&phba->hbalock);
18677 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18678 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18681 * the first priority that has not failed FLOGI
18682 * will be the highest.
18685 next_fcf_pri = fcf_pri->fcf_rec.priority;
18686 spin_unlock_irq(&phba->hbalock);
18687 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18688 rc = lpfc_sli4_fcf_rr_index_set(phba,
18689 fcf_pri->fcf_rec.fcf_index);
18693 spin_lock_irq(&phba->hbalock);
18696 * if next_fcf_pri was not set above and the list is not empty then
18697 * we have failed flogis on all of them. So reset flogi failed
18698 * and start at the beginning.
18700 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18701 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18702 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18704 * the first priority that has not failed FLOGI
18705 * will be the highest.
18708 next_fcf_pri = fcf_pri->fcf_rec.priority;
18709 spin_unlock_irq(&phba->hbalock);
18710 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18711 rc = lpfc_sli4_fcf_rr_index_set(phba,
18712 fcf_pri->fcf_rec.fcf_index);
18716 spin_lock_irq(&phba->hbalock);
18720 spin_unlock_irq(&phba->hbalock);
18725 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18726 * @phba: pointer to lpfc hba data structure.
18728 * This routine is to get the next eligible FCF record index in a round
18729 * robin fashion. If the next eligible FCF record index equals to the
18730 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18731 * shall be returned, otherwise, the next eligible FCF record's index
18732 * shall be returned.
18735 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18737 uint16_t next_fcf_index;
18740 /* Search start from next bit of currently registered FCF index */
18741 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18744 /* Determine the next fcf index to check */
18745 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18746 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18747 LPFC_SLI4_FCF_TBL_INDX_MAX,
18750 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18751 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18753 * If we have wrapped then we need to clear the bits that
18754 * have been tested so that we can detect when we should
18755 * change the priority level.
18757 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18758 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18762 /* Check roundrobin failover list empty condition */
18763 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18764 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18766 * If the next fcf index is not found, check if there are lower
18767 * priority level fcfs in the fcf_priority list.
18768 * Set up the rr_bmask with all of the available fcf bits
18769 * at that level and continue the selection process.
18771 if (lpfc_check_next_fcf_pri_level(phba))
18772 goto initial_priority;
18773 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18774 "2844 No roundrobin failover FCF available\n");
18776 return LPFC_FCOE_FCF_NEXT_NONE;
18779 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18780 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18781 LPFC_FCF_FLOGI_FAILED) {
18782 if (list_is_singular(&phba->fcf.fcf_pri_list))
18783 return LPFC_FCOE_FCF_NEXT_NONE;
18785 goto next_priority;
18788 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18789 "2845 Get next roundrobin failover FCF (x%x)\n",
18792 return next_fcf_index;
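/*
 * Illustrative sketch (hypothetical helper): round-robin selection over a
 * bitmap with wraparound, mirroring the search above -- scan forward from
 * the bit after the current index and restart from bit 0 on wrap. A
 * result >= max means the bitmap is empty.
 */
static inline uint16_t lpfc_example_rr_next(const unsigned long *bmask,
					    uint16_t max, uint16_t cur)
{
	uint16_t next = find_next_bit(bmask, max, (cur + 1) % max);

	if (next >= max)	/* wrapped: retry from the start */
		next = find_next_bit(bmask, max, 0);
	return next;
}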
18796 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18797 * @phba: pointer to lpfc hba data structure.
18799 * This routine sets the FCF record index in to the eligible bmask for
18800 * roundrobin failover search. It checks to make sure that the index
18801 * does not go beyond the range of the driver allocated bmask dimension
18802 * before setting the bit.
18804 * Returns 0 if the index bit is successfully set; otherwise, it returns
18808 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18810 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18811 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18812 "2610 FCF (x%x) reached driver's book "
18813 "keeping dimension:x%x\n",
18814 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18817 /* Set the eligible FCF record index bmask */
18818 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18820 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18821 "2790 Set FCF (x%x) to roundrobin FCF failover "
18822 "bmask\n", fcf_index);
18828 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18829 * @phba: pointer to lpfc hba data structure.
18831 * This routine clears the FCF record index from the eligible bmask for
18832 * roundrobin failover search. It checks to make sure that the index
18833 * does not go beyond the range of the driver allocated bmask dimension
18834 * before clearing the bit.
18837 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18839 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18840 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18841 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18842 "2762 FCF (x%x) reached driver's book "
18843 "keeping dimension:x%x\n",
18844 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18847 /* Clear the eligible FCF record index bmask */
18848 spin_lock_irq(&phba->hbalock);
18849 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18851 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18852 list_del_init(&fcf_pri->list);
18856 spin_unlock_irq(&phba->hbalock);
18857 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18859 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18860 "2791 Clear FCF (x%x) from roundrobin failover "
18861 "bmask\n", fcf_index);
18865 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18866 * @phba: pointer to lpfc hba data structure.
18868 * This routine is the completion routine for the rediscover FCF table mailbox
18869 * command. If the mailbox command returned failure, it will try to stop the
18870 * FCF rediscover wait timer.
18873 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18875 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18876 uint32_t shdr_status, shdr_add_status;
18878 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18880 shdr_status = bf_get(lpfc_mbox_hdr_status,
18881 &redisc_fcf->header.cfg_shdr.response);
18882 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18883 &redisc_fcf->header.cfg_shdr.response);
18884 if (shdr_status || shdr_add_status) {
18885 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18886 "2746 Requesting for FCF rediscovery failed "
18887 "status x%x add_status x%x\n",
18888 shdr_status, shdr_add_status);
18889 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18890 spin_lock_irq(&phba->hbalock);
18891 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18892 spin_unlock_irq(&phba->hbalock);
18894 * CVL event triggered FCF rediscover request failed,
18895 * last resort to re-try current registered FCF entry.
18897 lpfc_retry_pport_discovery(phba);
18899 spin_lock_irq(&phba->hbalock);
18900 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18901 spin_unlock_irq(&phba->hbalock);
18903 * DEAD FCF event triggered FCF rediscover request
18904 * failed, last resort to fail over as a link down
18905 * to FCF registration.
18907 lpfc_sli4_fcf_dead_failthrough(phba);
18910 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18911 "2775 Start FCF rediscover quiescent timer\n");
18913 * Start FCF rediscovery wait timer for pending FCF
18914 * before rescanning the FCF record table.
18916 lpfc_fcf_redisc_wait_start_timer(phba);
18919 mempool_free(mbox, phba->mbox_mem_pool);
18923 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18924 * @phba: pointer to lpfc hba data structure.
18926 * This routine is invoked to request rediscovery of the entire FCF table
18930 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18932 LPFC_MBOXQ_t *mbox;
18933 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18936 /* Cancel retry delay timers to all vports before FCF rediscover */
18937 lpfc_cancel_all_vport_retry_delay_timer(phba);
18939 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18942 "2745 Failed to allocate mbox for "
18943 "requesting FCF rediscover.\n");
18947 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18948 sizeof(struct lpfc_sli4_cfg_mhdr));
18949 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18950 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18951 length, LPFC_SLI4_MBX_EMBED);
18953 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18954 /* Set count to 0 for invalidating the entire FCF database */
18955 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18957 /* Issue the mailbox command asynchronously */
18958 mbox->vport = phba->pport;
18959 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18960 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18962 if (rc == MBX_NOT_FINISHED) {
18963 mempool_free(mbox, phba->mbox_mem_pool);
18970 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18971 * @phba: pointer to lpfc hba data structure.
18973 * This function is the failover routine as a last resort to the FCF DEAD
18974 * event when the driver fails to perform a fast FCF failover.
18977 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18979 uint32_t link_state;
18982 * Last resort as FCF DEAD event failover will treat this as
18983 * a link down, but save the link state because we don't want
18984 * it to be changed to Link Down unless it is already down.
18986 link_state = phba->link_state;
18987 lpfc_linkdown(phba);
18988 phba->link_state = link_state;
18990 /* Unregister FCF if no devices connected to it */
18991 lpfc_unregister_unused_fcf(phba);
18995 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18996 * @phba: pointer to lpfc hba data structure.
18997 * @rgn23_data: pointer to configure region 23 data.
18999 * This function gets SLI3 port configuration region 23 data through the
19000 * memory dump mailbox command. On success it returns the size of the
19001 * retrieved data; otherwise it returns 0.
19004 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19006 LPFC_MBOXQ_t *pmb = NULL;
19008 uint32_t offset = 0;
19014 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19017 "2600 failed to allocate mailbox memory\n");
19023 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19024 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19026 if (rc != MBX_SUCCESS) {
19027 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19028 "2601 failed to read config "
19029 "region 23, rc 0x%x Status 0x%x\n",
19030 rc, mb->mbxStatus);
19031 mb->un.varDmp.word_cnt = 0;
19034 * The dump may return zero words when it is finished, or we got a
19035 * mailbox error; either way we are done.
19037 if (mb->un.varDmp.word_cnt == 0)
19039 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19040 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19042 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19043 rgn23_data + offset,
19044 mb->un.varDmp.word_cnt);
19045 offset += mb->un.varDmp.word_cnt;
19046 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19048 mempool_free(pmb, phba->mbox_mem_pool);
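/*
 * Usage sketch (illustrative): dump region 23 into a caller-supplied
 * buffer sized DMP_RGN23_SIZE and check how much data came back. This
 * mirrors how lpfc_sli_read_link_ste() below consumes the routine;
 * parse_region23() is a hypothetical consumer.
 *
 *	char *rgn23 = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	uint32_t len;
 *
 *	if (rgn23) {
 *		len = lpfc_sli_get_config_region23(phba, rgn23);
 *		if (len)
 *			parse_region23(rgn23, len);
 *		kfree(rgn23);
 *	}
 */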
19053 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19054 * @phba: pointer to lpfc hba data structure.
19055 * @rgn23_data: pointer to configure region 23 data.
19057 * This function gets SLI4 port configuration region 23 data through the
19058 * memory dump mailbox command. On success it returns the size of the
19059 * retrieved data; otherwise it returns 0.
19062 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19064 LPFC_MBOXQ_t *mboxq = NULL;
19065 struct lpfc_dmabuf *mp = NULL;
19066 struct lpfc_mqe *mqe;
19067 uint32_t data_length = 0;
19073 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19076 "3105 failed to allocate mailbox memory\n");
19080 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19082 mqe = &mboxq->u.mqe;
19083 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19084 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19087 data_length = mqe->un.mb_words[5];
19088 if (data_length == 0)
19090 if (data_length > DMP_RGN23_SIZE) {
19094 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19096 mempool_free(mboxq, phba->mbox_mem_pool);
19098 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19101 return data_length;
19105 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19106 * @phba: pointer to lpfc hba data structure.
19108 * This function reads region 23 and parses the TLVs for port status to
19109 * decide if the user disabled the port. If a TLV indicates the
19110 * port is disabled, the hba_flag is set accordingly.
19113 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19115 uint8_t *rgn23_data = NULL;
19116 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19117 uint32_t offset = 0;
19119 /* Get adapter Region 23 data */
19120 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19124 if (phba->sli_rev < LPFC_SLI_REV4)
19125 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19127 if_type = bf_get(lpfc_sli_intf_if_type,
19128 &phba->sli4_hba.sli_intf);
19129 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19131 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19137 /* Check the region signature first */
19138 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19140 "2619 Config region 23 has bad signature\n");
19145 /* Check the data structure version */
19146 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19148 "2620 Config region 23 has bad version\n");
19153 /* Parse TLV entries in the region */
19154 while (offset < data_size) {
19155 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19158 * If the TLV is not a driver-specific TLV or the driver id is
19159 * not the Linux driver id, skip the record.
19161 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19162 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19163 (rgn23_data[offset + 3] != 0)) {
19164 offset += rgn23_data[offset + 1] * 4 + 4;
19168 /* Driver found a driver specific TLV in the config region */
19169 sub_tlv_len = rgn23_data[offset + 1] * 4;
19174 * Search for configured port state sub-TLV.
19176 while ((offset < data_size) &&
19177 (tlv_offset < sub_tlv_len)) {
19178 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19183 if (rgn23_data[offset] != PORT_STE_TYPE) {
19184 offset += rgn23_data[offset + 1] * 4 + 4;
19185 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19189 /* This HBA contains PORT_STE configured */
19190 if (!rgn23_data[offset + 2])
19191 phba->hba_flag |= LINK_DISABLED;
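/*
 * Region 23 record layout sketch, inferred from the parser above (length
 * fields count 32-bit words, so a record spans length * 4 + 4 bytes):
 *
 *	[4-byte signature][1-byte version]
 *	repeated TLV records:
 *		byte 0:  type  (DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE, ...)
 *		byte 1:  length in words
 *		byte 2+: value (driver TLVs carry LINUX_DRIVER_ID, sub-TLVs)
 *	LPFC_REGION23_LAST_REC terminates the list.
 */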
19203 * lpfc_wr_object - write an object to the firmware
19204 * @phba: HBA structure that indicates port to create a queue on.
19205 * @dmabuf_list: list of dmabufs to write to the port.
19206 * @size: the total byte value of the objects to write to the port.
19207 * @offset: the current offset to be used to start the transfer.
19209 * This routine will create a wr_object mailbox command to send to the port.
19210 * The mailbox command will be constructed using the dma buffers described in
19211 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19212 * BDEs as the embedded mailbox can support. The @offset variable will be
19213 * used to indicate the starting offset of the transfer and will also return
19214 * the offset after the write object mailbox has completed. @size is used to
19215 * determine the end of the object and whether the eof bit should be set.
19217 * Returns 0 if successful, and @offset will contain the new offset to use
19218 * for the next write.
19219 * Returns a negative value for error cases.
19222 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19223 uint32_t size, uint32_t *offset)
19225 struct lpfc_mbx_wr_object *wr_object;
19226 LPFC_MBOXQ_t *mbox;
19228 uint32_t shdr_status, shdr_add_status, shdr_change_status;
19230 struct lpfc_dmabuf *dmabuf;
19231 uint32_t written = 0;
19232 bool check_change_status = false;
19234 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19238 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19239 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19240 sizeof(struct lpfc_mbx_wr_object) -
19241 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19243 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19244 wr_object->u.request.write_offset = *offset;
19245 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19246 wr_object->u.request.object_name[0] =
19247 cpu_to_le32(wr_object->u.request.object_name[0]);
19248 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19249 list_for_each_entry(dmabuf, dmabuf_list, list) {
19250 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19252 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19253 wr_object->u.request.bde[i].addrHigh =
19254 putPaddrHigh(dmabuf->phys);
19255 if (written + SLI4_PAGE_SIZE >= size) {
19256 wr_object->u.request.bde[i].tus.f.bdeSize =
19258 written += (size - written);
19259 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19260 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19261 check_change_status = true;
19263 wr_object->u.request.bde[i].tus.f.bdeSize =
19265 written += SLI4_PAGE_SIZE;
19269 wr_object->u.request.bde_count = i;
19270 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19271 if (!phba->sli4_hba.intr_enable)
19272 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19274 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19275 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19277 /* The IOCTL status is embedded in the mailbox subheader. */
19278 shdr_status = bf_get(lpfc_mbox_hdr_status,
19279 &wr_object->header.cfg_shdr.response);
19280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19281 &wr_object->header.cfg_shdr.response);
19282 if (check_change_status) {
19283 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19284 &wr_object->u.response);
19285 switch (shdr_change_status) {
19286 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19287 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19288 "3198 Firmware write complete: System "
19289 "reboot required to instantiate\n");
19291 case (LPFC_CHANGE_STATUS_FW_RESET):
19292 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19293 "3199 Firmware write complete: Firmware"
19294 " reset required to instantiate\n");
19296 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19298 "3200 Firmware write complete: Port "
19299 "Migration or PCI Reset required to "
19302 case (LPFC_CHANGE_STATUS_PCI_RESET):
19303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19304 "3201 Firmware write complete: PCI "
19305 "Reset required to instantiate\n");
19311 if (rc != MBX_TIMEOUT)
19312 mempool_free(mbox, phba->mbox_mem_pool);
19313 if (shdr_status || shdr_add_status || rc) {
19314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19315 "3025 Write Object mailbox failed with "
19316 "status x%x add_status x%x, mbx status x%x\n",
19317 shdr_status, shdr_add_status, rc);
19319 *offset = shdr_add_status;
19321 *offset += wr_object->u.response.actual_write_length;
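/*
 * Usage sketch (illustrative): writing a firmware image in chunks.
 * lpfc_wr_object() consumes up to LPFC_MBX_WR_CONFIG_MAX_BDE buffers per
 * call and advances @offset, so a caller loops until the whole image has
 * been transferred. Building dmabuf_list and fw_size is assumed here.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (!rc && offset < fw_size)
 *		rc = lpfc_wr_object(phba, &dmabuf_list, fw_size, &offset);
 */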
19326 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19327 * @vport: pointer to vport data structure.
19329 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19330 * and REG_VPI mailbox commands associated with the vport. It is called
19331 * when the driver wants to restart discovery of the vport due to
19332 * a Clear Virtual Link event.
19335 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19337 struct lpfc_hba *phba = vport->phba;
19338 LPFC_MBOXQ_t *mb, *nextmb;
19339 struct lpfc_dmabuf *mp;
19340 struct lpfc_nodelist *ndlp;
19341 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19342 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19343 LIST_HEAD(mbox_cmd_list);
19344 uint8_t restart_loop;
19346 /* Clean up internally queued mailbox commands with the vport */
19347 spin_lock_irq(&phba->hbalock);
19348 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19349 if (mb->vport != vport)
19352 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19353 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19356 list_del(&mb->list);
19357 list_add_tail(&mb->list, &mbox_cmd_list);
19359 /* Clean up active mailbox command with the vport */
19360 mb = phba->sli.mbox_active;
19361 if (mb && (mb->vport == vport)) {
19362 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19363 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19364 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19365 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19366 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19367 /* Put reference count for delayed processing */
19368 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19369 /* Unregister the RPI when mailbox complete */
19370 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19373 /* Cleanup any mailbox completions which are not yet processed */
19376 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19378 * If this mailbox is already processed or it is
19379 * for another vport, ignore it.
19381 if ((mb->vport != vport) ||
19382 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19385 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19386 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19389 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19390 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19391 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19392 /* Unregister the RPI when mailbox complete */
19393 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19395 spin_unlock_irq(&phba->hbalock);
19396 spin_lock(shost->host_lock);
19397 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19398 spin_unlock(shost->host_lock);
19399 spin_lock_irq(&phba->hbalock);
19403 } while (restart_loop);
19405 spin_unlock_irq(&phba->hbalock);
19407 /* Release the cleaned-up mailbox commands */
19408 while (!list_empty(&mbox_cmd_list)) {
19409 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19410 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19411 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19413 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19416 mb->ctx_buf = NULL;
19417 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19418 mb->ctx_ndlp = NULL;
19420 spin_lock(shost->host_lock);
19421 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19422 spin_unlock(shost->host_lock);
19423 lpfc_nlp_put(ndlp);
19426 mempool_free(mb, phba->mbox_mem_pool);
19429 /* Release the ndlp with the cleaned-up active mailbox command */
19430 if (act_mbx_ndlp) {
19431 spin_lock(shost->host_lock);
19432 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19433 spin_unlock(shost->host_lock);
19434 lpfc_nlp_put(act_mbx_ndlp);
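/*
 * Context sketch (illustrative): on a Clear Virtual Link event the CVL
 * handling path flushes the stale registration mailboxes before
 * restarting vport discovery; the exact ordering shown is an assumption:
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	lpfc_mbx_unreg_vpi(vport);
 */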
19439 * lpfc_drain_txq - Drain the txq
19440 * @phba: Pointer to HBA context object.
19442 * This function attempts to submit IOCBs on the txq
19443 * to the adapter. For SLI4 adapters, the txq contains
19444 * ELS IOCBs that have been deferred because there
19445 * are no SGLs available. This congestion can occur with large
19446 * vport counts during node discovery.
19450 lpfc_drain_txq(struct lpfc_hba *phba)
19452 LIST_HEAD(completions);
19453 struct lpfc_sli_ring *pring;
19454 struct lpfc_iocbq *piocbq = NULL;
19455 unsigned long iflags = 0;
19456 char *fail_msg = NULL;
19457 struct lpfc_sglq *sglq;
19458 union lpfc_wqe128 wqe;
19459 uint32_t txq_cnt = 0;
19460 struct lpfc_queue *wq;
19462 if (phba->link_flag & LS_MDS_LOOPBACK) {
19463 /* MDS WQEs are posted only to the first WQ */
19464 wq = phba->sli4_hba.hdwq[0].fcp_wq;
19469 wq = phba->sli4_hba.els_wq;
19472 pring = lpfc_phba_elsring(phba);
19475 if (unlikely(!pring) || list_empty(&pring->txq))
19478 spin_lock_irqsave(&pring->ring_lock, iflags);
19479 list_for_each_entry(piocbq, &pring->txq, list) {
19483 if (txq_cnt > pring->txq_max)
19484 pring->txq_max = txq_cnt;
19486 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19488 while (!list_empty(&pring->txq)) {
19489 spin_lock_irqsave(&pring->ring_lock, iflags);
19491 piocbq = lpfc_sli_ringtx_get(phba, pring);
19493 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19495 "2823 txq empty and txq_cnt is %d\n ",
19499 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19501 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19502 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19507 /* The XRI and IOCB resources are secured;
19508 * attempt to issue the request
19510 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19511 piocbq->sli4_xritag = sglq->sli4_xritag;
19512 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19513 fail_msg = "to convert bpl to sgl";
19514 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19515 fail_msg = "to convert iocb to wqe";
19516 else if (lpfc_sli4_wq_put(wq, &wqe))
19517 fail_msg = " - Wq is full";
19519 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19522 /* Failed means we can't issue and need to cancel */
19523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19524 "2822 IOCB failed %s iotag 0x%x "
19527 piocbq->iotag, piocbq->sli4_xritag);
19528 list_add_tail(&piocbq->list, &completions);
19530 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19533 /* Cancel all the IOCBs that cannot be issued */
19534 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19535 IOERR_SLI_ABORTED);
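/*
 * Deferral pairing sketch (restating the pattern above): an ELS submit
 * path that cannot obtain an SGL parks the IOCB on the txq, and this
 * routine later retries it once SGLs are freed. The sketch is shorthand,
 * not a verbatim call site:
 *
 *	sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
 *	if (!sglq) {
 *		__lpfc_sli_ringtx_put(phba, pring, piocbq);   // defer
 *		return IOCB_BUSY;
 *	}
 */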
19541 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19542 * @phba: Pointer to HBA context object.
19543 * @pwqeq: Pointer to command WQE.
19544 * @sglq: Pointer to the scatter gather queue object.
19546 * This routine converts the bpl or bde that is in the WQE
19547 * to a sgl list for the sli4 hardware. The physical address
19548 * of the bpl/bde is converted back to a virtual address.
19549 * If the WQE contains a BPL then the list of BDEs is
19550 * converted to sli4_sges. If the WQE contains a single
19551 * BDE then it is converted to a single sli4_sge.
19552 * The WQE is still in CPU endianness so the contents of
19553 * the bpl can be used without byte swapping.
19555 * Returns valid XRI = Success, NO_XRI = Failure.
19558 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19559 struct lpfc_sglq *sglq)
19561 uint16_t xritag = NO_XRI;
19562 struct ulp_bde64 *bpl = NULL;
19563 struct ulp_bde64 bde;
19564 struct sli4_sge *sgl = NULL;
19565 struct lpfc_dmabuf *dmabuf;
19566 union lpfc_wqe128 *wqe;
19569 uint32_t offset = 0; /* accumulated offset in the sg request list */
19570 int inbound = 0; /* number of sg reply entries inbound from firmware */
19573 if (!pwqeq || !sglq)
19576 sgl = (struct sli4_sge *)sglq->sgl;
19578 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19580 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19581 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19582 return sglq->sli4_xritag;
19583 numBdes = pwqeq->rsvd2;
19585 /* The addrHigh and addrLow fields within the WQE
19586 * have not been byteswapped yet so there is no
19587 * need to swap them back.
19589 if (pwqeq->context3)
19590 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19594 bpl = (struct ulp_bde64 *)dmabuf->virt;
19598 for (i = 0; i < numBdes; i++) {
19599 /* Should already be byte swapped. */
19600 sgl->addr_hi = bpl->addrHigh;
19601 sgl->addr_lo = bpl->addrLow;
19603 sgl->word2 = le32_to_cpu(sgl->word2);
19604 if ((i+1) == numBdes)
19605 bf_set(lpfc_sli4_sge_last, sgl, 1);
19607 bf_set(lpfc_sli4_sge_last, sgl, 0);
19608 /* swap the size field back to the cpu so we
19609 * can assign it to the sgl.
19611 bde.tus.w = le32_to_cpu(bpl->tus.w);
19612 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19613 /* The offsets in the sgl need to be accumulated
19614 * separately for the request and reply lists.
19615 * The request is always first, the reply follows.
19618 case CMD_GEN_REQUEST64_WQE:
19619 /* add up the reply sg entries */
19620 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19622 /* first inbound? reset the offset */
19625 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19626 bf_set(lpfc_sli4_sge_type, sgl,
19627 LPFC_SGE_TYPE_DATA);
19628 offset += bde.tus.f.bdeSize;
19630 case CMD_FCP_TRSP64_WQE:
19631 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19632 bf_set(lpfc_sli4_sge_type, sgl,
19633 LPFC_SGE_TYPE_DATA);
19635 case CMD_FCP_TSEND64_WQE:
19636 case CMD_FCP_TRECEIVE64_WQE:
19637 bf_set(lpfc_sli4_sge_type, sgl,
19638 bpl->tus.f.bdeFlags);
19642 offset += bde.tus.f.bdeSize;
19643 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19646 sgl->word2 = cpu_to_le32(sgl->word2);
19650 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19651 /* The addrHigh and addrLow fields of the BDE have not
19652 * been byteswapped yet so they need to be swapped
19653 * before putting them in the sgl.
19655 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19656 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19657 sgl->word2 = le32_to_cpu(sgl->word2);
19658 bf_set(lpfc_sli4_sge_last, sgl, 1);
19659 sgl->word2 = cpu_to_le32(sgl->word2);
19660 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19662 return sglq->sli4_xritag;
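/*
 * Field mapping sketch (summarizing the conversion above): each ulp_bde64
 * in the BPL becomes one sli4_sge, with the final entry flagged as last:
 *
 *	sgl->addr_hi = bpl->addrHigh;                 // already little endian
 *	sgl->addr_lo = bpl->addrLow;
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *	bf_set(lpfc_sli4_sge_last, sgl, is_last);     // 1 only on final SGE
 */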
19666 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19667 * @phba: Pointer to HBA context object.
19668 * @qp: Pointer to the hardware queue the WQE is issued on.
19669 * @pwqe: Pointer to command WQE.
19672 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19673 struct lpfc_iocbq *pwqe)
19675 union lpfc_wqe128 *wqe = &pwqe->wqe;
19676 struct lpfc_nvmet_rcv_ctx *ctxp;
19677 struct lpfc_queue *wq;
19678 struct lpfc_sglq *sglq;
19679 struct lpfc_sli_ring *pring;
19680 unsigned long iflags;
19683 /* NVME_LS and NVME_LS ABTS requests. */
19684 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19685 pring = phba->sli4_hba.nvmels_wq->pring;
19686 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19688 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19690 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19693 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19694 pwqe->sli4_xritag = sglq->sli4_xritag;
19695 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19696 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19699 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19700 pwqe->sli4_xritag);
19701 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19703 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19707 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19708 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19712 /* NVME_FCREQ and NVME_ABTS requests */
19713 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19714 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19718 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19720 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19722 ret = lpfc_sli4_wq_put(wq, wqe);
19724 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19727 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19728 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19732 /* NVMET requests */
19733 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19734 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19738 ctxp = pwqe->context2;
19739 sglq = ctxp->ctxbuf->sglq;
19740 if (pwqe->sli4_xritag == NO_XRI) {
19741 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19742 pwqe->sli4_xritag = sglq->sli4_xritag;
19744 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19745 pwqe->sli4_xritag);
19746 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19748 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19750 ret = lpfc_sli4_wq_put(wq, wqe);
19752 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19755 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19756 spin_unlock_irqrestore(&pring->ring_lock, iflags);
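/*
 * Dispatch sketch (illustrative): this routine routes by I/O class, so a
 * caller only tags the request; handle_wqe_failure() is hypothetical:
 *
 *	pwqe->iocb_flag |= LPFC_IO_NVME;   // or LPFC_IO_NVME_LS / LPFC_IO_NVMET
 *	ret = lpfc_sli4_issue_wqe(phba, qp, pwqe);
 *	if (ret)
 *		handle_wqe_failure(pwqe);
 */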
19762 #ifdef LPFC_MXP_STAT
19764 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19765 * @phba: pointer to lpfc hba data structure.
19766 * @hwqid: which HWQ this pool belongs to.
19768 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
19769 * 15 seconds after a test case starts running.
19771 * The user should call lpfc_debugfs_multixripools_write before running a test
19772 * case to clear stat_snapshot_taken. Then the user starts a test case. While
19773 * the test case is running, stat_snapshot_taken is incremented by 1 each time
19774 * this routine is called from the heartbeat timer. When stat_snapshot_taken
19775 * equals LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
19777 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19779 struct lpfc_sli4_hdw_queue *qp;
19780 struct lpfc_multixri_pool *multixri_pool;
19781 struct lpfc_pvt_pool *pvt_pool;
19782 struct lpfc_pbl_pool *pbl_pool;
19785 qp = &phba->sli4_hba.hdwq[hwqid];
19786 multixri_pool = qp->p_multixri_pool;
19787 if (!multixri_pool)
19790 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19791 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19792 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19793 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19795 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19797 multixri_pool->stat_pbl_count = pbl_pool->count;
19798 multixri_pool->stat_pvt_count = pvt_pool->count;
19799 multixri_pool->stat_busy_count = txcmplq_cnt;
19802 multixri_pool->stat_snapshot_taken++;
19807 * lpfc_adjust_pvt_pool_count - Adjust private pool count
19808 * @phba: pointer to lpfc hba data structure.
19809 * @hwqid: which HWQ the pools belong to.
19811 * This routine moves some XRIs from the private to the public pool when the private pool is not busy.
19814 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19816 struct lpfc_multixri_pool *multixri_pool;
19818 u32 prev_io_req_count;
19820 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19821 if (!multixri_pool)
19823 io_req_count = multixri_pool->io_req_count;
19824 prev_io_req_count = multixri_pool->prev_io_req_count;
19826 if (prev_io_req_count != io_req_count) {
19827 /* Private pool is busy */
19828 multixri_pool->prev_io_req_count = io_req_count;
19830 /* Private pool is not busy.
19831 * Move XRIs from private to public pool.
19833 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
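/*
 * Idle detection sketch (as coded above): the heartbeat samples the
 * request counter; an unchanged counter means no I/O arrived in the
 * interval, so the private pool may be drained:
 *
 *	bool busy = (multixri_pool->io_req_count !=
 *		     multixri_pool->prev_io_req_count);
 *	if (!busy)
 *		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
 */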
19838 * lpfc_adjust_high_watermark - Adjust high watermark
19839 * @phba: pointer to lpfc hba data structure.
19840 * @hwqid: which HWQ the watermark belongs to.
19842 * This routine sets the high watermark to the number of outstanding XRIs,
19843 * clamped so the new value stays between xri_limit/2 and xri_limit.
19845 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19853 struct lpfc_multixri_pool *multixri_pool;
19854 struct lpfc_sli4_hdw_queue *qp;
19856 qp = &phba->sli4_hba.hdwq[hwqid];
19857 multixri_pool = qp->p_multixri_pool;
19858 if (!multixri_pool)
19860 xri_limit = multixri_pool->xri_limit;
19862 watermark_max = xri_limit;
19863 watermark_min = xri_limit / 2;
19865 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19866 abts_io_bufs = qp->abts_scsi_io_bufs;
19868 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19869 abts_io_bufs += qp->abts_nvme_io_bufs;
19872 new_watermark = txcmplq_cnt + abts_io_bufs;
19873 new_watermark = min(watermark_max, new_watermark);
19874 new_watermark = max(watermark_min, new_watermark);
19875 multixri_pool->pvt_pool.high_watermark = new_watermark;
19877 #ifdef LPFC_MXP_STAT
19878 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19884 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
19885 * @phba: pointer to lpfc hba data structure.
19886 * @hwqid: which HWQ the pools belong to.
19888 * This routine is called from the heartbeat timer when pvt_pool is idle.
19889 * All free XRIs are moved from the private to the public pool on hwqid in
19890 * two steps: the first step moves (all - low_watermark) XRIs, and the
19891 * second step, on a later idle pass, moves the rest.
19893 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
19895 struct lpfc_pbl_pool *pbl_pool;
19896 struct lpfc_pvt_pool *pvt_pool;
19897 struct lpfc_sli4_hdw_queue *qp;
19898 struct lpfc_io_buf *lpfc_ncmd;
19899 struct lpfc_io_buf *lpfc_ncmd_next;
19900 unsigned long iflag;
19901 struct list_head tmp_list;
19904 qp = &phba->sli4_hba.hdwq[hwqid];
19905 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19906 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19909 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
19910 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
19912 if (pvt_pool->count > pvt_pool->low_watermark) {
19913 /* Step 1: move (all - low_watermark) from pvt_pool
19917 /* Move low watermark of bufs from pvt_pool to tmp_list */
19918 INIT_LIST_HEAD(&tmp_list);
19919 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
19920 &pvt_pool->list, list) {
19921 list_move_tail(&lpfc_ncmd->list, &tmp_list);
19923 if (tmp_count >= pvt_pool->low_watermark)
19927 /* Move all bufs from pvt_pool to pbl_pool */
19928 list_splice_init(&pvt_pool->list, &pbl_pool->list);
19930 /* Move all bufs from tmp_list to pvt_pool */
19931 list_splice(&tmp_list, &pvt_pool->list);
19933 pbl_pool->count += (pvt_pool->count - tmp_count);
19934 pvt_pool->count = tmp_count;
19936 /* Step 2: move the rest from pvt_pool to pbl_pool */
19937 list_splice_init(&pvt_pool->list, &pbl_pool->list);
19938 pbl_pool->count += pvt_pool->count;
19939 pvt_pool->count = 0;
19942 spin_unlock(&pvt_pool->lock);
19943 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
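/*
 * Drain policy sketch (shorthand for the logic above):
 *
 *	if (pvt_pool->count > low_watermark)
 *		move (count - low_watermark) XRIs to pbl_pool;   // step 1
 *	else
 *		move all remaining XRIs to pbl_pool;             // step 2
 */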
19947 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
19948 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to the HBA hardware queue that owns @pvt_pool
19949 * @pbl_pool: specified public free XRI pool
19950 * @pvt_pool: specified private free XRI pool
19951 * @count: number of XRIs to move
19953 * This routine tries to move some free common bufs from the specified pbl_pool
19954 * to the specified pvt_pool. It might move fewer than @count XRIs if there
19955 * are not enough in the public pool.
19958 * true - if XRIs are successfully moved from the specified pbl_pool to the
19959 * specified pvt_pool
19960 * false - if the specified pbl_pool is empty or locked by someone else
19963 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19964 struct lpfc_pbl_pool *pbl_pool,
19965 struct lpfc_pvt_pool *pvt_pool, u32 count)
19967 struct lpfc_io_buf *lpfc_ncmd;
19968 struct lpfc_io_buf *lpfc_ncmd_next;
19969 unsigned long iflag;
19972 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
19974 if (pbl_pool->count) {
19975 /* Move a batch of XRIs from public to private pool */
19976 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
19977 list_for_each_entry_safe(lpfc_ncmd,
19981 list_move_tail(&lpfc_ncmd->list,
19990 spin_unlock(&pvt_pool->lock);
19991 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
19994 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20001 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20002 * @phba: pointer to lpfc hba data structure.
20003 * @hwqid: which HWQ the private pool belongs to.
20004 * @count: number of XRIs to move
20006 * This routine tries to find some free common bufs in one of the public pools
20007 * using a round-robin method. The search always starts from the local hwqid,
20008 * then the HWQ found last time (rrb_next_hwqid). Once a non-empty public pool
20009 * is found, a batch of free common bufs is moved to the private pool on hwqid.
20010 * It might move fewer than @count XRIs if the public pools run short.
20012 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20014 struct lpfc_multixri_pool *multixri_pool;
20015 struct lpfc_multixri_pool *next_multixri_pool;
20016 struct lpfc_pvt_pool *pvt_pool;
20017 struct lpfc_pbl_pool *pbl_pool;
20018 struct lpfc_sli4_hdw_queue *qp;
20023 qp = &phba->sli4_hba.hdwq[hwqid];
20024 multixri_pool = qp->p_multixri_pool;
20025 pvt_pool = &multixri_pool->pvt_pool;
20026 pbl_pool = &multixri_pool->pbl_pool;
20028 /* Check if local pbl_pool is available */
20029 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20031 #ifdef LPFC_MXP_STAT
20032 multixri_pool->local_pbl_hit_count++;
20037 hwq_count = phba->cfg_hdw_queue;
20039 /* Get the next hwqid which was found last time */
20040 next_hwqid = multixri_pool->rrb_next_hwqid;
20043 /* Go to next hwq */
20044 next_hwqid = (next_hwqid + 1) % hwq_count;
20046 next_multixri_pool =
20047 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20048 pbl_pool = &next_multixri_pool->pbl_pool;
20050 /* Check if the public free xri pool is available */
20051 ret = _lpfc_move_xri_pbl_to_pvt(
20052 phba, qp, pbl_pool, pvt_pool, count);
20054 /* Exit while-loop if success or all hwqid are checked */
20055 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20057 /* Starting point for the next time */
20058 multixri_pool->rrb_next_hwqid = next_hwqid;
20061 /* stats: all public pools are empty */
20062 multixri_pool->pbl_empty_count++;
20065 #ifdef LPFC_MXP_STAT
20067 if (next_hwqid == hwqid)
20068 multixri_pool->local_pbl_hit_count++;
20070 multixri_pool->other_pbl_hit_count++;
20076 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20077 * @phba: pointer to lpfc hba data structure.
20078 * @hwqid: which HWQ the pools belong to.
20080 * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count is below its low watermark.
20083 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20085 struct lpfc_multixri_pool *multixri_pool;
20086 struct lpfc_pvt_pool *pvt_pool;
20088 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20089 pvt_pool = &multixri_pool->pvt_pool;
20091 if (pvt_pool->count < pvt_pool->low_watermark)
20092 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20096 * lpfc_release_io_buf - Return one IO buf back to free pool
20097 * @phba: pointer to lpfc hba data structure.
20098 * @lpfc_ncmd: IO buf to be returned.
20099 * @qp: pointer to the HWQ the buffer belongs to.
20101 * This routine returns one IO buf back to the free pool. If this is an urgent
20102 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
20103 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20104 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20105 * lpfc_io_buf_list_put.
20107 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20108 struct lpfc_sli4_hdw_queue *qp)
20110 unsigned long iflag;
20111 struct lpfc_pbl_pool *pbl_pool;
20112 struct lpfc_pvt_pool *pvt_pool;
20113 struct lpfc_epd_pool *epd_pool;
20119 /* MUST zero fields if buffer is reused by another protocol */
20120 lpfc_ncmd->nvmeCmd = NULL;
20121 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20122 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20124 if (phba->cfg_xri_rebalancing) {
20125 if (lpfc_ncmd->expedite) {
20126 /* Return to expedite pool */
20127 epd_pool = &phba->epd_pool;
20128 spin_lock_irqsave(&epd_pool->lock, iflag);
20129 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20131 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20135 /* Avoid invalid access if an IO sneaks in and is being rejected
20136 * just _after_ xri pools are destroyed in lpfc_offline.
20137 * Nothing much can be done at this point.
20139 if (!qp->p_multixri_pool)
20142 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20143 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20145 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
20146 abts_io_bufs = qp->abts_scsi_io_bufs;
20148 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
20149 abts_io_bufs += qp->abts_nvme_io_bufs;
20152 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20153 xri_limit = qp->p_multixri_pool->xri_limit;
20155 #ifdef LPFC_MXP_STAT
20156 if (xri_owned <= xri_limit)
20157 qp->p_multixri_pool->below_limit_count++;
20159 qp->p_multixri_pool->above_limit_count++;
20162 /* XRI goes to either public or private free xri pool
20163 * based on watermark and xri_limit
20165 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20166 (xri_owned < xri_limit &&
20167 pvt_pool->count < pvt_pool->high_watermark)) {
20168 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20169 qp, free_pvt_pool);
20170 list_add_tail(&lpfc_ncmd->list,
20173 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20175 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20176 qp, free_pub_pool);
20177 list_add_tail(&lpfc_ncmd->list,
20180 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20183 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20185 list_add_tail(&lpfc_ncmd->list,
20186 &qp->lpfc_io_buf_list_put);
20188 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
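/*
 * Placement decision sketch (condensing the branches above):
 *
 *	expedite buffer                          -> epd_pool
 *	rebalancing && (pvt below low wm ||
 *	  (owned < xri_limit && below high wm))  -> pvt_pool
 *	rebalancing (otherwise)                  -> pbl_pool
 *	no rebalancing                           -> lpfc_io_buf_list_put
 */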
20194 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20195 * @phba: pointer to lpfc hba data structure.
20196 * @pvt_pool: pointer to private pool data structure.
20197 * @ndlp: pointer to lpfc nodelist data structure.
20199 * This routine tries to get one free IO buf from private pool.
20202 * pointer to one free IO buf - if private pool is not empty
20203 * NULL - if private pool is empty
20205 static struct lpfc_io_buf *
20206 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20207 struct lpfc_sli4_hdw_queue *qp,
20208 struct lpfc_pvt_pool *pvt_pool,
20209 struct lpfc_nodelist *ndlp)
20211 struct lpfc_io_buf *lpfc_ncmd;
20212 struct lpfc_io_buf *lpfc_ncmd_next;
20213 unsigned long iflag;
20215 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20216 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20217 &pvt_pool->list, list) {
20218 if (lpfc_test_rrq_active(
20219 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20221 list_del(&lpfc_ncmd->list);
20223 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20226 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20232 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20233 * @phba: pointer to lpfc hba data structure.
20235 * This routine tries to get one free IO buf from expedite pool.
20238 * pointer to one free IO buf - if expedite pool is not empty
20239 * NULL - if expedite pool is empty
20241 static struct lpfc_io_buf *
20242 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20244 struct lpfc_io_buf *lpfc_ncmd;
20245 struct lpfc_io_buf *lpfc_ncmd_next;
20246 unsigned long iflag;
20247 struct lpfc_epd_pool *epd_pool;
20249 epd_pool = &phba->epd_pool;
20252 spin_lock_irqsave(&epd_pool->lock, iflag);
20253 if (epd_pool->count > 0) {
20254 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20255 &epd_pool->list, list) {
20256 list_del(&lpfc_ncmd->list);
20261 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20267 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
20268 * @phba: pointer to lpfc hba data structure.
20269 * @ndlp: pointer to lpfc nodelist data structure.
20270 * @hwqid: which HWQ this request belongs to
20271 * @expedite: 1 means this request is urgent.
20273 * This routine will do the following actions and then return a pointer to one free IO buf:
20276 * 1. If the private free xri pool is empty, move some XRIs from the public to the private pool.
20278 * 2. Get one XRI from private free xri pool.
20279 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20280 * get one free xri from expedite pool.
20282 * Note: ndlp is only used on SCSI side for RRQ testing.
20283 * The caller should pass NULL for ndlp on NVME side.
20286 * pointer to one free IO buf - if one could be obtained
20287 * NULL - otherwise
20289 static struct lpfc_io_buf *
20290 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20291 struct lpfc_nodelist *ndlp,
20292 int hwqid, int expedite)
20294 struct lpfc_sli4_hdw_queue *qp;
20295 struct lpfc_multixri_pool *multixri_pool;
20296 struct lpfc_pvt_pool *pvt_pool;
20297 struct lpfc_io_buf *lpfc_ncmd;
20299 qp = &phba->sli4_hba.hdwq[hwqid];
20301 multixri_pool = qp->p_multixri_pool;
20302 pvt_pool = &multixri_pool->pvt_pool;
20303 multixri_pool->io_req_count++;
20305 /* If pvt_pool is empty, move some XRIs from public to private pool */
20306 if (pvt_pool->count == 0)
20307 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20309 /* Get one XRI from private free xri pool */
20310 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20313 lpfc_ncmd->hdwq = qp;
20314 lpfc_ncmd->hdwq_no = hwqid;
20315 } else if (expedite) {
20316 /* If we fail to get one from pvt_pool and this is an expedite
20317 * request, get one free xri from expedite pool.
20319 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
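/*
 * Fallback order sketch (as implemented above):
 *
 *	if (pvt_pool->count == 0)
 *		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
 *	buf = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
 *	if (!buf && expedite)
 *		buf = lpfc_get_io_buf_from_expedite_pool(phba);
 */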
20325 static inline struct lpfc_io_buf *
20326 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20328 struct lpfc_sli4_hdw_queue *qp;
20329 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20331 qp = &phba->sli4_hba.hdwq[idx];
20332 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20333 &qp->lpfc_io_buf_list_get, list) {
20334 if (lpfc_test_rrq_active(phba, ndlp,
20335 lpfc_cmd->cur_iocbq.sli4_lxritag))
20338 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20341 list_del_init(&lpfc_cmd->list);
20343 lpfc_cmd->hdwq = qp;
20344 lpfc_cmd->hdwq_no = idx;
20351 * lpfc_get_io_buf - Get one IO buffer from free pool
20352 * @phba: The HBA for which this call is being executed.
20353 * @ndlp: pointer to lpfc nodelist data structure.
20354 * @hwqid: which HWQ this request belongs to
20355 * @expedite: 1 means this request is urgent.
20357 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
20358 * it removes an IO buffer from the multiXRI pools; if cfg_xri_rebalancing==0, it
20359 * removes an IO buffer from the head of the @hwqid io_buf_list and returns it.
20361 * Note: ndlp is only used on SCSI side for RRQ testing.
20362 * The caller should pass NULL for ndlp on NVME side.
20366 * Pointer to lpfc_io_buf - Success
20368 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20369 struct lpfc_nodelist *ndlp,
20370 u32 hwqid, int expedite)
20372 struct lpfc_sli4_hdw_queue *qp;
20373 unsigned long iflag;
20374 struct lpfc_io_buf *lpfc_cmd;
20376 qp = &phba->sli4_hba.hdwq[hwqid];
20379 if (phba->cfg_xri_rebalancing)
20380 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20381 phba, ndlp, hwqid, expedite);
20383 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20384 qp, alloc_xri_get);
20385 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20386 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20388 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20389 qp, alloc_xri_put);
20390 list_splice(&qp->lpfc_io_buf_list_put,
20391 &qp->lpfc_io_buf_list_get);
20392 qp->get_io_bufs += qp->put_io_bufs;
20393 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20394 qp->put_io_bufs = 0;
20395 spin_unlock(&qp->io_buf_list_put_lock);
20396 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20398 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20400 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
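/*
 * Usage sketch (illustrative): a typical submit path pairs this routine
 * with lpfc_release_io_buf(); everything between the two calls is assumed:
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!lpfc_cmd)
 *		return -EBUSY;          // hypothetical busy handling
 *	// ... build and issue the WQE ...
 *	// on completion:
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */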