scsi: lpfc: Fix build error
[linux-2.6-block.git] / drivers / scsi / lpfc / lpfc_sli.c
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
0d041215 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
3e21d1cb 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
50611577 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e 22 *******************************************************************/
23
dea3101e 24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
5a0e3ad6 28#include <linux/slab.h>
1c2ba475 29#include <linux/lockdep.h>
dea3101e 30
91886523 31#include <scsi/scsi.h>
dea3101e 32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
f888ba3c 35#include <scsi/scsi_transport_fc.h>
da0436e9 36#include <scsi/fc/fc_fs.h>
0d878419 37#include <linux/aer.h>
1351e69f
JS
38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
dea3101e 41
895427bd
JS
42#include <linux/nvme-fc-driver.h>
43
da0436e9 44#include "lpfc_hw4.h"
dea3101e 45#include "lpfc_hw.h"
46#include "lpfc_sli.h"
da0436e9 47#include "lpfc_sli4.h"
ea2151b4 48#include "lpfc_nl.h"
dea3101e 49#include "lpfc_disc.h"
dea3101e 50#include "lpfc.h"
895427bd
JS
51#include "lpfc_scsi.h"
52#include "lpfc_nvme.h"
f358dd0c 53#include "lpfc_nvmet.h"
dea3101e 54#include "lpfc_crtn.h"
55#include "lpfc_logmsg.h"
56#include "lpfc_compat.h"
858c9f6c 57#include "lpfc_debugfs.h"
04c68496 58#include "lpfc_vport.h"
61bda8f7 59#include "lpfc_version.h"
dea3101e 60
61/* There are only four IOCB completion types. */
62typedef enum _lpfc_iocb_type {
63 LPFC_UNKNOWN_IOCB,
64 LPFC_UNSOL_IOCB,
65 LPFC_SOL_IOCB,
66 LPFC_ABORT_IOCB
67} lpfc_iocb_type;
68
4f774513
JS
69
70/* Provide function prototypes local to this module. */
71static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint32_t);
73static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
45ed1190
JS
74 uint8_t *, uint32_t *);
75static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
76 struct lpfc_iocbq *);
6669f9bb
JS
77static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
78 struct hbq_dmabuf *);
ae9e28f3
JS
79static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
80 struct hbq_dmabuf *dmabuf);
32517fc0
JS
81static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
82 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
895427bd 83static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
8a9d2e80 84 int);
f485c18d 85static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
32517fc0
JS
86 struct lpfc_queue *eq,
87 struct lpfc_eqe *eqe);
e8d3c3b1
JS
88static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
89static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
895427bd
JS
90static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
91 struct lpfc_sli_ring *pring,
92 struct lpfc_iocbq *cmdiocb);
0558056c 93
4f774513
JS
94static IOCB_t *
95lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
96{
97 return &iocbq->iocb;
98}
99
48f8fdb4
JS
100#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
101/**
102 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
103 * @srcp: Source memory pointer.
104 * @destp: Destination memory pointer.
105 * @cnt: Number of bytes to be copied.
106 * Must be a multiple of sizeof(uint64_t).
107 *
108 * This function is used for copying data between driver memory
109 * and the SLI WQ. This function also changes the endianness
110 * of each word if native endianness is different from SLI
111 * endianness. This function can be called with or without
112 * lock.
113 **/
114void
115lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
116{
117 uint64_t *src = srcp;
118 uint64_t *dest = destp;
119 int i;
120
121 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
122 *dest++ = *src++;
123}
124#else
125#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
126#endif
127
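/*
 * Illustrative sketch (not part of the driver): copying one WQE image with
 * the helper above.  The count argument is in bytes and is assumed to be a
 * multiple of sizeof(uint64_t); "src_wqe" and "dst_wqe" are hypothetical.
 *
 *	union lpfc_wqe128 src_wqe, dst_wqe;
 *
 *	memset(&src_wqe, 0, sizeof(src_wqe));
 *	lpfc_sli4_pcimem_bcopy(&src_wqe, &dst_wqe, sizeof(src_wqe));
 */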
4f774513
JS
128/**
129 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
130 * @q: The Work Queue to operate on.
131 * @wqe: The work Queue Entry to put on the Work queue.
132 *
133 * This routine will copy the contents of @wqe to the next available entry on
134 * the @q. This function will then ring the Work Queue Doorbell to signal the
135 * HBA to start processing the Work Queue Entry. This function returns 0 if
136 * successful. If no entries are available on @q then this function will return
137 * -EBUSY.
138 * The caller is expected to hold the hbalock when calling this routine.
139 **/
cd22d605 140static int
205e8240 141lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
4f774513 142{
2e90f4b5 143 union lpfc_wqe *temp_wqe;
4f774513
JS
144 struct lpfc_register doorbell;
145 uint32_t host_index;
027140ea 146 uint32_t idx;
1351e69f
JS
147 uint32_t i = 0;
148 uint8_t *tmp;
5cc167dd 149 u32 if_type;
4f774513 150
2e90f4b5
JS
151 /* sanity check on queue memory */
152 if (unlikely(!q))
153 return -ENOMEM;
9afbee3d 154 temp_wqe = lpfc_sli4_qe(q, q->host_index);
2e90f4b5 155
4f774513 156 /* If the host has not yet processed the next entry then we are done */
027140ea
JS
157 idx = ((q->host_index + 1) % q->entry_count);
158 if (idx == q->hba_index) {
b84daac9 159 q->WQ_overflow++;
cd22d605 160 return -EBUSY;
b84daac9
JS
161 }
162 q->WQ_posted++;
4f774513 163 /* set consumption flag every once in a while */
32517fc0 164 if (!((q->host_index + 1) % q->notify_interval))
f0d9bccc 165 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
04673e38
JS
166 else
167 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
fedd3b7b
JS
168 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
169 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
48f8fdb4 170 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
1351e69f
JS
171 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
172 /* write to DPP aperture taking advantage of Combined Writes */
4c06619f
JS
173 tmp = (uint8_t *)temp_wqe;
174#ifdef __raw_writeq
1351e69f 175 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
4c06619f
JS
176 __raw_writeq(*((uint64_t *)(tmp + i)),
177 q->dpp_regaddr + i);
178#else
179 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
180 __raw_writel(*((uint32_t *)(tmp + i)),
181 q->dpp_regaddr + i);
182#endif
1351e69f
JS
183 }
184 /* ensure WQE bcopy and DPP flushed before doorbell write */
6b3b3bdb 185 wmb();
4f774513
JS
186
187 /* Update the host index before invoking device */
188 host_index = q->host_index;
027140ea
JS
189
190 q->host_index = idx;
4f774513
JS
191
192 /* Ring Doorbell */
193 doorbell.word0 = 0;
962bc51b 194 if (q->db_format == LPFC_DB_LIST_FORMAT) {
1351e69f
JS
195 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
196 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
197 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
198 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
199 q->dpp_id);
200 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
201 q->queue_id);
202 } else {
203 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
1351e69f 204 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
5cc167dd
JS
205
206 /* Leave bits <23:16> clear for if_type 6 dpp */
207 if_type = bf_get(lpfc_sli_intf_if_type,
208 &q->phba->sli4_hba.sli_intf);
209 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
210 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
211 host_index);
1351e69f 212 }
962bc51b
JS
213 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
214 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
215 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
216 } else {
217 return -EINVAL;
218 }
219 writel(doorbell.word0, q->db_regaddr);
4f774513
JS
220
221 return 0;
222}
223
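/*
 * Illustrative sketch (not part of the driver): posting a single WQE with
 * lpfc_sli4_wq_put() while holding the hbalock, as its comment requires.
 * "phba", "wq", "wqe" and "iflags" are hypothetical locals.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		... queue full, retry after the HBA consumes entries ...
 */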
224/**
225 * lpfc_sli4_wq_release - Updates internal hba index for WQ
226 * @q: The Work Queue to operate on.
227 * @index: The index to advance the hba index to.
228 *
229 * This routine will update the HBA index of a queue to reflect consumption of
230 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
231 * an entry the host calls this function to update the queue's internal
232 * pointers. This routine returns the number of entries that were consumed by
233 * the HBA.
234 **/
235static uint32_t
236lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
237{
238 uint32_t released = 0;
239
2e90f4b5
JS
240 /* sanity check on queue memory */
241 if (unlikely(!q))
242 return 0;
243
4f774513
JS
244 if (q->hba_index == index)
245 return 0;
246 do {
247 q->hba_index = ((q->hba_index + 1) % q->entry_count);
248 released++;
249 } while (q->hba_index != index);
250 return released;
251}
252
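/*
 * Worked example (illustrative only) of the index arithmetic above: with
 * entry_count = 256, a current hba_index of 250 and a completion reporting
 * index 2, the loop steps 251, 252, 253, 254, 255, 0, 1, 2 and
 * lpfc_sli4_wq_release() returns 8 consumed entries.
 */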
253/**
254 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
255 * @q: The Mailbox Queue to operate on.
256 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
257 *
258 * This routine will copy the contents of @mqe to the next available entry on
259 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
260 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
261 * successful. If no entries are available on @q then this function will return
262 * -ENOMEM.
263 * The caller is expected to hold the hbalock when calling this routine.
264 **/
265static uint32_t
266lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
267{
2e90f4b5 268 struct lpfc_mqe *temp_mqe;
4f774513 269 struct lpfc_register doorbell;
4f774513 270
2e90f4b5
JS
271 /* sanity check on queue memory */
272 if (unlikely(!q))
273 return -ENOMEM;
9afbee3d 274 temp_mqe = lpfc_sli4_qe(q, q->host_index);
2e90f4b5 275
4f774513
JS
276 /* If the host has not yet processed the next entry then we are done */
277 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
278 return -ENOMEM;
48f8fdb4 279 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
4f774513
JS
280 /* Save off the mailbox pointer for completion */
281 q->phba->mbox = (MAILBOX_t *)temp_mqe;
282
283 /* Update the host index before invoking device */
4f774513
JS
284 q->host_index = ((q->host_index + 1) % q->entry_count);
285
286 /* Ring Doorbell */
287 doorbell.word0 = 0;
288 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
289 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
290 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
4f774513
JS
291 return 0;
292}
293
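/*
 * Illustrative sketch (not part of the driver): queuing one mailbox entry.
 * Note the routine above also records the entry in q->phba->mbox so the
 * mailbox completion path can find it.  "mq" and "mqe" are hypothetical.
 *
 *	if (lpfc_sli4_mq_put(mq, &mqe) == -ENOMEM)
 *		... no free entry (or no queue memory), try again later ...
 */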
294/**
295 * lpfc_sli4_mq_release - Updates internal hba index for MQ
296 * @q: The Mailbox Queue to operate on.
297 *
298 * This routine will update the HBA index of a queue to reflect consumption of
299 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
300 * an entry the host calls this function to update the queue's internal
301 * pointers. This routine returns the number of entries that were consumed by
302 * the HBA.
303 **/
304static uint32_t
305lpfc_sli4_mq_release(struct lpfc_queue *q)
306{
2e90f4b5
JS
307 /* sanity check on queue memory */
308 if (unlikely(!q))
309 return 0;
310
4f774513
JS
311 /* Clear the mailbox pointer for completion */
312 q->phba->mbox = NULL;
313 q->hba_index = ((q->hba_index + 1) % q->entry_count);
314 return 1;
315}
316
317/**
318 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
319 * @q: The Event Queue to get the first valid EQE from
320 *
321 * This routine will get the first valid Event Queue Entry from @q, update
322 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
323 * the Queue (no more work to do), or the Queue is full of EQEs that have been
324 * processed, but not popped back to the HBA then this routine will return NULL.
325 **/
326static struct lpfc_eqe *
327lpfc_sli4_eq_get(struct lpfc_queue *q)
328{
2e90f4b5
JS
329 struct lpfc_eqe *eqe;
330
331 /* sanity check on queue memory */
332 if (unlikely(!q))
333 return NULL;
9afbee3d 334 eqe = lpfc_sli4_qe(q, q->host_index);
4f774513
JS
335
336 /* If the next EQE is not valid then we are done */
7365f6fd 337 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
4f774513 338 return NULL;
27f344eb
JS
339
340 /*
341 * insert barrier for instruction interlock : data from the hardware
342 * must have the valid bit checked before it can be copied and acted
2ea259ee
JS
343 * upon. Speculative instructions were allowing a bcopy at the start
344 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
345 * after our return, to copy data before the valid bit check above
346 * was done. As such, some of the copied data was stale. The barrier
347 * ensures the check is before any data is copied.
27f344eb
JS
348 */
349 mb();
4f774513
JS
350 return eqe;
351}
352
ba20c853
JS
353/**
354 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
355 * @q: The Event Queue to disable interrupts on
356 *
357 **/
b71413dd 358inline void
ba20c853
JS
359lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
360{
361 struct lpfc_register doorbell;
362
363 doorbell.word0 = 0;
364 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
365 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
366 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
367 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
368 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
9dd35425 369 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
ba20c853
JS
370}
371
27d6ac0a
JS
372/**
373 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
374 * @q: The Event Queue to disable interrupts on
375 *
376 **/
377inline void
378lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
379{
380 struct lpfc_register doorbell;
381
382 doorbell.word0 = 0;
aad59d5d 383 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
27d6ac0a
JS
384 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
385}
386
4f774513 387/**
32517fc0
JS
388 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
389 * @phba: adapter with EQ
4f774513 390 * @q: The Event Queue that the host has completed processing for.
32517fc0 391 * @count: Number of elements that have been consumed
4f774513
JS
392 * @arm: Indicates whether the host wants to arm this EQ.
393 *
32517fc0
JS
394 * This routine will notify the HBA, by ringing the doorbell, that count
395 * number of EQEs have been processed. The @arm parameter indicates whether
396 * the queue should be rearmed when ringing the doorbell.
4f774513 397 **/
32517fc0
JS
398void
399lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
400 uint32_t count, bool arm)
4f774513 401{
4f774513
JS
402 struct lpfc_register doorbell;
403
2e90f4b5 404 /* sanity check on queue memory */
32517fc0
JS
405 if (unlikely(!q || (count == 0 && !arm)))
406 return;
4f774513
JS
407
408 /* ring doorbell for number popped */
409 doorbell.word0 = 0;
410 if (arm) {
411 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
412 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
413 }
32517fc0 414 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
4f774513 415 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
6b5151fd
JS
416 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
417 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
418 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
9dd35425 419 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
a747c9ce
JS
420 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
421 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
9dd35425 422 readl(q->phba->sli4_hba.EQDBregaddr);
4f774513
JS
423}
424
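/*
 * Illustrative sketch (not part of the driver): typical use of the EQ
 * doorbell helper above - release processed EQEs in batches without
 * re-arming, then re-arm once at the end of the pass (the pattern
 * lpfc_sli4_process_eq() follows below).
 *
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM);
 *	...
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
 */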
27d6ac0a 425/**
32517fc0
JS
426 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
427 * @phba: adapter with EQ
27d6ac0a 428 * @q: The Event Queue that the host has completed processing for.
32517fc0 429 * @count: Number of elements that have been consumed
27d6ac0a
JS
430 * @arm: Indicates whether the host wants to arm this EQ.
431 *
32517fc0
JS
432 * This routine will notify the HBA, by ringing the doorbell, that count
433 * number of EQEs have been processed. The @arm parameter indicates whether
434 * the queue should be rearmed when ringing the doorbell.
27d6ac0a 435 **/
32517fc0
JS
436void
437lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
438 uint32_t count, bool arm)
27d6ac0a 439{
27d6ac0a
JS
440 struct lpfc_register doorbell;
441
442 /* sanity check on queue memory */
32517fc0
JS
443 if (unlikely(!q || (count == 0 && !arm)))
444 return;
27d6ac0a
JS
445
446 /* ring doorbell for number popped */
447 doorbell.word0 = 0;
448 if (arm)
449 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
32517fc0 450 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
27d6ac0a
JS
451 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
452 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
453 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
454 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
455 readl(q->phba->sli4_hba.EQDBregaddr);
32517fc0
JS
456}
457
458static void
459__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
460 struct lpfc_eqe *eqe)
461{
462 if (!phba->sli4_hba.pc_sli4_params.eqav)
463 bf_set_le32(lpfc_eqe_valid, eqe, 0);
464
465 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
466
467 /* if the index wrapped around, toggle the valid bit */
468 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
469 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
470}
471
472static void
473lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
474{
475 struct lpfc_eqe *eqe;
476 uint32_t count = 0;
477
478 /* walk all the EQ entries and drop on the floor */
479 eqe = lpfc_sli4_eq_get(eq);
480 while (eqe) {
481 __lpfc_sli4_consume_eqe(phba, eq, eqe);
482 count++;
483 eqe = lpfc_sli4_eq_get(eq);
484 }
485
486 /* Clear and re-arm the EQ */
487 phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
488}
489
490static int
491lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
492{
493 struct lpfc_eqe *eqe;
494 int count = 0, consumed = 0;
495
496 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
497 goto rearm_and_exit;
498
499 eqe = lpfc_sli4_eq_get(eq);
500 while (eqe) {
501 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
502 __lpfc_sli4_consume_eqe(phba, eq, eqe);
503
504 consumed++;
505 if (!(++count % eq->max_proc_limit))
506 break;
507
508 if (!(count % eq->notify_interval)) {
509 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
510 LPFC_QUEUE_NOARM);
511 consumed = 0;
512 }
513
514 eqe = lpfc_sli4_eq_get(eq);
515 }
516 eq->EQ_processed += count;
517
518 /* Track the max number of EQEs processed in 1 intr */
519 if (count > eq->EQ_max_eqe)
520 eq->EQ_max_eqe = count;
521
522 eq->queue_claimed = 0;
523
524rearm_and_exit:
525 /* Always clear and re-arm the EQ */
526 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
527
528 return count;
27d6ac0a
JS
529}
530
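/*
 * Illustrative sketch (not part of the driver): draining an EQ with the
 * helper above.  lpfc_sli4_process_eq() handles and consumes EQEs, rings
 * the doorbell in NOARM batches, always re-arms the EQ on exit, and
 * returns the number of EQEs processed.
 *
 *	processed = lpfc_sli4_process_eq(phba, eq);
 */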
4f774513
JS
531/**
532 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
533 * @q: The Completion Queue to get the first valid CQE from
534 *
535 * This routine will get the first valid Completion Queue Entry from @q, update
536 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
537 * the Queue (no more work to do), or the Queue is full of CQEs that have been
538 * processed, but not popped back to the HBA then this routine will return NULL.
539 **/
540static struct lpfc_cqe *
541lpfc_sli4_cq_get(struct lpfc_queue *q)
542{
543 struct lpfc_cqe *cqe;
544
2e90f4b5
JS
545 /* sanity check on queue memory */
546 if (unlikely(!q))
547 return NULL;
9afbee3d 548 cqe = lpfc_sli4_qe(q, q->host_index);
2e90f4b5 549
4f774513 550 /* If the next CQE is not valid then we are done */
7365f6fd 551 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
4f774513 552 return NULL;
27f344eb
JS
553
554 /*
555 * insert barrier for instruction interlock : data from the hardware
556 * must have the valid bit checked before it can be copied and acted
2ea259ee
JS
557 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
558 * instructions allowing action on content before valid bit checked,
559 * add barrier here as well. May not be needed as "content" is a
560 * single 32-bit entity here (vs multi word structure for cq's).
27f344eb
JS
561 */
562 mb();
4f774513
JS
563 return cqe;
564}
565
32517fc0
JS
566static void
567__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
568 struct lpfc_cqe *cqe)
569{
570 if (!phba->sli4_hba.pc_sli4_params.cqav)
571 bf_set_le32(lpfc_cqe_valid, cqe, 0);
572
573 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
574
575 /* if the index wrapped around, toggle the valid bit */
576 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
577 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
578}
579
4f774513 580/**
32517fc0
JS
581 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
582 * @phba: the adapter with the CQ
4f774513 583 * @q: The Completion Queue that the host has completed processing for.
32517fc0 584 * @count: the number of elements that were consumed
4f774513
JS
585 * @arm: Indicates whether the host wants to arm this CQ.
586 *
32517fc0
JS
587 * This routine will notify the HBA, by ringing the doorbell, that the
588 * CQEs have been processed. The @arm parameter specifies whether the
589 * queue should be rearmed when ringing the doorbell.
4f774513 590 **/
32517fc0
JS
591void
592lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
593 uint32_t count, bool arm)
4f774513 594{
4f774513
JS
595 struct lpfc_register doorbell;
596
2e90f4b5 597 /* sanity check on queue memory */
32517fc0
JS
598 if (unlikely(!q || (count == 0 && !arm)))
599 return;
4f774513
JS
600
601 /* ring doorbell for number popped */
602 doorbell.word0 = 0;
603 if (arm)
604 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
32517fc0 605 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
4f774513 606 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
6b5151fd
JS
607 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
608 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
609 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
9dd35425 610 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
4f774513
JS
611}
612
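/*
 * Illustrative sketch (not part of the driver): the CQ doorbell helper
 * above mirrors the EQ variant - report consumed CQEs without arming
 * while the CQ is still being drained, then re-arm when done.
 *
 *	lpfc_sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_NOARM);
 *	...
 *	lpfc_sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_REARM);
 */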
27d6ac0a 613/**
32517fc0
JS
614 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
615 * @phba: the adapter with the CQ
27d6ac0a 616 * @q: The Completion Queue that the host has completed processing for.
32517fc0 617 * @count: the number of elements that were consumed
27d6ac0a
JS
618 * @arm: Indicates whether the host wants to arm this CQ.
619 *
32517fc0
JS
620 * This routine will notify the HBA, by ringing the doorbell, that the
621 * CQEs have been processed. The @arm parameter specifies whether the
622 * queue should be rearmed when ringing the doorbell.
27d6ac0a 623 **/
32517fc0
JS
624void
625lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
626 uint32_t count, bool arm)
27d6ac0a 627{
27d6ac0a
JS
628 struct lpfc_register doorbell;
629
630 /* sanity check on queue memory */
32517fc0
JS
631 if (unlikely(!q || (count == 0 && !arm)))
632 return;
27d6ac0a
JS
633
634 /* ring doorbell for number popped */
635 doorbell.word0 = 0;
636 if (arm)
637 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
32517fc0 638 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
27d6ac0a
JS
639 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
640 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
27d6ac0a
JS
641}
642
4f774513
JS
643/**
644 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
645 * @hq: The Header Receive Queue to operate on.
646 * @hrqe: The Receive Queue Entry to put on the Receive queue.
647 *
648 * This routine will copy the contents of @hrqe and @drqe to the next available entry on
649 * the @q. This function will then ring the Receive Queue Doorbell to signal the
650 * HBA to start processing the Receive Queue Entry. This function returns the
651 * index that the rqe was copied to if successful. If no entries are available
652 * on @q then this function will return -EBUSY.
653 * The caller is expected to hold the hbalock when calling this routine.
654 **/
895427bd 655int
4f774513
JS
656lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
657 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
658{
2e90f4b5
JS
659 struct lpfc_rqe *temp_hrqe;
660 struct lpfc_rqe *temp_drqe;
4f774513 661 struct lpfc_register doorbell;
cbc5de1b
JS
662 int hq_put_index;
663 int dq_put_index;
4f774513 664
2e90f4b5
JS
665 /* sanity check on queue memory */
666 if (unlikely(!hq) || unlikely(!dq))
667 return -ENOMEM;
cbc5de1b
JS
668 hq_put_index = hq->host_index;
669 dq_put_index = dq->host_index;
9afbee3d
JS
670 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
671 temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
2e90f4b5 672
4f774513
JS
673 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
674 return -EINVAL;
cbc5de1b 675 if (hq_put_index != dq_put_index)
4f774513
JS
676 return -EINVAL;
677 /* If the host has not yet processed the next entry then we are done */
cbc5de1b 678 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
4f774513 679 return -EBUSY;
48f8fdb4
JS
680 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
681 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
4f774513
JS
682
683 /* Update the host index to point to the next slot */
cbc5de1b
JS
684 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
685 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
61f3d4bf 686 hq->RQ_buf_posted++;
4f774513
JS
687
688 /* Ring The Header Receive Queue Doorbell */
32517fc0 689 if (!(hq->host_index % hq->notify_interval)) {
4f774513 690 doorbell.word0 = 0;
962bc51b
JS
691 if (hq->db_format == LPFC_DB_RING_FORMAT) {
692 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
32517fc0 693 hq->notify_interval);
962bc51b
JS
694 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
695 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
696 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
32517fc0 697 hq->notify_interval);
962bc51b
JS
698 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
699 hq->host_index);
700 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
701 } else {
702 return -EINVAL;
703 }
704 writel(doorbell.word0, hq->db_regaddr);
4f774513 705 }
cbc5de1b 706 return hq_put_index;
4f774513
JS
707}
708
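/*
 * Illustrative sketch (not part of the driver): posting one receive
 * buffer.  Header and data RQEs must land in the same slot, so both
 * queues are passed together; a non-negative return is the index the
 * pair was copied to.  "hrq", "drq", "hrqe" and "drqe" are hypothetical.
 *
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc == -EBUSY)
 *		... RQ full, repost later ...
 */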
709/**
710 * lpfc_sli4_rq_release - Updates internal hba index for RQ
711 * @q: The Header Receive Queue to operate on.
712 *
713 * This routine will update the HBA index of a queue to reflect consumption of
714 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
715 * consumed an entry the host calls this function to update the queue's
716 * internal pointers. This routine returns the number of entries that were
717 * consumed by the HBA.
718 **/
719static uint32_t
720lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
721{
2e90f4b5
JS
722 /* sanity check on queue memory */
723 if (unlikely(!hq) || unlikely(!dq))
724 return 0;
725
4f774513
JS
726 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
727 return 0;
728 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
729 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
730 return 1;
731}
732
e59058c4 733/**
3621a710 734 * lpfc_cmd_iocb - Get next command iocb entry in the ring
e59058c4
JS
735 * @phba: Pointer to HBA context object.
736 * @pring: Pointer to driver SLI ring object.
737 *
738 * This function returns pointer to next command iocb entry
739 * in the command ring. The caller must hold hbalock to prevent
740 * other threads from consuming the next command iocb.
741 * SLI-2/SLI-3 provide different sized iocbs.
742 **/
ed957684
JS
743static inline IOCB_t *
744lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
745{
7e56aa25
JS
746 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
747 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
ed957684
JS
748}
749
e59058c4 750/**
3621a710 751 * lpfc_resp_iocb - Get next response iocb entry in the ring
e59058c4
JS
752 * @phba: Pointer to HBA context object.
753 * @pring: Pointer to driver SLI ring object.
754 *
755 * This function returns pointer to next response iocb entry
756 * in the response ring. The caller must hold hbalock to make sure
757 * that no other thread consumes the next response iocb.
758 * SLI-2/SLI-3 provide different sized iocbs.
759 **/
ed957684
JS
760static inline IOCB_t *
761lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
762{
7e56aa25
JS
763 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
764 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
ed957684
JS
765}
766
e59058c4 767/**
3621a710 768 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4
JS
769 * @phba: Pointer to HBA context object.
770 *
771 * This function is called with hbalock held. This function
772 * allocates a new driver iocb object from the iocb pool. If the
773 * allocation is successful, it returns pointer to the newly
774 * allocated iocb object else it returns NULL.
775 **/
4f2e66c6 776struct lpfc_iocbq *
2e0fef85 777__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
0bd4ca25
JSEC
778{
779 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
780 struct lpfc_iocbq * iocbq = NULL;
781
1c2ba475
JT
782 lockdep_assert_held(&phba->hbalock);
783
0bd4ca25 784 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
2a9bf3d0
JS
785 if (iocbq)
786 phba->iocb_cnt++;
787 if (phba->iocb_cnt > phba->iocb_max)
788 phba->iocb_max = phba->iocb_cnt;
0bd4ca25
JSEC
789 return iocbq;
790}
791
da0436e9
JS
792/**
793 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
794 * @phba: Pointer to HBA context object.
795 * @xritag: XRI value.
796 *
797 * This function clears the sglq pointer from the array of active
798 * sglq's. The xritag that is passed in is used to index into the
799 * array. Before the xritag can be used it needs to be adjusted
800 * by subtracting the xribase.
801 *
802 * Returns sglq pointer = success, NULL = Failure.
803 **/
895427bd 804struct lpfc_sglq *
da0436e9
JS
805__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
806{
da0436e9 807 struct lpfc_sglq *sglq;
6d368e53
JS
808
809 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
810 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
da0436e9
JS
811 return sglq;
812}
813
814/**
815 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
816 * @phba: Pointer to HBA context object.
817 * @xritag: XRI value.
818 *
819 * This function returns the sglq pointer from the array of active
820 * sglq's. The xritag that is passed in is used to index into the
821 * array. Before the xritag can be used it needs to be adjusted
822 * by subtracting the xribase.
823 *
824 * Returns sglq pointer = success, NULL = Failure.
825 **/
0f65ff68 826struct lpfc_sglq *
da0436e9
JS
827__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
828{
da0436e9 829 struct lpfc_sglq *sglq;
6d368e53
JS
830
831 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
da0436e9
JS
832 return sglq;
833}
834
19ca7609 835/**
1151e3ec 836 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
19ca7609
JS
837 * @phba: Pointer to HBA context object.
838 * @xritag: xri used in this exchange.
839 * @rrq: The RRQ to be cleared.
840 *
19ca7609 841 **/
1151e3ec
JS
842void
843lpfc_clr_rrq_active(struct lpfc_hba *phba,
844 uint16_t xritag,
845 struct lpfc_node_rrq *rrq)
19ca7609 846{
1151e3ec 847 struct lpfc_nodelist *ndlp = NULL;
19ca7609 848
1151e3ec
JS
849 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
850 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
19ca7609
JS
851
852 /* The target DID could have been swapped (cable swap)
853 * we should use the ndlp from the findnode if it is
854 * available.
855 */
1151e3ec 856 if ((!ndlp) && rrq->ndlp)
19ca7609
JS
857 ndlp = rrq->ndlp;
858
1151e3ec
JS
859 if (!ndlp)
860 goto out;
861
cff261f6 862 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
19ca7609
JS
863 rrq->send_rrq = 0;
864 rrq->xritag = 0;
865 rrq->rrq_stop_time = 0;
866 }
1151e3ec 867out:
19ca7609
JS
868 mempool_free(rrq, phba->rrq_pool);
869}
870
871/**
872 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
873 * @phba: Pointer to HBA context object.
874 *
875 * This function must be called without the hbalock held. It
876 * checks if stop_time (ratov from setting rrq active) has
877 * been reached, if it has and the send_rrq flag is set then
878 * it will call lpfc_send_rrq. If the send_rrq flag is not set
879 * then it will just call the routine to clear the rrq and
880 * free the rrq resource.
881 * The timer is set to the next rrq that is going to expire before
882 * leaving the routine.
883 *
884 **/
885void
886lpfc_handle_rrq_active(struct lpfc_hba *phba)
887{
888 struct lpfc_node_rrq *rrq;
889 struct lpfc_node_rrq *nextrrq;
890 unsigned long next_time;
891 unsigned long iflags;
1151e3ec 892 LIST_HEAD(send_rrq);
19ca7609
JS
893
894 spin_lock_irqsave(&phba->hbalock, iflags);
895 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
256ec0d0 896 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
19ca7609 897 list_for_each_entry_safe(rrq, nextrrq,
1151e3ec
JS
898 &phba->active_rrq_list, list) {
899 if (time_after(jiffies, rrq->rrq_stop_time))
900 list_move(&rrq->list, &send_rrq);
901 else if (time_before(rrq->rrq_stop_time, next_time))
19ca7609
JS
902 next_time = rrq->rrq_stop_time;
903 }
904 spin_unlock_irqrestore(&phba->hbalock, iflags);
06918ac5
JS
905 if ((!list_empty(&phba->active_rrq_list)) &&
906 (!(phba->pport->load_flag & FC_UNLOADING)))
19ca7609 907 mod_timer(&phba->rrq_tmr, next_time);
1151e3ec
JS
908 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
909 list_del(&rrq->list);
910 if (!rrq->send_rrq)
911 /* this call will free the rrq */
912 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
913 else if (lpfc_send_rrq(phba, rrq)) {
914 /* if we send the rrq then the completion handler
915 * will clear the bit in the xribitmap.
916 */
917 lpfc_clr_rrq_active(phba, rrq->xritag,
918 rrq);
919 }
920 }
19ca7609
JS
921}
922
923/**
924 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
925 * @vport: Pointer to vport context object.
926 * @xri: The xri used in the exchange.
927 * @did: The targets DID for this exchange.
928 *
929 * returns NULL = rrq not found in the phba->active_rrq_list.
930 * rrq = rrq for this xri and target.
931 **/
932struct lpfc_node_rrq *
933lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
934{
935 struct lpfc_hba *phba = vport->phba;
936 struct lpfc_node_rrq *rrq;
937 struct lpfc_node_rrq *nextrrq;
938 unsigned long iflags;
939
940 if (phba->sli_rev != LPFC_SLI_REV4)
941 return NULL;
942 spin_lock_irqsave(&phba->hbalock, iflags);
943 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
944 if (rrq->vport == vport && rrq->xritag == xri &&
945 rrq->nlp_DID == did){
946 list_del(&rrq->list);
947 spin_unlock_irqrestore(&phba->hbalock, iflags);
948 return rrq;
949 }
950 }
951 spin_unlock_irqrestore(&phba->hbalock, iflags);
952 return NULL;
953}
954
955/**
956 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
957 * @vport: Pointer to vport context object.
1151e3ec
JS
958 * @ndlp: Pointer to the lpfc_nodelist structure.
959 * If ndlp is NULL, remove all active RRQs for this vport from the
960 * phba->active_rrq_list and clear the rrq.
961 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
19ca7609
JS
962 **/
963void
1151e3ec 964lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
19ca7609
JS
965
966{
967 struct lpfc_hba *phba = vport->phba;
968 struct lpfc_node_rrq *rrq;
969 struct lpfc_node_rrq *nextrrq;
970 unsigned long iflags;
1151e3ec 971 LIST_HEAD(rrq_list);
19ca7609
JS
972
973 if (phba->sli_rev != LPFC_SLI_REV4)
974 return;
1151e3ec
JS
975 if (!ndlp) {
976 lpfc_sli4_vport_delete_els_xri_aborted(vport);
977 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
19ca7609 978 }
1151e3ec
JS
979 spin_lock_irqsave(&phba->hbalock, iflags);
980 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
981 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
982 list_move(&rrq->list, &rrq_list);
19ca7609 983 spin_unlock_irqrestore(&phba->hbalock, iflags);
1151e3ec
JS
984
985 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
986 list_del(&rrq->list);
987 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
988 }
19ca7609
JS
989}
990
19ca7609 991/**
1151e3ec 992 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
19ca7609
JS
993 * @phba: Pointer to HBA context object.
994 * @ndlp: Targets nodelist pointer for this exchange.
995 * @xritag: the xri in the bitmap to test.
996 *
997 * This function is called with hbalock held. This function
998 * returns 0 = rrq not active for this xri
999 * 1 = rrq is valid for this xri.
1000 **/
1151e3ec
JS
1001int
1002lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
19ca7609
JS
1003 uint16_t xritag)
1004{
1c2ba475 1005 lockdep_assert_held(&phba->hbalock);
19ca7609
JS
1006 if (!ndlp)
1007 return 0;
cff261f6
JS
1008 if (!ndlp->active_rrqs_xri_bitmap)
1009 return 0;
1010 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
258f84fa 1011 return 1;
19ca7609
JS
1012 else
1013 return 0;
1014}
1015
1016/**
1017 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
1018 * @phba: Pointer to HBA context object.
1019 * @ndlp: nodelist pointer for this target.
1020 * @xritag: xri used in this exchange.
1021 * @rxid: Remote Exchange ID.
1022 * @send_rrq: Flag used to determine if we should send rrq els cmd.
1023 *
1024 * This function takes the hbalock.
1025 * The active bit is always set in the active rrq xri_bitmap even
1026 * if there is no slot available for the other rrq information.
1027 *
1028 * returns 0 rrq activated for this xri
1029 * < 0 No memory or invalid ndlp.
1030 **/
1031int
1032lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
b42c07c8 1033 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
19ca7609 1034{
19ca7609 1035 unsigned long iflags;
b42c07c8
JS
1036 struct lpfc_node_rrq *rrq;
1037 int empty;
1038
1039 if (!ndlp)
1040 return -EINVAL;
1041
1042 if (!phba->cfg_enable_rrq)
1043 return -EINVAL;
19ca7609
JS
1044
1045 spin_lock_irqsave(&phba->hbalock, iflags);
b42c07c8
JS
1046 if (phba->pport->load_flag & FC_UNLOADING) {
1047 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1048 goto out;
1049 }
1050
1051 /*
1052 * set the active bit even if there is no mem available.
1053 */
1054 if (NLP_CHK_FREE_REQ(ndlp))
1055 goto out;
1056
1057 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1058 goto out;
1059
cff261f6
JS
1060 if (!ndlp->active_rrqs_xri_bitmap)
1061 goto out;
1062
1063 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
b42c07c8
JS
1064 goto out;
1065
19ca7609 1066 spin_unlock_irqrestore(&phba->hbalock, iflags);
b42c07c8
JS
1067 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
1068 if (!rrq) {
1069 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1070 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1071 " DID:0x%x Send:%d\n",
1072 xritag, rxid, ndlp->nlp_DID, send_rrq);
1073 return -EINVAL;
1074 }
e5771b4d
JS
1075 if (phba->cfg_enable_rrq == 1)
1076 rrq->send_rrq = send_rrq;
1077 else
1078 rrq->send_rrq = 0;
b42c07c8 1079 rrq->xritag = xritag;
256ec0d0
JS
1080 rrq->rrq_stop_time = jiffies +
1081 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
b42c07c8
JS
1082 rrq->ndlp = ndlp;
1083 rrq->nlp_DID = ndlp->nlp_DID;
1084 rrq->vport = ndlp->vport;
1085 rrq->rxid = rxid;
b42c07c8
JS
1086 spin_lock_irqsave(&phba->hbalock, iflags);
1087 empty = list_empty(&phba->active_rrq_list);
1088 list_add_tail(&rrq->list, &phba->active_rrq_list);
1089 phba->hba_flag |= HBA_RRQ_ACTIVE;
1090 if (empty)
1091 lpfc_worker_wake_up(phba);
1092 spin_unlock_irqrestore(&phba->hbalock, iflags);
1093 return 0;
1094out:
1095 spin_unlock_irqrestore(&phba->hbalock, iflags);
1096 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1097 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1098 " DID:0x%x Send:%d\n",
1099 xritag, rxid, ndlp->nlp_DID, send_rrq);
1100 return -EINVAL;
19ca7609
JS
1101}
1102
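/*
 * Illustrative sketch (not part of the driver): typical RRQ handling for
 * an exchange.  lpfc_test_rrq_active() is called under the hbalock per
 * its comment, while lpfc_set_rrq_active() takes the lock itself.
 * "ndlp", "xri", "rxid", "busy" and "iflags" are hypothetical.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	busy = lpfc_test_rrq_active(phba, ndlp, xri);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (!busy)
 *		lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 */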
da0436e9 1103/**
895427bd 1104 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
da0436e9 1105 * @phba: Pointer to HBA context object.
19ca7609 1106 * @piocb: Pointer to the iocbq.
da0436e9 1107 *
dafe8cea 1108 * This function is called with the ring lock held. This function
6d368e53 1109 * gets a new driver sglq object from the sglq list. If the
da0436e9
JS
1110 * list is not empty then it is successful, it returns pointer to the newly
1111 * allocated sglq object else it returns NULL.
1112 **/
1113static struct lpfc_sglq *
895427bd 1114__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
da0436e9 1115{
895427bd 1116 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
da0436e9 1117 struct lpfc_sglq *sglq = NULL;
19ca7609 1118 struct lpfc_sglq *start_sglq = NULL;
c490850a 1119 struct lpfc_io_buf *lpfc_cmd;
19ca7609
JS
1120 struct lpfc_nodelist *ndlp;
1121 int found = 0;
1122
1c2ba475
JT
1123 lockdep_assert_held(&phba->hbalock);
1124
19ca7609 1125 if (piocbq->iocb_flag & LPFC_IO_FCP) {
c490850a 1126 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
19ca7609 1127 ndlp = lpfc_cmd->rdata->pnode;
be858b65 1128 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
6c7cf486 1129 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
19ca7609 1130 ndlp = piocbq->context_un.ndlp;
6c7cf486
JS
1131 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1132 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1133 ndlp = NULL;
1134 else
1135 ndlp = piocbq->context_un.ndlp;
1136 } else {
19ca7609 1137 ndlp = piocbq->context1;
6c7cf486 1138 }
19ca7609 1139
895427bd
JS
1140 spin_lock(&phba->sli4_hba.sgl_list_lock);
1141 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
19ca7609
JS
1142 start_sglq = sglq;
1143 while (!found) {
1144 if (!sglq)
d11f54b7 1145 break;
895427bd
JS
1146 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1147 test_bit(sglq->sli4_lxritag,
1148 ndlp->active_rrqs_xri_bitmap)) {
19ca7609
JS
1149 /* This xri has an rrq outstanding for this DID.
1150 * put it back in the list and get another xri.
1151 */
895427bd 1152 list_add_tail(&sglq->list, lpfc_els_sgl_list);
19ca7609 1153 sglq = NULL;
895427bd 1154 list_remove_head(lpfc_els_sgl_list, sglq,
19ca7609
JS
1155 struct lpfc_sglq, list);
1156 if (sglq == start_sglq) {
14041bd1 1157 list_add_tail(&sglq->list, lpfc_els_sgl_list);
19ca7609
JS
1158 sglq = NULL;
1159 break;
1160 } else
1161 continue;
1162 }
1163 sglq->ndlp = ndlp;
1164 found = 1;
6d368e53 1165 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
19ca7609
JS
1166 sglq->state = SGL_ALLOCATED;
1167 }
895427bd 1168 spin_unlock(&phba->sli4_hba.sgl_list_lock);
da0436e9
JS
1169 return sglq;
1170}
1171
f358dd0c
JS
1172/**
1173 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1174 * @phba: Pointer to HBA context object.
1175 * @piocbq: Pointer to the iocbq.
1176 *
1177 * This function is called with the sgl_list lock held. This function
1178 * gets a new driver sglq object from the sglq list. If the
1179 * list is not empty then it is successful, it returns pointer to the newly
1180 * allocated sglq object else it returns NULL.
1181 **/
1182struct lpfc_sglq *
1183__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1184{
1185 struct list_head *lpfc_nvmet_sgl_list;
1186 struct lpfc_sglq *sglq = NULL;
1187
1188 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1189
1190 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1191
1192 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1193 if (!sglq)
1194 return NULL;
1195 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1196 sglq->state = SGL_ALLOCATED;
da0436e9
JS
1197 return sglq;
1198}
1199
e59058c4 1200/**
3621a710 1201 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4
JS
1202 * @phba: Pointer to HBA context object.
1203 *
1204 * This function is called with no lock held. This function
1205 * allocates a new driver iocb object from the iocb pool. If the
1206 * allocation is successful, it returns pointer to the newly
1207 * allocated iocb object else it returns NULL.
1208 **/
2e0fef85
JS
1209struct lpfc_iocbq *
1210lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1211{
1212 struct lpfc_iocbq * iocbq = NULL;
1213 unsigned long iflags;
1214
1215 spin_lock_irqsave(&phba->hbalock, iflags);
1216 iocbq = __lpfc_sli_get_iocbq(phba);
1217 spin_unlock_irqrestore(&phba->hbalock, iflags);
1218 return iocbq;
1219}
1220
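/*
 * Illustrative sketch (not part of the driver): allocating and returning
 * an iocb.  lpfc_sli_get_iocbq() and lpfc_sli_release_iocbq() take the
 * hbalock internally; the __lpfc_sli_* variants are for callers that
 * already hold it.
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *	if (iocbq) {
 *		... build and issue the command ...
 *		lpfc_sli_release_iocbq(phba, iocbq);
 *	}
 */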
4f774513
JS
1221/**
1222 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1223 * @phba: Pointer to HBA context object.
1224 * @iocbq: Pointer to driver iocb object.
1225 *
1226 * This function is called with hbalock held to release driver
1227 * iocb object to the iocb pool. The iotag in the iocb object
1228 * does not change for each use of the iocb object. This function
1229 * clears all other fields of the iocb object when it is freed.
1230 * The sglq structure that holds the xritag and phys and virtual
1231 * mappings for the scatter gather list is retrieved from the
1232 * active array of sglq. The get of the sglq pointer also clears
1233 * the entry in the array. If the status of the IO indiactes that
1234 * this IO was aborted then the sglq entry it put on the
1235 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1236 * IO has good status or fails for any other reason then the sglq
895427bd 1237 * entry is added to the free list (lpfc_els_sgl_list).
4f774513
JS
1238 **/
1239static void
1240__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1241{
1242 struct lpfc_sglq *sglq;
1243 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
2a9bf3d0 1244 unsigned long iflag = 0;
895427bd 1245 struct lpfc_sli_ring *pring;
4f774513 1246
1c2ba475
JT
1247 lockdep_assert_held(&phba->hbalock);
1248
4f774513
JS
1249 if (iocbq->sli4_xritag == NO_XRI)
1250 sglq = NULL;
1251 else
6d368e53
JS
1252 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1253
0e9bb8d7 1254
4f774513 1255 if (sglq) {
f358dd0c
JS
1256 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1257 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1258 iflag);
1259 sglq->state = SGL_FREED;
1260 sglq->ndlp = NULL;
1261 list_add_tail(&sglq->list,
1262 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1263 spin_unlock_irqrestore(
1264 &phba->sli4_hba.sgl_list_lock, iflag);
1265 goto out;
1266 }
1267
895427bd 1268 pring = phba->sli4_hba.els_wq->pring;
0f65ff68
JS
1269 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1270 (sglq->state != SGL_XRI_ABORTED)) {
895427bd
JS
1271 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1272 iflag);
4f774513 1273 list_add(&sglq->list,
895427bd 1274 &phba->sli4_hba.lpfc_abts_els_sgl_list);
4f774513 1275 spin_unlock_irqrestore(
895427bd 1276 &phba->sli4_hba.sgl_list_lock, iflag);
0f65ff68 1277 } else {
895427bd
JS
1278 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1279 iflag);
0f65ff68 1280 sglq->state = SGL_FREED;
19ca7609 1281 sglq->ndlp = NULL;
fedd3b7b 1282 list_add_tail(&sglq->list,
895427bd
JS
1283 &phba->sli4_hba.lpfc_els_sgl_list);
1284 spin_unlock_irqrestore(
1285 &phba->sli4_hba.sgl_list_lock, iflag);
2a9bf3d0
JS
1286
1287 /* Check if TXQ queue needs to be serviced */
0e9bb8d7 1288 if (!list_empty(&pring->txq))
2a9bf3d0 1289 lpfc_worker_wake_up(phba);
0f65ff68 1290 }
4f774513
JS
1291 }
1292
f358dd0c 1293out:
4f774513
JS
1294 /*
1295 * Clean all volatile data fields, preserve iotag and node struct.
1296 */
1297 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
6d368e53 1298 iocbq->sli4_lxritag = NO_XRI;
4f774513 1299 iocbq->sli4_xritag = NO_XRI;
f358dd0c
JS
1300 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1301 LPFC_IO_NVME_LS);
4f774513
JS
1302 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1303}
1304
2a9bf3d0 1305
e59058c4 1306/**
3772a991 1307 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
e59058c4
JS
1308 * @phba: Pointer to HBA context object.
1309 * @iocbq: Pointer to driver iocb object.
1310 *
1311 * This function is called with hbalock held to release driver
1312 * iocb object to the iocb pool. The iotag in the iocb object
1313 * does not change for each use of the iocb object. This function
1314 * clears all other fields of the iocb object when it is freed.
1315 **/
a6ababd2 1316static void
3772a991 1317__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
604a3e30 1318{
2e0fef85 1319 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
604a3e30 1320
1c2ba475 1321 lockdep_assert_held(&phba->hbalock);
0e9bb8d7 1322
604a3e30
JB
1323 /*
1324 * Clean all volatile data fields, preserve iotag and node struct.
1325 */
1326 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
3772a991 1327 iocbq->sli4_xritag = NO_XRI;
604a3e30
JB
1328 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1329}
1330
3772a991
JS
1331/**
1332 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1333 * @phba: Pointer to HBA context object.
1334 * @iocbq: Pointer to driver iocb object.
1335 *
1336 * This function is called with hbalock held to release driver
1337 * iocb object to the iocb pool. The iotag in the iocb object
1338 * does not change for each use of the iocb object. This function
1339 * clears all other fields of the iocb object when it is freed.
1340 **/
1341static void
1342__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1343{
1c2ba475
JT
1344 lockdep_assert_held(&phba->hbalock);
1345
3772a991 1346 phba->__lpfc_sli_release_iocbq(phba, iocbq);
2a9bf3d0 1347 phba->iocb_cnt--;
3772a991
JS
1348}
1349
e59058c4 1350/**
3621a710 1351 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
e59058c4
JS
1352 * @phba: Pointer to HBA context object.
1353 * @iocbq: Pointer to driver iocb object.
1354 *
1355 * This function is called with no lock held to release the iocb to
1356 * iocb pool.
1357 **/
2e0fef85
JS
1358void
1359lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1360{
1361 unsigned long iflags;
1362
1363 /*
1364 * Clean all volatile data fields, preserve iotag and node struct.
1365 */
1366 spin_lock_irqsave(&phba->hbalock, iflags);
1367 __lpfc_sli_release_iocbq(phba, iocbq);
1368 spin_unlock_irqrestore(&phba->hbalock, iflags);
1369}
1370
a257bf90
JS
1371/**
1372 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1373 * @phba: Pointer to HBA context object.
1374 * @iocblist: List of IOCBs.
1375 * @ulpstatus: ULP status in IOCB command field.
1376 * @ulpWord4: ULP word-4 in IOCB command field.
1377 *
1378 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1379 * on the list by invoking the complete callback function associated with the
1380 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
1381 * fields.
1382 **/
1383void
1384lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1385 uint32_t ulpstatus, uint32_t ulpWord4)
1386{
1387 struct lpfc_iocbq *piocb;
1388
1389 while (!list_empty(iocblist)) {
1390 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
a257bf90
JS
1391 if (!piocb->iocb_cmpl)
1392 lpfc_sli_release_iocbq(phba, piocb);
1393 else {
1394 piocb->iocb.ulpStatus = ulpstatus;
1395 piocb->iocb.un.ulpWord[4] = ulpWord4;
1396 (piocb->iocb_cmpl) (phba, piocb, piocb);
1397 }
1398 }
1399 return;
1400}
1401
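/*
 * Illustrative sketch (not part of the driver): failing back a local list
 * of IOCBs with the helper above.  The status values shown are only an
 * example of what a caller might pass.
 *
 *	LIST_HEAD(completions);
 *	... move the IOCBs to be failed onto "completions" ...
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */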
e59058c4 1402/**
3621a710
JS
1403 * lpfc_sli_iocb_cmd_type - Get the iocb type
1404 * @iocb_cmnd: iocb command code.
e59058c4
JS
1405 *
1406 * This function is called by ring event handler function to get the iocb type.
1407 * This function translates the iocb command to an iocb command type used to
1408 * decide the final disposition of each completed IOCB.
1409 * The function returns
1410 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1411 * LPFC_SOL_IOCB if it is a solicited iocb completion
1412 * LPFC_ABORT_IOCB if it is an abort iocb
1413 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1414 *
1415 * The caller is not required to hold any lock.
1416 **/
dea3101e 1417static lpfc_iocb_type
1418lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1419{
1420 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1421
1422 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1423 return 0;
1424
1425 switch (iocb_cmnd) {
1426 case CMD_XMIT_SEQUENCE_CR:
1427 case CMD_XMIT_SEQUENCE_CX:
1428 case CMD_XMIT_BCAST_CN:
1429 case CMD_XMIT_BCAST_CX:
1430 case CMD_ELS_REQUEST_CR:
1431 case CMD_ELS_REQUEST_CX:
1432 case CMD_CREATE_XRI_CR:
1433 case CMD_CREATE_XRI_CX:
1434 case CMD_GET_RPI_CN:
1435 case CMD_XMIT_ELS_RSP_CX:
1436 case CMD_GET_RPI_CR:
1437 case CMD_FCP_IWRITE_CR:
1438 case CMD_FCP_IWRITE_CX:
1439 case CMD_FCP_IREAD_CR:
1440 case CMD_FCP_IREAD_CX:
1441 case CMD_FCP_ICMND_CR:
1442 case CMD_FCP_ICMND_CX:
f5603511
JS
1443 case CMD_FCP_TSEND_CX:
1444 case CMD_FCP_TRSP_CX:
1445 case CMD_FCP_TRECEIVE_CX:
1446 case CMD_FCP_AUTO_TRSP_CX:
dea3101e 1447 case CMD_ADAPTER_MSG:
1448 case CMD_ADAPTER_DUMP:
1449 case CMD_XMIT_SEQUENCE64_CR:
1450 case CMD_XMIT_SEQUENCE64_CX:
1451 case CMD_XMIT_BCAST64_CN:
1452 case CMD_XMIT_BCAST64_CX:
1453 case CMD_ELS_REQUEST64_CR:
1454 case CMD_ELS_REQUEST64_CX:
1455 case CMD_FCP_IWRITE64_CR:
1456 case CMD_FCP_IWRITE64_CX:
1457 case CMD_FCP_IREAD64_CR:
1458 case CMD_FCP_IREAD64_CX:
1459 case CMD_FCP_ICMND64_CR:
1460 case CMD_FCP_ICMND64_CX:
f5603511
JS
1461 case CMD_FCP_TSEND64_CX:
1462 case CMD_FCP_TRSP64_CX:
1463 case CMD_FCP_TRECEIVE64_CX:
dea3101e 1464 case CMD_GEN_REQUEST64_CR:
1465 case CMD_GEN_REQUEST64_CX:
1466 case CMD_XMIT_ELS_RSP64_CX:
da0436e9
JS
1467 case DSSCMD_IWRITE64_CR:
1468 case DSSCMD_IWRITE64_CX:
1469 case DSSCMD_IREAD64_CR:
1470 case DSSCMD_IREAD64_CX:
dea3101e 1471 type = LPFC_SOL_IOCB;
1472 break;
1473 case CMD_ABORT_XRI_CN:
1474 case CMD_ABORT_XRI_CX:
1475 case CMD_CLOSE_XRI_CN:
1476 case CMD_CLOSE_XRI_CX:
1477 case CMD_XRI_ABORTED_CX:
1478 case CMD_ABORT_MXRI64_CN:
6669f9bb 1479 case CMD_XMIT_BLS_RSP64_CX:
dea3101e 1480 type = LPFC_ABORT_IOCB;
1481 break;
1482 case CMD_RCV_SEQUENCE_CX:
1483 case CMD_RCV_ELS_REQ_CX:
1484 case CMD_RCV_SEQUENCE64_CX:
1485 case CMD_RCV_ELS_REQ64_CX:
57127f15 1486 case CMD_ASYNC_STATUS:
ed957684
JS
1487 case CMD_IOCB_RCV_SEQ64_CX:
1488 case CMD_IOCB_RCV_ELS64_CX:
1489 case CMD_IOCB_RCV_CONT64_CX:
3163f725 1490 case CMD_IOCB_RET_XRI64_CX:
dea3101e 1491 type = LPFC_UNSOL_IOCB;
1492 break;
3163f725
JS
1493 case CMD_IOCB_XMIT_MSEQ64_CR:
1494 case CMD_IOCB_XMIT_MSEQ64_CX:
1495 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1496 case CMD_IOCB_RCV_ELS_LIST64_CX:
1497 case CMD_IOCB_CLOSE_EXTENDED_CN:
1498 case CMD_IOCB_ABORT_EXTENDED_CN:
1499 case CMD_IOCB_RET_HBQE64_CN:
1500 case CMD_IOCB_FCP_IBIDIR64_CR:
1501 case CMD_IOCB_FCP_IBIDIR64_CX:
1502 case CMD_IOCB_FCP_ITASKMGT64_CX:
1503 case CMD_IOCB_LOGENTRY_CN:
1504 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1505 printk("%s - Unhandled SLI-3 Command x%x\n",
cadbd4a5 1506 __func__, iocb_cmnd);
3163f725
JS
1507 type = LPFC_UNKNOWN_IOCB;
1508 break;
dea3101e 1509 default:
1510 type = LPFC_UNKNOWN_IOCB;
1511 break;
1512 }
1513
1514 return type;
1515}
1516
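/*
 * Illustrative sketch (not part of the driver): how a ring event handler
 * might dispatch on the type returned above.  "irsp" is hypothetical.
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:
 *	case LPFC_UNSOL_IOCB:
 *	case LPFC_ABORT_IOCB:
 *	case LPFC_UNKNOWN_IOCB:
 *		break;
 *	}
 */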
e59058c4 1517/**
3621a710 1518 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
e59058c4
JS
1519 * @phba: Pointer to HBA context object.
1520 *
1521 * This function is called from SLI initialization code
1522 * to configure every ring of the HBA's SLI interface. The
1523 * caller is not required to hold any lock. This function issues
1524 * a config_ring mailbox command for each ring.
1525 * This function returns zero if successful else returns a negative
1526 * error code.
1527 **/
dea3101e 1528static int
ed957684 1529lpfc_sli_ring_map(struct lpfc_hba *phba)
dea3101e 1530{
1531 struct lpfc_sli *psli = &phba->sli;
ed957684
JS
1532 LPFC_MBOXQ_t *pmb;
1533 MAILBOX_t *pmbox;
1534 int i, rc, ret = 0;
dea3101e 1535
ed957684
JS
1536 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1537 if (!pmb)
1538 return -ENOMEM;
04c68496 1539 pmbox = &pmb->u.mb;
ed957684 1540 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 1541 for (i = 0; i < psli->num_rings; i++) {
dea3101e 1542 lpfc_config_ring(phba, i, pmb);
1543 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1544 if (rc != MBX_SUCCESS) {
92d7f7b0 1545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1546 "0446 Adapter failed to init (%d), "
dea3101e 1547 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1548 "ring %d\n",
e8b62011
JS
1549 rc, pmbox->mbxCommand,
1550 pmbox->mbxStatus, i);
2e0fef85 1551 phba->link_state = LPFC_HBA_ERROR;
ed957684
JS
1552 ret = -ENXIO;
1553 break;
dea3101e 1554 }
1555 }
ed957684
JS
1556 mempool_free(pmb, phba->mbox_mem_pool);
1557 return ret;
dea3101e 1558}
1559
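/*
 * Illustrative sketch (not part of the driver source): a caller in the SLI
 * bring-up path is expected to treat a non-zero return from
 * lpfc_sli_ring_map() as fatal and abandon initialization, for example:
 *
 *	rc = lpfc_sli_ring_map(phba);
 *	if (rc)
 *		goto setup_error;
 *
 * where "setup_error" is a hypothetical unwind label used only for this
 * example.
 */
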
e59058c4 1560/**
3621a710 1561 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
e59058c4
JS
1562 * @phba: Pointer to HBA context object.
1563 * @pring: Pointer to driver SLI ring object.
1564 * @piocb: Pointer to the driver iocb object.
1565 *
1566 * This function is called with hbalock held. The function adds the
1567 * new iocb to txcmplq of the given ring. This function always returns
 1568 * 0. If this function is called for the ELS ring, this function checks if
 1569 * there is a vport associated with the ELS command. This function also
 1570 * starts the els_tmofunc timer if this is an ELS command.
1571 **/
dea3101e 1572static int
2e0fef85
JS
1573lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1574 struct lpfc_iocbq *piocb)
dea3101e 1575{
1c2ba475
JT
1576 lockdep_assert_held(&phba->hbalock);
1577
2319f847 1578 BUG_ON(!piocb);
22466da5 1579
dea3101e 1580 list_add_tail(&piocb->list, &pring->txcmplq);
4f2e66c6 1581 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
c490850a 1582 pring->txcmplq_cnt++;
2a9bf3d0 1583
92d7f7b0
JS
1584 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1585 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2319f847
MFO
1586 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1587 BUG_ON(!piocb->vport);
1588 if (!(piocb->vport->load_flag & FC_UNLOADING))
1589 mod_timer(&piocb->vport->els_tmofunc,
1590 jiffies +
1591 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1592 }
dea3101e 1593
2e0fef85 1594 return 0;
dea3101e 1595}
1596
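/*
 * Worked example for the timer arithmetic in lpfc_sli_ringtxcmpl_put()
 * (descriptive note only): the ELS timeout is armed for twice the R_A_TOV
 * value, converted to jiffies. With phba->fc_ratov = 10 seconds, the
 * expression msecs_to_jiffies(1000 * (phba->fc_ratov << 1)) evaluates to
 * msecs_to_jiffies(20000), i.e. the els_tmofunc timer fires 20 seconds later.
 */
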
e59058c4 1597/**
3621a710 1598 * lpfc_sli_ringtx_get - Get first element of the txq
e59058c4
JS
1599 * @phba: Pointer to HBA context object.
1600 * @pring: Pointer to driver SLI ring object.
1601 *
1602 * This function is called with hbalock held to get next
1603 * iocb in txq of the given ring. If there is any iocb in
1604 * the txq, the function returns first iocb in the list after
1605 * removing the iocb from the list, else it returns NULL.
1606 **/
2a9bf3d0 1607struct lpfc_iocbq *
2e0fef85 1608lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1609{
dea3101e 1610 struct lpfc_iocbq *cmd_iocb;
1611
1c2ba475
JT
1612 lockdep_assert_held(&phba->hbalock);
1613
858c9f6c 1614 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
2e0fef85 1615 return cmd_iocb;
dea3101e 1616}
1617
e59058c4 1618/**
3621a710 1619 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
e59058c4
JS
1620 * @phba: Pointer to HBA context object.
1621 * @pring: Pointer to driver SLI ring object.
1622 *
1623 * This function is called with hbalock held and the caller must post the
1624 * iocb without releasing the lock. If the caller releases the lock,
1625 * iocb slot returned by the function is not guaranteed to be available.
1626 * The function returns pointer to the next available iocb slot if there
1627 * is available slot in the ring, else it returns NULL.
1628 * If the get index of the ring is ahead of the put index, the function
1629 * will post an error attention event to the worker thread to take the
1630 * HBA to offline state.
1631 **/
dea3101e 1632static IOCB_t *
1633lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1634{
34b02dcd 1635 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
7e56aa25 1636 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1c2ba475
JT
1637
1638 lockdep_assert_held(&phba->hbalock);
1639
7e56aa25
JS
1640 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1641 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1642 pring->sli.sli3.next_cmdidx = 0;
dea3101e 1643
7e56aa25
JS
1644 if (unlikely(pring->sli.sli3.local_getidx ==
1645 pring->sli.sli3.next_cmdidx)) {
dea3101e 1646
7e56aa25 1647 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 1648
7e56aa25 1649 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea3101e 1650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1651 "0315 Ring %d issue: portCmdGet %d "
025dfdaf 1652 "is bigger than cmd ring %d\n",
e8b62011 1653 pring->ringno,
7e56aa25
JS
1654 pring->sli.sli3.local_getidx,
1655 max_cmd_idx);
dea3101e 1656
2e0fef85 1657 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1658 /*
1659 * All error attention handlers are posted to
1660 * worker thread
1661 */
1662 phba->work_ha |= HA_ERATT;
1663 phba->work_hs = HS_FFER3;
92d7f7b0 1664
5e9d9b82 1665 lpfc_worker_wake_up(phba);
dea3101e 1666
1667 return NULL;
1668 }
1669
7e56aa25 1670 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea3101e 1671 return NULL;
1672 }
1673
ed957684 1674 return lpfc_cmd_iocb(phba, pring);
dea3101e 1675}
1676
e59058c4 1677/**
3621a710 1678 * lpfc_sli_next_iotag - Get an iotag for the iocb
e59058c4
JS
1679 * @phba: Pointer to HBA context object.
1680 * @iocbq: Pointer to driver iocb object.
1681 *
1682 * This function gets an iotag for the iocb. If there is no unused iotag and
 1683 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1684 * array and assigns a new iotag.
1685 * The function returns the allocated iotag if successful, else returns zero.
1686 * Zero is not a valid iotag.
1687 * The caller is not required to hold any lock.
1688 **/
604a3e30 1689uint16_t
2e0fef85 1690lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 1691{
2e0fef85
JS
1692 struct lpfc_iocbq **new_arr;
1693 struct lpfc_iocbq **old_arr;
604a3e30
JB
1694 size_t new_len;
1695 struct lpfc_sli *psli = &phba->sli;
1696 uint16_t iotag;
dea3101e 1697
2e0fef85 1698 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1699 iotag = psli->last_iotag;
1700 if(++iotag < psli->iocbq_lookup_len) {
1701 psli->last_iotag = iotag;
1702 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1703 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1704 iocbq->iotag = iotag;
1705 return iotag;
2e0fef85 1706 } else if (psli->iocbq_lookup_len < (0xffff
604a3e30
JB
1707 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1708 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2e0fef85 1709 spin_unlock_irq(&phba->hbalock);
6396bb22 1710 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
604a3e30
JB
1711 GFP_KERNEL);
1712 if (new_arr) {
2e0fef85 1713 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1714 old_arr = psli->iocbq_lookup;
1715 if (new_len <= psli->iocbq_lookup_len) {
 1716 /* highly improbable case */
1717 kfree(new_arr);
1718 iotag = psli->last_iotag;
1719 if(++iotag < psli->iocbq_lookup_len) {
1720 psli->last_iotag = iotag;
1721 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1722 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1723 iocbq->iotag = iotag;
1724 return iotag;
1725 }
2e0fef85 1726 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1727 return 0;
1728 }
1729 if (psli->iocbq_lookup)
1730 memcpy(new_arr, old_arr,
1731 ((psli->last_iotag + 1) *
311464ec 1732 sizeof (struct lpfc_iocbq *)));
604a3e30
JB
1733 psli->iocbq_lookup = new_arr;
1734 psli->iocbq_lookup_len = new_len;
1735 psli->last_iotag = iotag;
1736 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1737 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1738 iocbq->iotag = iotag;
1739 kfree(old_arr);
1740 return iotag;
1741 }
8f6d98d2 1742 } else
2e0fef85 1743 spin_unlock_irq(&phba->hbalock);
dea3101e 1744
bc73905a 1745 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
1746 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1747 psli->last_iotag);
dea3101e 1748
604a3e30 1749 return 0;
dea3101e 1750}
1751
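/*
 * Illustrative sketch (assumption, not driver code): a caller that has just
 * obtained an iocbq is expected to request an iotag before issuing it and to
 * treat a zero return as failure, for example:
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag) {
 *		lpfc_sli_release_iocbq(phba, iocbq);
 *		return -ENOMEM;
 *	}
 *
 * The cleanup shown (releasing the iocbq and returning -ENOMEM) is only one
 * possible policy for this sketch.
 */
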
e59058c4 1752/**
3621a710 1753 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
e59058c4
JS
1754 * @phba: Pointer to HBA context object.
1755 * @pring: Pointer to driver SLI ring object.
1756 * @iocb: Pointer to iocb slot in the ring.
1757 * @nextiocb: Pointer to driver iocb object which need to be
1758 * posted to firmware.
1759 *
1760 * This function is called with hbalock held to post a new iocb to
1761 * the firmware. This function copies the new iocb to ring iocb slot and
1762 * updates the ring pointers. It adds the new iocb to txcmplq if there is
1763 * a completion call back for this iocb else the function will free the
1764 * iocb object.
1765 **/
dea3101e 1766static void
1767lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1768 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1769{
1c2ba475 1770 lockdep_assert_held(&phba->hbalock);
dea3101e 1771 /*
604a3e30 1772 * Set up an iotag
dea3101e 1773 */
604a3e30 1774 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 1775
e2a0a9d6 1776
a58cbd52
JS
1777 if (pring->ringno == LPFC_ELS_RING) {
1778 lpfc_debugfs_slow_ring_trc(phba,
1779 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1780 *(((uint32_t *) &nextiocb->iocb) + 4),
1781 *(((uint32_t *) &nextiocb->iocb) + 6),
1782 *(((uint32_t *) &nextiocb->iocb) + 7));
1783 }
1784
dea3101e 1785 /*
1786 * Issue iocb command to adapter
1787 */
92d7f7b0 1788 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e 1789 wmb();
1790 pring->stats.iocb_cmd++;
1791
1792 /*
1793 * If there is no completion routine to call, we can release the
1794 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1795 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1796 */
1797 if (nextiocb->iocb_cmpl)
1798 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 1799 else
2e0fef85 1800 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e 1801
1802 /*
1803 * Let the HBA know what IOCB slot will be the next one the
1804 * driver will put a command into.
1805 */
7e56aa25
JS
1806 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1807 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e 1808}
1809
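/*
 * Descriptive note (added for clarity): in lpfc_sli_submit_iocb() the wmb()
 * after the bcopy ensures the IOCB contents written into the ring slot are
 * visible before the command put index (cmdPutInx) is advanced with writel(),
 * so the HBA never fetches a slot whose contents are still in flight.
 */
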
e59058c4 1810/**
3621a710 1811 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1812 * @phba: Pointer to HBA context object.
1813 * @pring: Pointer to driver SLI ring object.
1814 *
1815 * The caller is not required to hold any lock for calling this function.
1816 * This function updates the chip attention bits for the ring to inform firmware
 1817 * that there is pending work to be done for this ring and requests an
1818 * interrupt when there is space available in the ring. This function is
1819 * called when the driver is unable to post more iocbs to the ring due
1820 * to unavailability of space in the ring.
1821 **/
dea3101e 1822static void
2e0fef85 1823lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1824{
1825 int ringno = pring->ringno;
1826
1827 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1828
1829 wmb();
1830
1831 /*
1832 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1833 * The HBA will tell us when an IOCB entry is available.
1834 */
1835 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1836 readl(phba->CAregaddr); /* flush */
1837
1838 pring->stats.iocb_cmd_full++;
1839}
1840
e59058c4 1841/**
3621a710 1842 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1843 * @phba: Pointer to HBA context object.
1844 * @pring: Pointer to driver SLI ring object.
1845 *
1846 * This function updates the chip attention register bit for the
1847 * given ring to inform HBA that there is more work to be done
1848 * in this ring. The caller is not required to hold any lock.
1849 **/
dea3101e 1850static void
2e0fef85 1851lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1852{
1853 int ringno = pring->ringno;
1854
1855 /*
1856 * Tell the HBA that there is work to do in this ring.
1857 */
34b02dcd
JS
1858 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1859 wmb();
1860 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1861 readl(phba->CAregaddr); /* flush */
1862 }
dea3101e 1863}
1864
e59058c4 1865/**
3621a710 1866 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1867 * @phba: Pointer to HBA context object.
1868 * @pring: Pointer to driver SLI ring object.
1869 *
1870 * This function is called with hbalock held to post pending iocbs
1871 * in the txq to the firmware. This function is called when driver
1872 * detects space available in the ring.
1873 **/
dea3101e 1874static void
2e0fef85 1875lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1876{
1877 IOCB_t *iocb;
1878 struct lpfc_iocbq *nextiocb;
1879
1c2ba475
JT
1880 lockdep_assert_held(&phba->hbalock);
1881
dea3101e 1882 /*
1883 * Check to see if:
1884 * (a) there is anything on the txq to send
1885 * (b) link is up
1886 * (c) link attention events can be processed (fcp ring only)
1887 * (d) IOCB processing is not blocked by the outstanding mbox command.
1888 */
0e9bb8d7
JS
1889
1890 if (lpfc_is_link_up(phba) &&
1891 (!list_empty(&pring->txq)) &&
895427bd 1892 (pring->ringno != LPFC_FCP_RING ||
0b727fea 1893 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1894
1895 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1896 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1897 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1898
1899 if (iocb)
1900 lpfc_sli_update_ring(phba, pring);
1901 else
1902 lpfc_sli_update_full_ring(phba, pring);
1903 }
1904
1905 return;
1906}
1907
e59058c4 1908/**
3621a710 1909 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1910 * @phba: Pointer to HBA context object.
1911 * @hbqno: HBQ number.
1912 *
1913 * This function is called with hbalock held to get the next
 1914 * available slot for the given HBQ. If there is a free slot
 1915 * available for the HBQ, it will return a pointer to the next available
 1916 * HBQ entry; else it will return NULL.
1917 **/
a6ababd2 1918static struct lpfc_hbq_entry *
ed957684
JS
1919lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1920{
1921 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1922
1c2ba475
JT
1923 lockdep_assert_held(&phba->hbalock);
1924
ed957684
JS
1925 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1926 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1927 hbqp->next_hbqPutIdx = 0;
1928
1929 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1930 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1931 uint32_t getidx = le32_to_cpu(raw_index);
1932
1933 hbqp->local_hbqGetIdx = getidx;
1934
1935 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1936 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1937 LOG_SLI | LOG_VPORT,
e8b62011 1938 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1939 "%u is > than hbqp->entry_count %u\n",
e8b62011 1940 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1941 hbqp->entry_count);
1942
1943 phba->link_state = LPFC_HBA_ERROR;
1944 return NULL;
1945 }
1946
1947 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1948 return NULL;
1949 }
1950
51ef4c26
JS
1951 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1952 hbqp->hbqPutIdx;
ed957684
JS
1953}
1954
e59058c4 1955/**
3621a710 1956 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1957 * @phba: Pointer to HBA context object.
1958 *
1959 * This function is called with no lock held to free all the
1960 * hbq buffers while uninitializing the SLI interface. It also
1961 * frees the HBQ buffers returned by the firmware but not yet
1962 * processed by the upper layers.
1963 **/
ed957684
JS
1964void
1965lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1966{
92d7f7b0
JS
1967 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1968 struct hbq_dmabuf *hbq_buf;
3163f725 1969 unsigned long flags;
51ef4c26 1970 int i, hbq_count;
ed957684 1971
51ef4c26 1972 hbq_count = lpfc_sli_hbq_count();
ed957684 1973 /* Return all memory used by all HBQs */
3163f725 1974 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1975 for (i = 0; i < hbq_count; ++i) {
1976 list_for_each_entry_safe(dmabuf, next_dmabuf,
1977 &phba->hbqs[i].hbq_buffer_list, list) {
1978 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1979 list_del(&hbq_buf->dbuf.list);
1980 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1981 }
a8adb832 1982 phba->hbqs[i].buffer_count = 0;
ed957684 1983 }
3163f725
JS
1984
1985 /* Mark the HBQs not in use */
1986 phba->hbq_in_use = 0;
1987 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
1988}
1989
e59058c4 1990/**
3621a710 1991 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
1992 * @phba: Pointer to HBA context object.
1993 * @hbqno: HBQ number.
1994 * @hbq_buf: Pointer to HBQ buffer.
1995 *
1996 * This function is called with the hbalock held to post a
1997 * hbq buffer to the firmware. If the function finds an empty
1998 * slot in the HBQ, it will post the buffer. The function will return
 1999 * zero if it successfully posts the buffer, else it will
 2000 * return an error.
2001 **/
3772a991 2002static int
ed957684 2003lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 2004 struct hbq_dmabuf *hbq_buf)
3772a991 2005{
1c2ba475 2006 lockdep_assert_held(&phba->hbalock);
3772a991
JS
2007 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2008}
2009
2010/**
2011 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2012 * @phba: Pointer to HBA context object.
2013 * @hbqno: HBQ number.
2014 * @hbq_buf: Pointer to HBQ buffer.
2015 *
2016 * This function is called with the hbalock held to post a hbq buffer to the
2017 * firmware. If the function finds an empty slot in the HBQ, it will post the
2018 * buffer and place it on the hbq_buffer_list. The function will return zero if
2019 * it successfully post the buffer else it will return an error.
2020 **/
2021static int
2022lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2023 struct hbq_dmabuf *hbq_buf)
ed957684
JS
2024{
2025 struct lpfc_hbq_entry *hbqe;
92d7f7b0 2026 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684 2027
1c2ba475 2028 lockdep_assert_held(&phba->hbalock);
ed957684
JS
2029 /* Get next HBQ entry slot to use */
2030 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2031 if (hbqe) {
2032 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2033
92d7f7b0
JS
2034 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2035 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
895427bd 2036 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
ed957684 2037 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
2038 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2039 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2040 /* Sync SLIM */
ed957684
JS
2041 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2042 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 2043 /* flush */
ed957684 2044 readl(phba->hbq_put + hbqno);
51ef4c26 2045 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
2046 return 0;
2047 } else
2048 return -ENOMEM;
ed957684
JS
2049}
2050
4f774513
JS
2051/**
2052 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2053 * @phba: Pointer to HBA context object.
2054 * @hbqno: HBQ number.
2055 * @hbq_buf: Pointer to HBQ buffer.
2056 *
2057 * This function is called with the hbalock held to post an RQE to the SLI4
2058 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2059 * the hbq_buffer_list and return zero, otherwise it will return an error.
2060 **/
2061static int
2062lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2063 struct hbq_dmabuf *hbq_buf)
2064{
2065 int rc;
2066 struct lpfc_rqe hrqe;
2067 struct lpfc_rqe drqe;
895427bd
JS
2068 struct lpfc_queue *hrq;
2069 struct lpfc_queue *drq;
2070
2071 if (hbqno != LPFC_ELS_HBQ)
2072 return 1;
2073 hrq = phba->sli4_hba.hdr_rq;
2074 drq = phba->sli4_hba.dat_rq;
4f774513 2075
1c2ba475 2076 lockdep_assert_held(&phba->hbalock);
4f774513
JS
2077 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2078 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2079 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2080 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
895427bd 2081 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
4f774513
JS
2082 if (rc < 0)
2083 return rc;
895427bd 2084 hbq_buf->tag = (rc | (hbqno << 16));
4f774513
JS
2085 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2086 return 0;
2087}
2088
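/*
 * Descriptive note on the buffer tag encoding used above (added for
 * clarity): lpfc_sli_hbq_to_firmware_s4() stores the RQ put index returned
 * by lpfc_sli4_rq_put() in the low 16 bits and the HBQ number in the high 16
 * bits (tag = rc | (hbqno << 16)). For example, a put index of 5 on HBQ
 * number 1 would yield tag 0x00010005, and lookup code such as
 * lpfc_sli_hbqbuf_find() recovers the HBQ number with "tag >> 16".
 */
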
e59058c4 2089/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
2090static struct lpfc_hbq_init lpfc_els_hbq = {
2091 .rn = 1,
def9c7a9 2092 .entry_count = 256,
92d7f7b0
JS
2093 .mask_count = 0,
2094 .profile = 0,
51ef4c26 2095 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 2096 .buffer_count = 0,
a257bf90
JS
2097 .init_count = 40,
2098 .add_count = 40,
92d7f7b0 2099};
ed957684 2100
e59058c4 2101/* Array of HBQs */
78b2d852 2102struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0
JS
2103 &lpfc_els_hbq,
2104};
ed957684 2105
e59058c4 2106/**
3621a710 2107 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
2108 * @phba: Pointer to HBA context object.
2109 * @hbqno: HBQ number.
2110 * @count: Number of HBQ buffers to be posted.
2111 *
d7c255b2
JS
2112 * This function is called with no lock held to post more hbq buffers to the
2113 * given HBQ. The function returns the number of HBQ buffers successfully
2114 * posted.
e59058c4 2115 **/
311464ec 2116static int
92d7f7b0 2117lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 2118{
d7c255b2 2119 uint32_t i, posted = 0;
3163f725 2120 unsigned long flags;
92d7f7b0 2121 struct hbq_dmabuf *hbq_buffer;
d7c255b2 2122 LIST_HEAD(hbq_buf_list);
eafe1df9 2123 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 2124 return 0;
51ef4c26 2125
d7c255b2
JS
2126 if ((phba->hbqs[hbqno].buffer_count + count) >
2127 lpfc_hbq_defs[hbqno]->entry_count)
2128 count = lpfc_hbq_defs[hbqno]->entry_count -
2129 phba->hbqs[hbqno].buffer_count;
2130 if (!count)
2131 return 0;
2132 /* Allocate HBQ entries */
2133 for (i = 0; i < count; i++) {
2134 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2135 if (!hbq_buffer)
2136 break;
2137 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2138 }
3163f725
JS
2139 /* Check whether HBQ is still in use */
2140 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 2141 if (!phba->hbq_in_use)
d7c255b2
JS
2142 goto err;
2143 while (!list_empty(&hbq_buf_list)) {
2144 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2145 dbuf.list);
2146 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2147 (hbqno << 16));
3772a991 2148 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 2149 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
2150 posted++;
2151 } else
51ef4c26 2152 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 2153 }
3163f725 2154 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2155 return posted;
2156err:
eafe1df9 2157 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2158 while (!list_empty(&hbq_buf_list)) {
2159 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2160 dbuf.list);
2161 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2162 }
2163 return 0;
ed957684
JS
2164}
2165
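/*
 * Descriptive note (added for clarity): lpfc_sli_hbqbuf_fill_hbqs() above
 * deliberately allocates the requested buffers onto a private list before
 * taking the hbalock, then posts them to the firmware under the lock; any
 * buffers that cannot be posted (or that arrive after the HBQ has been
 * marked not in use) are freed again, and only the number actually posted
 * is returned.
 */
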
e59058c4 2166/**
3621a710 2167 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
2168 * @phba: Pointer to HBA context object.
2169 * @qno: HBQ number.
2170 *
2171 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
2172 * is called with no lock held. The function returns the number of HBQ entries
 2173 * successfully posted.
e59058c4 2174 **/
92d7f7b0
JS
2175int
2176lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 2177{
def9c7a9
JS
2178 if (phba->sli_rev == LPFC_SLI_REV4)
2179 return 0;
2180 else
2181 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2182 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 2183}
ed957684 2184
e59058c4 2185/**
3621a710 2186 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
2187 * @phba: Pointer to HBA context object.
2188 * @qno: HBQ queue number.
2189 *
2190 * This function is called from SLI initialization code path with
2191 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 2192 * function returns the number of HBQ entries successfully posted.
e59058c4 2193 **/
a6ababd2 2194static int
92d7f7b0
JS
2195lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2196{
def9c7a9
JS
2197 if (phba->sli_rev == LPFC_SLI_REV4)
2198 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 2199 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
2200 else
2201 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2202 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
2203}
2204
3772a991
JS
2205/**
2206 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 2207 * @rb_list: Pointer to the driver hbq buffer list from which the first
 2208 * buffer is removed.
2209 *
2210 * This function removes the first hbq buffer on an hbq list and returns a
2211 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2212 **/
2213static struct hbq_dmabuf *
2214lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2215{
2216 struct lpfc_dmabuf *d_buf;
2217
2218 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2219 if (!d_buf)
2220 return NULL;
2221 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2222}
2223
2d7dbc4c
JS
2224/**
2225 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2226 * @phba: Pointer to HBA context object.
 2227 * @hrq: Pointer to the header receive queue.
2228 *
2229 * This function removes the first RQ buffer on an RQ buffer list and returns a
2230 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2231 **/
2232static struct rqb_dmabuf *
2233lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2234{
2235 struct lpfc_dmabuf *h_buf;
2236 struct lpfc_rqb *rqbp;
2237
2238 rqbp = hrq->rqbp;
2239 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2240 struct lpfc_dmabuf, list);
2241 if (!h_buf)
2242 return NULL;
2243 rqbp->buffer_count--;
2244 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2245}
2246
e59058c4 2247/**
3621a710 2248 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
2249 * @phba: Pointer to HBA context object.
2250 * @tag: Tag of the hbq buffer.
2251 *
71892418
SH
2252 * This function searches for the hbq buffer associated with the given tag in
2253 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2254 * otherwise it returns NULL.
e59058c4 2255 **/
a6ababd2 2256static struct hbq_dmabuf *
92d7f7b0 2257lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 2258{
92d7f7b0
JS
2259 struct lpfc_dmabuf *d_buf;
2260 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
2261 uint32_t hbqno;
2262
2263 hbqno = tag >> 16;
a0a74e45 2264 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 2265 return NULL;
ed957684 2266
3772a991 2267 spin_lock_irq(&phba->hbalock);
51ef4c26 2268 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 2269 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 2270 if (hbq_buf->tag == tag) {
3772a991 2271 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2272 return hbq_buf;
ed957684
JS
2273 }
2274 }
3772a991 2275 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2276 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2277 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2278 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2279 return NULL;
ed957684
JS
2280}
2281
e59058c4 2282/**
3621a710 2283 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2284 * @phba: Pointer to HBA context object.
2285 * @hbq_buffer: Pointer to HBQ buffer.
2286 *
 2287 * This function is called with the hbalock held. This function gives back
2288 * the hbq buffer to firmware. If the HBQ does not have space to
2289 * post the buffer, it will free the buffer.
2290 **/
ed957684 2291void
51ef4c26 2292lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2293{
2294 uint32_t hbqno;
2295
51ef4c26
JS
2296 if (hbq_buffer) {
2297 hbqno = hbq_buffer->tag >> 16;
3772a991 2298 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2299 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2300 }
2301}
2302
e59058c4 2303/**
3621a710 2304 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2305 * @mbxCommand: mailbox command code.
2306 *
2307 * This function is called by the mailbox event handler function to verify
2308 * that the completed mailbox command is a legitimate mailbox command. If the
2309 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2310 * and the mailbox event handler will take the HBA offline.
2311 **/
dea3101e 2312static int
2313lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2314{
2315 uint8_t ret;
2316
2317 switch (mbxCommand) {
2318 case MBX_LOAD_SM:
2319 case MBX_READ_NV:
2320 case MBX_WRITE_NV:
a8adb832 2321 case MBX_WRITE_VPARMS:
dea3101e 2322 case MBX_RUN_BIU_DIAG:
2323 case MBX_INIT_LINK:
2324 case MBX_DOWN_LINK:
2325 case MBX_CONFIG_LINK:
2326 case MBX_CONFIG_RING:
2327 case MBX_RESET_RING:
2328 case MBX_READ_CONFIG:
2329 case MBX_READ_RCONFIG:
2330 case MBX_READ_SPARM:
2331 case MBX_READ_STATUS:
2332 case MBX_READ_RPI:
2333 case MBX_READ_XRI:
2334 case MBX_READ_REV:
2335 case MBX_READ_LNK_STAT:
2336 case MBX_REG_LOGIN:
2337 case MBX_UNREG_LOGIN:
dea3101e 2338 case MBX_CLEAR_LA:
2339 case MBX_DUMP_MEMORY:
2340 case MBX_DUMP_CONTEXT:
2341 case MBX_RUN_DIAGS:
2342 case MBX_RESTART:
2343 case MBX_UPDATE_CFG:
2344 case MBX_DOWN_LOAD:
2345 case MBX_DEL_LD_ENTRY:
2346 case MBX_RUN_PROGRAM:
2347 case MBX_SET_MASK:
09372820 2348 case MBX_SET_VARIABLE:
dea3101e 2349 case MBX_UNREG_D_ID:
41415862 2350 case MBX_KILL_BOARD:
dea3101e 2351 case MBX_CONFIG_FARP:
41415862 2352 case MBX_BEACON:
dea3101e 2353 case MBX_LOAD_AREA:
2354 case MBX_RUN_BIU_DIAG64:
2355 case MBX_CONFIG_PORT:
2356 case MBX_READ_SPARM64:
2357 case MBX_READ_RPI64:
2358 case MBX_REG_LOGIN64:
76a95d75 2359 case MBX_READ_TOPOLOGY:
09372820 2360 case MBX_WRITE_WWN:
dea3101e 2361 case MBX_SET_DEBUG:
2362 case MBX_LOAD_EXP_ROM:
57127f15 2363 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2364 case MBX_REG_VPI:
2365 case MBX_UNREG_VPI:
858c9f6c 2366 case MBX_HEARTBEAT:
84774a4d
JS
2367 case MBX_PORT_CAPABILITIES:
2368 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2369 case MBX_SLI4_CONFIG:
2370 case MBX_SLI4_REQ_FTRS:
2371 case MBX_REG_FCFI:
2372 case MBX_UNREG_FCFI:
2373 case MBX_REG_VFI:
2374 case MBX_UNREG_VFI:
2375 case MBX_INIT_VPI:
2376 case MBX_INIT_VFI:
2377 case MBX_RESUME_RPI:
c7495937
JS
2378 case MBX_READ_EVENT_LOG_STATUS:
2379 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2380 case MBX_SECURITY_MGMT:
2381 case MBX_AUTH_PORT:
940eb687 2382 case MBX_ACCESS_VDATA:
dea3101e 2383 ret = mbxCommand;
2384 break;
2385 default:
2386 ret = MBX_SHUTDOWN;
2387 break;
2388 }
2e0fef85 2389 return ret;
dea3101e 2390}
e59058c4
JS
2391
2392/**
3621a710 2393 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2394 * @phba: Pointer to HBA context object.
2395 * @pmboxq: Pointer to mailbox command.
2396 *
2397 * This is completion handler function for mailbox commands issued from
2398 * lpfc_sli_issue_mbox_wait function. This function is called by the
2399 * mailbox event handler function with no lock held. This function
 2400 * will wake up the thread waiting on the completion pointed to by
 2401 * context3 of the mailbox.
2402 **/
04c68496 2403void
2e0fef85 2404lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2405{
858c9f6c 2406 unsigned long drvr_flag;
e29d74f8 2407 struct completion *pmbox_done;
dea3101e 2408
2409 /*
e29d74f8 2410 * If pmbox_done is empty, the driver thread gave up waiting and
dea3101e 2411 * continued running.
2412 */
7054a606 2413 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2414 spin_lock_irqsave(&phba->hbalock, drvr_flag);
e29d74f8
JS
2415 pmbox_done = (struct completion *)pmboxq->context3;
2416 if (pmbox_done)
2417 complete(pmbox_done);
858c9f6c 2418 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2419 return;
2420}
2421
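/*
 * Illustrative sketch of the issuing side (assumption based on how this
 * handler consumes context3, not a copy of the real caller): a synchronous
 * issuer pairs with lpfc_sli_wake_mbox_wait() roughly as follows:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_for_completion_timeout(&mbox_done,
 *					    msecs_to_jiffies(timeout * 1000));
 *
 * where "timeout" is a caller-chosen interval in seconds for this sketch.
 */
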
e59058c4
JS
2422
2423/**
3621a710 2424 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2425 * @phba: Pointer to HBA context object.
2426 * @pmb: Pointer to mailbox object.
2427 *
2428 * This function is the default mailbox completion handler. It
2429 * frees the memory resources associated with the completed mailbox
2430 * command. If the completed command is a REG_LOGIN mailbox command,
2431 * this function will issue a UREG_LOGIN to re-claim the RPI.
2432 **/
dea3101e 2433void
2e0fef85 2434lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2435{
d439d286 2436 struct lpfc_vport *vport = pmb->vport;
dea3101e 2437 struct lpfc_dmabuf *mp;
d439d286 2438 struct lpfc_nodelist *ndlp;
5af5eee7 2439 struct Scsi_Host *shost;
04c68496 2440 uint16_t rpi, vpi;
7054a606
JS
2441 int rc;
2442
3e1f0718 2443 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
7054a606 2444
dea3101e 2445 if (mp) {
2446 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2447 kfree(mp);
2448 }
7054a606
JS
2449
2450 /*
2451 * If a REG_LOGIN succeeded after node is destroyed or node
2452 * is in re-discovery driver need to cleanup the RPI.
2453 */
2e0fef85 2454 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2455 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2456 !pmb->u.mb.mbxStatus) {
2457 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2458 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2459 lpfc_unreg_login(phba, vpi, rpi, pmb);
de96e9c5 2460 pmb->vport = vport;
92d7f7b0 2461 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2462 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2463 if (rc != MBX_NOT_FINISHED)
2464 return;
2465 }
2466
695a814e
JS
2467 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2468 !(phba->pport->load_flag & FC_UNLOADING) &&
2469 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2470 shost = lpfc_shost_from_vport(vport);
2471 spin_lock_irq(shost->host_lock);
2472 vport->vpi_state |= LPFC_VPI_REGISTERED;
2473 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2474 spin_unlock_irq(shost->host_lock);
695a814e
JS
2475 }
2476
d439d286 2477 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 2478 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
d439d286 2479 lpfc_nlp_put(ndlp);
dea16bda
JS
2480 pmb->ctx_buf = NULL;
2481 pmb->ctx_ndlp = NULL;
2482 }
2483
2484 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2486
2487 /* Check to see if there are any deferred events to process */
2488 if (ndlp) {
2489 lpfc_printf_vlog(
2490 vport,
2491 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2492 "1438 UNREG cmpl deferred mbox x%x "
2493 "on NPort x%x Data: x%x x%x %p\n",
2494 ndlp->nlp_rpi, ndlp->nlp_DID,
2495 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2496
2497 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2498 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
00292e03 2499 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2500 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2501 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
00292e03
JS
2502 } else {
2503 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda 2504 }
9b164068 2505 pmb->ctx_ndlp = NULL;
dea16bda 2506 }
d439d286
JS
2507 }
2508
dcf2a4e0
JS
2509 /* Check security permission status on INIT_LINK mailbox command */
2510 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2511 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2512 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2513 "2860 SLI authentication is required "
2514 "for INIT_LINK but has not done yet\n");
2515
04c68496
JS
2516 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2517 lpfc_sli4_mbox_cmd_free(phba, pmb);
2518 else
2519 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2520}
be6bb941
JS
2521 /**
2522 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2523 * @phba: Pointer to HBA context object.
2524 * @pmb: Pointer to mailbox object.
2525 *
2526 * This function is the unreg rpi mailbox completion handler. It
2527 * frees the memory resources associated with the completed mailbox
 2528 * command. An additional reference is put on the ndlp to prevent
 2529 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 2530 * the unreg mailbox command completes; this routine puts the
 2531 * reference back.
2532 *
2533 **/
2534void
2535lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2536{
2537 struct lpfc_vport *vport = pmb->vport;
2538 struct lpfc_nodelist *ndlp;
2539
3e1f0718 2540 ndlp = pmb->ctx_ndlp;
be6bb941
JS
2541 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2542 if (phba->sli_rev == LPFC_SLI_REV4 &&
2543 (bf_get(lpfc_sli_intf_if_type,
27d6ac0a 2544 &phba->sli4_hba.sli_intf) >=
be6bb941
JS
2545 LPFC_SLI_INTF_IF_TYPE_2)) {
2546 if (ndlp) {
dea16bda
JS
2547 lpfc_printf_vlog(
2548 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2549 "0010 UNREG_LOGIN vpi:%x "
2550 "rpi:%x DID:%x defer x%x flg x%x "
2551 "map:%x %p\n",
2552 vport->vpi, ndlp->nlp_rpi,
2553 ndlp->nlp_DID, ndlp->nlp_defer_did,
2554 ndlp->nlp_flag,
2555 ndlp->nlp_usg_map, ndlp);
7c5e518c 2556 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
be6bb941 2557 lpfc_nlp_put(ndlp);
dea16bda
JS
2558
2559 /* Check to see if there are any deferred
2560 * events to process
2561 */
2562 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2563 (ndlp->nlp_defer_did !=
2564 NLP_EVT_NOTHING_PENDING)) {
2565 lpfc_printf_vlog(
2566 vport, KERN_INFO, LOG_DISCOVERY,
2567 "4111 UNREG cmpl deferred "
2568 "clr x%x on "
2569 "NPort x%x Data: x%x %p\n",
2570 ndlp->nlp_rpi, ndlp->nlp_DID,
2571 ndlp->nlp_defer_did, ndlp);
00292e03 2572 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2573 ndlp->nlp_defer_did =
2574 NLP_EVT_NOTHING_PENDING;
2575 lpfc_issue_els_plogi(
2576 vport, ndlp->nlp_DID, 0);
00292e03
JS
2577 } else {
2578 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda 2579 }
be6bb941
JS
2580 }
2581 }
2582 }
2583
2584 mempool_free(pmb, phba->mbox_mem_pool);
2585}
dea3101e 2586
e59058c4 2587/**
3621a710 2588 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2589 * @phba: Pointer to HBA context object.
2590 *
2591 * This function is called with no lock held. This function processes all
2592 * the completed mailbox commands and gives it to upper layers. The interrupt
2593 * service routine processes mailbox completion interrupt and adds completed
2594 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 2595 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2596 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2597 * function returns the mailbox commands to the upper layer by calling the
2598 * completion handler function of each mailbox.
2599 **/
dea3101e 2600int
2e0fef85 2601lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2602{
92d7f7b0 2603 MAILBOX_t *pmbox;
dea3101e 2604 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2605 int rc;
2606 LIST_HEAD(cmplq);
dea3101e 2607
2608 phba->sli.slistat.mbox_event++;
2609
92d7f7b0
JS
 2610 /* Get all completed mailbox buffers into the cmplq */
2611 spin_lock_irq(&phba->hbalock);
2612 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2613 spin_unlock_irq(&phba->hbalock);
dea3101e 2614
92d7f7b0
JS
2615 /* Get a Mailbox buffer to setup mailbox commands for callback */
2616 do {
2617 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2618 if (pmb == NULL)
2619 break;
2e0fef85 2620
04c68496 2621 pmbox = &pmb->u.mb;
dea3101e 2622
858c9f6c
JS
2623 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2624 if (pmb->vport) {
2625 lpfc_debugfs_disc_trc(pmb->vport,
2626 LPFC_DISC_TRC_MBOX_VPORT,
2627 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2628 (uint32_t)pmbox->mbxCommand,
2629 pmbox->un.varWords[0],
2630 pmbox->un.varWords[1]);
2631 }
2632 else {
2633 lpfc_debugfs_disc_trc(phba->pport,
2634 LPFC_DISC_TRC_MBOX,
2635 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2636 (uint32_t)pmbox->mbxCommand,
2637 pmbox->un.varWords[0],
2638 pmbox->un.varWords[1]);
2639 }
2640 }
2641
dea3101e 2642 /*
2643 * It is a fatal error if unknown mbox command completion.
2644 */
2645 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2646 MBX_SHUTDOWN) {
af901ca1 2647 /* Unknown mailbox command compl */
92d7f7b0 2648 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2649 "(%d):0323 Unknown Mailbox command "
a183a15f 2650 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2651 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2652 pmbox->mbxCommand,
a183a15f
JS
2653 lpfc_sli_config_mbox_subsys_get(phba,
2654 pmb),
2655 lpfc_sli_config_mbox_opcode_get(phba,
2656 pmb));
2e0fef85 2657 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2658 phba->work_hs = HS_FFER3;
2659 lpfc_handle_eratt(phba);
92d7f7b0 2660 continue;
dea3101e 2661 }
2662
dea3101e 2663 if (pmbox->mbxStatus) {
2664 phba->sli.slistat.mbox_stat_err++;
2665 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2666 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2667 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2668 LOG_MBOX | LOG_SLI,
2669 "(%d):0305 Mbox cmd cmpl "
2670 "error - RETRYing Data: x%x "
2671 "(x%x/x%x) x%x x%x x%x\n",
2672 pmb->vport ? pmb->vport->vpi : 0,
2673 pmbox->mbxCommand,
2674 lpfc_sli_config_mbox_subsys_get(phba,
2675 pmb),
2676 lpfc_sli_config_mbox_opcode_get(phba,
2677 pmb),
2678 pmbox->mbxStatus,
2679 pmbox->un.varWords[0],
2680 pmb->vport->port_state);
dea3101e 2681 pmbox->mbxStatus = 0;
2682 pmbox->mbxOwner = OWN_HOST;
dea3101e 2683 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2684 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2685 continue;
dea3101e 2686 }
2687 }
2688
2689 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2690 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 2691 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
e74c03c8
JS
2692 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2693 "x%x x%x x%x\n",
92d7f7b0 2694 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2695 pmbox->mbxCommand,
a183a15f
JS
2696 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2697 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2698 pmb->mbox_cmpl,
2699 *((uint32_t *) pmbox),
2700 pmbox->un.varWords[0],
2701 pmbox->un.varWords[1],
2702 pmbox->un.varWords[2],
2703 pmbox->un.varWords[3],
2704 pmbox->un.varWords[4],
2705 pmbox->un.varWords[5],
2706 pmbox->un.varWords[6],
e74c03c8
JS
2707 pmbox->un.varWords[7],
2708 pmbox->un.varWords[8],
2709 pmbox->un.varWords[9],
2710 pmbox->un.varWords[10]);
dea3101e 2711
92d7f7b0 2712 if (pmb->mbox_cmpl)
dea3101e 2713 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2714 } while (1);
2715 return 0;
2716}
dea3101e 2717
e59058c4 2718/**
3621a710 2719 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2720 * @phba: Pointer to HBA context object.
2721 * @pring: Pointer to driver SLI ring object.
2722 * @tag: buffer tag.
2723 *
 2724 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 2725 * is set in the tag, the buffer was posted for a particular exchange and
 2726 * the function will return the buffer without replacing it.
2727 * If the buffer is for unsolicited ELS or CT traffic, this function
2728 * returns the buffer and also posts another buffer to the firmware.
2729 **/
76bb24ef
JS
2730static struct lpfc_dmabuf *
2731lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2732 struct lpfc_sli_ring *pring,
2733 uint32_t tag)
76bb24ef 2734{
9f1e1b50
JS
2735 struct hbq_dmabuf *hbq_entry;
2736
76bb24ef
JS
2737 if (tag & QUE_BUFTAG_BIT)
2738 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2739 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2740 if (!hbq_entry)
2741 return NULL;
2742 return &hbq_entry->dbuf;
76bb24ef 2743}
57127f15 2744
3772a991
JS
2745/**
2746 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2747 * @phba: Pointer to HBA context object.
2748 * @pring: Pointer to driver SLI ring object.
2749 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2750 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2751 * @fch_type: the type for the first frame of the sequence.
2752 *
2753 * This function is called with no lock held. This function uses the r_ctl and
2754 * type of the received sequence to find the correct callback function to call
2755 * to process the sequence.
2756 **/
2757static int
2758lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2759 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2760 uint32_t fch_type)
2761{
2762 int i;
2763
f358dd0c
JS
2764 switch (fch_type) {
2765 case FC_TYPE_NVME:
d613b6a7 2766 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
f358dd0c
JS
2767 return 1;
2768 default:
2769 break;
2770 }
2771
3772a991
JS
2772 /* unSolicited Responses */
2773 if (pring->prt[0].profile) {
2774 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2775 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2776 saveq);
2777 return 1;
2778 }
2779 /* We must search, based on rctl / type
2780 for the right routine */
2781 for (i = 0; i < pring->num_mask; i++) {
2782 if ((pring->prt[i].rctl == fch_r_ctl) &&
2783 (pring->prt[i].type == fch_type)) {
2784 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2785 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2786 (phba, pring, saveq);
2787 return 1;
2788 }
2789 }
2790 return 0;
2791}
e59058c4
JS
2792
2793/**
3621a710 2794 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2795 * @phba: Pointer to HBA context object.
2796 * @pring: Pointer to driver SLI ring object.
2797 * @saveq: Pointer to the unsolicited iocb.
2798 *
2799 * This function is called with no lock held by the ring event handler
2800 * when there is an unsolicited iocb posted to the response ring by the
2801 * firmware. This function gets the buffer associated with the iocbs
2802 * and calls the event handler for the ring. This function handles both
2803 * qring buffers and hbq buffers.
2804 * When the function returns 1 the caller can free the iocb object otherwise
2805 * upper layer functions will free the iocb objects.
2806 **/
dea3101e 2807static int
2808lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2809 struct lpfc_iocbq *saveq)
2810{
2811 IOCB_t * irsp;
2812 WORD5 * w5p;
2813 uint32_t Rctl, Type;
76bb24ef 2814 struct lpfc_iocbq *iocbq;
3163f725 2815 struct lpfc_dmabuf *dmzbuf;
dea3101e 2816
dea3101e 2817 irsp = &(saveq->iocb);
57127f15
JS
2818
2819 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2820 if (pring->lpfc_sli_rcv_async_status)
2821 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2822 else
2823 lpfc_printf_log(phba,
2824 KERN_WARNING,
2825 LOG_SLI,
2826 "0316 Ring %d handler: unexpected "
2827 "ASYNC_STATUS iocb received evt_code "
2828 "0x%x\n",
2829 pring->ringno,
2830 irsp->un.asyncstat.evt_code);
2831 return 1;
2832 }
2833
3163f725
JS
2834 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2835 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2836 if (irsp->ulpBdeCount > 0) {
2837 dmzbuf = lpfc_sli_get_buff(phba, pring,
2838 irsp->un.ulpWord[3]);
2839 lpfc_in_buf_free(phba, dmzbuf);
2840 }
2841
2842 if (irsp->ulpBdeCount > 1) {
2843 dmzbuf = lpfc_sli_get_buff(phba, pring,
2844 irsp->unsli3.sli3Words[3]);
2845 lpfc_in_buf_free(phba, dmzbuf);
2846 }
2847
2848 if (irsp->ulpBdeCount > 2) {
2849 dmzbuf = lpfc_sli_get_buff(phba, pring,
2850 irsp->unsli3.sli3Words[7]);
2851 lpfc_in_buf_free(phba, dmzbuf);
2852 }
2853
2854 return 1;
2855 }
2856
92d7f7b0 2857 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2858 if (irsp->ulpBdeCount != 0) {
2859 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2860 irsp->un.ulpWord[3]);
2861 if (!saveq->context2)
2862 lpfc_printf_log(phba,
2863 KERN_ERR,
2864 LOG_SLI,
2865 "0341 Ring %d Cannot find buffer for "
2866 "an unsolicited iocb. tag 0x%x\n",
2867 pring->ringno,
2868 irsp->un.ulpWord[3]);
76bb24ef
JS
2869 }
2870 if (irsp->ulpBdeCount == 2) {
2871 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2872 irsp->unsli3.sli3Words[7]);
2873 if (!saveq->context3)
2874 lpfc_printf_log(phba,
2875 KERN_ERR,
2876 LOG_SLI,
2877 "0342 Ring %d Cannot find buffer for an"
2878 " unsolicited iocb. tag 0x%x\n",
2879 pring->ringno,
2880 irsp->unsli3.sli3Words[7]);
2881 }
2882 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2883 irsp = &(iocbq->iocb);
76bb24ef
JS
2884 if (irsp->ulpBdeCount != 0) {
2885 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2886 irsp->un.ulpWord[3]);
9c2face6 2887 if (!iocbq->context2)
76bb24ef
JS
2888 lpfc_printf_log(phba,
2889 KERN_ERR,
2890 LOG_SLI,
2891 "0343 Ring %d Cannot find "
2892 "buffer for an unsolicited iocb"
2893 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2894 irsp->un.ulpWord[3]);
76bb24ef
JS
2895 }
2896 if (irsp->ulpBdeCount == 2) {
2897 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2898 irsp->unsli3.sli3Words[7]);
9c2face6 2899 if (!iocbq->context3)
76bb24ef
JS
2900 lpfc_printf_log(phba,
2901 KERN_ERR,
2902 LOG_SLI,
2903 "0344 Ring %d Cannot find "
2904 "buffer for an unsolicited "
2905 "iocb. tag 0x%x\n",
2906 pring->ringno,
2907 irsp->unsli3.sli3Words[7]);
2908 }
2909 }
92d7f7b0 2910 }
9c2face6
JS
2911 if (irsp->ulpBdeCount != 0 &&
2912 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2913 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2914 int found = 0;
2915
2916 /* search continue save q for same XRI */
2917 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2918 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2919 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2920 list_add_tail(&saveq->list, &iocbq->list);
2921 found = 1;
2922 break;
2923 }
2924 }
2925 if (!found)
2926 list_add_tail(&saveq->clist,
2927 &pring->iocb_continue_saveq);
2928 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2929 list_del_init(&iocbq->clist);
2930 saveq = iocbq;
2931 irsp = &(saveq->iocb);
2932 } else
2933 return 0;
2934 }
2935 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2936 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2937 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2938 Rctl = FC_RCTL_ELS_REQ;
2939 Type = FC_TYPE_ELS;
9c2face6
JS
2940 } else {
2941 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2942 Rctl = w5p->hcsw.Rctl;
2943 Type = w5p->hcsw.Type;
2944
2945 /* Firmware Workaround */
2946 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2947 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2948 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2949 Rctl = FC_RCTL_ELS_REQ;
2950 Type = FC_TYPE_ELS;
9c2face6
JS
2951 w5p->hcsw.Rctl = Rctl;
2952 w5p->hcsw.Type = Type;
2953 }
2954 }
92d7f7b0 2955
3772a991 2956 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2957 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2958 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2959 "Type x%x received\n",
e8b62011 2960 pring->ringno, Rctl, Type);
3772a991 2961
92d7f7b0 2962 return 1;
dea3101e 2963}
2964
e59058c4 2965/**
3621a710 2966 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2967 * @phba: Pointer to HBA context object.
2968 * @pring: Pointer to driver SLI ring object.
2969 * @prspiocb: Pointer to response iocb object.
2970 *
2971 * This function looks up the iocb_lookup table to get the command iocb
2972 * corresponding to the given response iocb using the iotag of the
341b2aa8
DK
2973 * response iocb. This function is called with the hbalock held
2974 * for sli3 devices or the ring_lock for sli4 devices.
e59058c4
JS
2975 * This function returns the command iocb object if it finds the command
2976 * iocb else returns NULL.
2977 **/
dea3101e 2978static struct lpfc_iocbq *
2e0fef85
JS
2979lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2980 struct lpfc_sli_ring *pring,
2981 struct lpfc_iocbq *prspiocb)
dea3101e 2982{
dea3101e 2983 struct lpfc_iocbq *cmd_iocb = NULL;
2984 uint16_t iotag;
1c2ba475 2985 lockdep_assert_held(&phba->hbalock);
dea3101e 2986
604a3e30
JB
2987 iotag = prspiocb->iocb.ulpIoTag;
2988
2989 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2990 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6 2991 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
89533e9b
JS
2992 /* remove from txcmpl queue list */
2993 list_del_init(&cmd_iocb->list);
4f2e66c6 2994 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
c490850a 2995 pring->txcmplq_cnt--;
89533e9b 2996 return cmd_iocb;
2a9bf3d0 2997 }
dea3101e 2998 }
2999
dea3101e 3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
89533e9b 3001 "0317 iotag x%x is out of "
604a3e30 3002 "range: max iotag x%x wd0 x%x\n",
e8b62011 3003 iotag, phba->sli.last_iotag,
604a3e30 3004 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 3005 return NULL;
3006}
3007
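/*
 * Descriptive note (added for clarity): the lookup above is O(1) because the
 * command's ulpIoTag is used directly as an index into the
 * phba->sli.iocbq_lookup[] array that lpfc_sli_next_iotag() populated. For
 * example, a response carrying ulpIoTag 5 resolves to
 * phba->sli.iocbq_lookup[5], provided 5 <= phba->sli.last_iotag and the
 * entry is still marked LPFC_IO_ON_TXCMPLQ.
 */
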
3772a991
JS
3008/**
3009 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3010 * @phba: Pointer to HBA context object.
3011 * @pring: Pointer to driver SLI ring object.
3012 * @iotag: IOCB tag.
3013 *
3014 * This function looks up the iocb_lookup table to get the command iocb
3015 * corresponding to the given iotag. This function is called with the
3016 * hbalock held.
3017 * This function returns the command iocb object if it finds the command
3018 * iocb else returns NULL.
3019 **/
3020static struct lpfc_iocbq *
3021lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3022 struct lpfc_sli_ring *pring, uint16_t iotag)
3023{
895427bd 3024 struct lpfc_iocbq *cmd_iocb = NULL;
3772a991 3025
1c2ba475 3026 lockdep_assert_held(&phba->hbalock);
3772a991
JS
3027 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3028 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
3029 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3030 /* remove from txcmpl queue list */
3031 list_del_init(&cmd_iocb->list);
3032 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
c490850a 3033 pring->txcmplq_cnt--;
4f2e66c6 3034 return cmd_iocb;
2a9bf3d0 3035 }
3772a991 3036 }
89533e9b 3037
3772a991 3038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd
JS
3039 "0372 iotag x%x lookup error: max iotag (x%x) "
3040 "iocb_flag x%x\n",
3041 iotag, phba->sli.last_iotag,
3042 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3772a991
JS
3043 return NULL;
3044}
3045
e59058c4 3046/**
3621a710 3047 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
3048 * @phba: Pointer to HBA context object.
3049 * @pring: Pointer to driver SLI ring object.
3050 * @saveq: Pointer to the response iocb to be processed.
3051 *
3052 * This function is called by the ring event handler for non-fcp
3053 * rings when there is a new response iocb in the response ring.
3054 * The caller is not required to hold any locks. This function
3055 * gets the command iocb associated with the response iocb and
3056 * calls the completion handler for the command iocb. If there
3057 * is no completion handler, the function will free the resources
3058 * associated with command iocb. If the response iocb is for
3059 * an already aborted command iocb, the status of the completion
3060 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3061 * This function always returns 1.
3062 **/
dea3101e 3063static int
2e0fef85 3064lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 3065 struct lpfc_iocbq *saveq)
3066{
2e0fef85 3067 struct lpfc_iocbq *cmdiocbp;
dea3101e 3068 int rc = 1;
3069 unsigned long iflag;
3070
3071 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
341b2aa8
DK
3072 if (phba->sli_rev == LPFC_SLI_REV4)
3073 spin_lock_irqsave(&pring->ring_lock, iflag);
3074 else
3075 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 3076 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
341b2aa8
DK
3077 if (phba->sli_rev == LPFC_SLI_REV4)
3078 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3079 else
3080 spin_unlock_irqrestore(&phba->hbalock, iflag);
2e0fef85 3081
dea3101e 3082 if (cmdiocbp) {
3083 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
3084 /*
3085 * If an ELS command failed send an event to mgmt
3086 * application.
3087 */
3088 if (saveq->iocb.ulpStatus &&
3089 (pring->ringno == LPFC_ELS_RING) &&
3090 (cmdiocbp->iocb.ulpCommand ==
3091 CMD_ELS_REQUEST64_CR))
3092 lpfc_send_els_failure_event(phba,
3093 cmdiocbp, saveq);
3094
dea3101e 3095 /*
3096 * Post all ELS completions to the worker thread.
3097 * All other are passed to the completion callback.
3098 */
3099 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
3100 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3101 (cmdiocbp->iocb_flag &
3102 LPFC_DRIVER_ABORTED)) {
3103 spin_lock_irqsave(&phba->hbalock,
3104 iflag);
07951076
JS
3105 cmdiocbp->iocb_flag &=
3106 ~LPFC_DRIVER_ABORTED;
341af102
JS
3107 spin_unlock_irqrestore(&phba->hbalock,
3108 iflag);
07951076
JS
3109 saveq->iocb.ulpStatus =
3110 IOSTAT_LOCAL_REJECT;
3111 saveq->iocb.un.ulpWord[4] =
3112 IOERR_SLI_ABORTED;
0ff10d46
JS
3113
3114 /* Firmware could still be in the middle
3115 * of DMAing the payload, so don't free the
3116 * data buffer until after a heartbeat.
3117 */
341af102
JS
3118 spin_lock_irqsave(&phba->hbalock,
3119 iflag);
0ff10d46 3120 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
3121 spin_unlock_irqrestore(&phba->hbalock,
3122 iflag);
3123 }
0f65ff68
JS
3124 if (phba->sli_rev == LPFC_SLI_REV4) {
3125 if (saveq->iocb_flag &
3126 LPFC_EXCHANGE_BUSY) {
3127 /* Set cmdiocb flag for the
3128 * exchange busy so sgl (xri)
3129 * will not be released until
3130 * the abort xri is received
3131 * from hba.
3132 */
3133 spin_lock_irqsave(
3134 &phba->hbalock, iflag);
3135 cmdiocbp->iocb_flag |=
3136 LPFC_EXCHANGE_BUSY;
3137 spin_unlock_irqrestore(
3138 &phba->hbalock, iflag);
3139 }
3140 if (cmdiocbp->iocb_flag &
3141 LPFC_DRIVER_ABORTED) {
3142 /*
3143 * Clear LPFC_DRIVER_ABORTED
3144 * bit in case it was driver
3145 * initiated abort.
3146 */
3147 spin_lock_irqsave(
3148 &phba->hbalock, iflag);
3149 cmdiocbp->iocb_flag &=
3150 ~LPFC_DRIVER_ABORTED;
3151 spin_unlock_irqrestore(
3152 &phba->hbalock, iflag);
3153 cmdiocbp->iocb.ulpStatus =
3154 IOSTAT_LOCAL_REJECT;
3155 cmdiocbp->iocb.un.ulpWord[4] =
3156 IOERR_ABORT_REQUESTED;
3157 /*
3158 * For SLI4, the irsiocb contains
3159 * NO_XRI in sli_xritag, so it
3160 * shall not affect the sgl (xri)
3161 * release process.
3162 */
3163 saveq->iocb.ulpStatus =
3164 IOSTAT_LOCAL_REJECT;
3165 saveq->iocb.un.ulpWord[4] =
3166 IOERR_SLI_ABORTED;
3167 spin_lock_irqsave(
3168 &phba->hbalock, iflag);
3169 saveq->iocb_flag |=
3170 LPFC_DELAY_MEM_FREE;
3171 spin_unlock_irqrestore(
3172 &phba->hbalock, iflag);
3173 }
07951076 3174 }
dea3101e 3175 }
2e0fef85 3176 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
3177 } else
3178 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 3179 } else {
3180 /*
3181 * Unknown initiating command based on the response iotag.
3182 * This could be the case on the ELS ring because of
3183 * lpfc_els_abort().
3184 */
3185 if (pring->ringno != LPFC_ELS_RING) {
3186 /*
3187 * Ring <ringno> handler: unexpected completion IoTag
3188 * <IoTag>
3189 */
a257bf90 3190 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
3191 "0322 Ring %d handler: "
3192 "unexpected completion IoTag x%x "
3193 "Data: x%x x%x x%x x%x\n",
3194 pring->ringno,
3195 saveq->iocb.ulpIoTag,
3196 saveq->iocb.ulpStatus,
3197 saveq->iocb.un.ulpWord[4],
3198 saveq->iocb.ulpCommand,
3199 saveq->iocb.ulpContext);
dea3101e 3200 }
3201 }
68876920 3202
dea3101e 3203 return rc;
3204}
3205
e59058c4 3206/**
3621a710 3207 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
3208 * @phba: Pointer to HBA context object.
3209 * @pring: Pointer to driver SLI ring object.
3210 *
3211 * This function is called from the iocb ring event handlers when the
3212 * put pointer is ahead of the get pointer for a ring. This function signals
3213 * an error attention condition to the worker thread and the worker
3214 * thread will transition the HBA to the offline state.
3215 **/
2e0fef85
JS
3216static void
3217lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 3218{
34b02dcd 3219 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 3220 /*
025dfdaf 3221 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
3222 * rsp ring <portRspMax>
3223 */
3224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3225 "0312 Ring %d handler: portRspPut %d "
025dfdaf 3226 "is bigger than rsp ring %d\n",
e8b62011 3227 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 3228 pring->sli.sli3.numRiocb);
875fbdfe 3229
2e0fef85 3230 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
3231
3232 /*
3233 * All error attention handlers are posted to
3234 * worker thread
3235 */
3236 phba->work_ha |= HA_ERATT;
3237 phba->work_hs = HS_FFER3;
92d7f7b0 3238
5e9d9b82 3239 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
3240
3241 return;
3242}
3243
9399627f 3244/**
3621a710 3245 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
3246 * @t: Pointer to the eratt_poll timer embedded in the HBA context object.
3247 *
3248 * This function is invoked by the Error Attention polling timer when the
3249 * timer times out. It will check the SLI Error Attention register for
3250 * possible attention events. If any are found, it will post an Error Attention
3251 * event and wake up the worker thread to process it. Otherwise, it will set up the
3252 * Error Attention polling timer for the next poll.
3253 **/
f22eb4d3 3254void lpfc_poll_eratt(struct timer_list *t)
9399627f
JS
3255{
3256 struct lpfc_hba *phba;
eb016566 3257 uint32_t eratt = 0;
aa6fbb75 3258 uint64_t sli_intr, cnt;
9399627f 3259
f22eb4d3 3260 phba = from_timer(phba, t, eratt_poll);
9399627f 3261
aa6fbb75
JS
3262 /* Here we will also keep track of interrupts per sec of the hba */
3263 sli_intr = phba->sli.slistat.sli_intr;
3264
3265 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3266 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3267 sli_intr);
3268 else
3269 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3270
65791f1f
JS
3271 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3272 do_div(cnt, phba->eratt_poll_interval);
aa6fbb75
JS
3273 phba->sli.slistat.sli_ips = cnt;
3274
3275 phba->sli.slistat.sli_prev_intr = sli_intr;
3276
9399627f
JS
3277 /* Check chip HA register for error event */
3278 eratt = lpfc_sli_check_eratt(phba);
3279
3280 if (eratt)
3281 /* Tell the worker thread there is work to do */
3282 lpfc_worker_wake_up(phba);
3283 else
3284 /* Restart the timer for next eratt poll */
256ec0d0
JS
3285 mod_timer(&phba->eratt_poll,
3286 jiffies +
65791f1f 3287 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9399627f
JS
3288 return;
3289}
3290
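/*
 * Illustrative sketch only (not part of the driver): the interrupts-per-
 * interval math above must tolerate the 64-bit counter wrapping and cannot
 * use a plain 64-bit division on 32-bit x86, hence do_div().  The helper
 * name below is hypothetical.
 */
static inline uint64_t lpfc_example_intr_per_interval(uint64_t prev,
						      uint64_t curr,
						      uint32_t interval)
{
	uint64_t cnt;

	/* Account for the counter wrapping back past zero */
	if (prev > curr)
		cnt = ((uint64_t)(-1) - prev) + curr;
	else
		cnt = curr - prev;

	/* do_div() divides cnt in place and returns the remainder */
	do_div(cnt, interval);
	return cnt;
}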
875fbdfe 3291
e59058c4 3292/**
3621a710 3293 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
3294 * @phba: Pointer to HBA context object.
3295 * @pring: Pointer to driver SLI ring object.
3296 * @mask: Host attention register mask for this ring.
3297 *
3298 * This function is called from the interrupt context when there is a ring
3299 * event for the fcp ring. The caller does not hold any lock.
3300 * The function processes each response iocb in the response ring until it
25985edc 3301 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
e59058c4
JS
3302 * LE bit set. The function will call the completion handler of the command iocb
3303 * if the response iocb indicates a completion for a command iocb or it is
3304 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3305 * function if this is an unsolicited iocb.
dea3101e 3306 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
3307 * to check it explicitly.
3308 */
3309int
2e0fef85
JS
3310lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3311 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3312{
34b02dcd 3313 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 3314 IOCB_t *irsp = NULL;
87f6eaff 3315 IOCB_t *entry = NULL;
dea3101e 3316 struct lpfc_iocbq *cmdiocbq = NULL;
3317 struct lpfc_iocbq rspiocbq;
dea3101e 3318 uint32_t status;
3319 uint32_t portRspPut, portRspMax;
3320 int rc = 1;
3321 lpfc_iocb_type type;
3322 unsigned long iflag;
3323 uint32_t rsp_cmpl = 0;
dea3101e 3324
2e0fef85 3325 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3326 pring->stats.iocb_event++;
3327
dea3101e 3328 /*
3329 * The next available response entry should never exceed the maximum
3330 * entries. If it does, treat it as an adapter hardware error.
3331 */
7e56aa25 3332 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3333 portRspPut = le32_to_cpu(pgp->rspPutInx);
3334 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 3335 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 3336 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3337 return 1;
3338 }
45ed1190
JS
3339 if (phba->fcp_ring_in_use) {
3340 spin_unlock_irqrestore(&phba->hbalock, iflag);
3341 return 1;
3342 } else
3343 phba->fcp_ring_in_use = 1;
dea3101e 3344
3345 rmb();
7e56aa25 3346 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
3347 /*
3348 * Fetch an entry off the ring and copy it into a local data
3349 * structure. The copy involves a byte-swap since the
3350 * network byte order and PCI byte order are different.
3351 */
ed957684 3352 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 3353 phba->last_completion_time = jiffies;
875fbdfe 3354
7e56aa25
JS
3355 if (++pring->sli.sli3.rspidx >= portRspMax)
3356 pring->sli.sli3.rspidx = 0;
875fbdfe 3357
87f6eaff
JSEC
3358 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3359 (uint32_t *) &rspiocbq.iocb,
ed957684 3360 phba->iocb_rsp_size);
a4bc3379 3361 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
3362 irsp = &rspiocbq.iocb;
3363
dea3101e 3364 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3365 pring->stats.iocb_rsp++;
3366 rsp_cmpl++;
3367
3368 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3369 /*
3370 * If resource errors are reported from the HBA, reduce
3371 * the queue depths of the SCSI devices.
3372 */
3373 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3374 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3375 IOERR_NO_RESOURCES)) {
92d7f7b0 3376 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3377 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3378 spin_lock_irqsave(&phba->hbalock, iflag);
3379 }
3380
dea3101e 3381 /* Rsp ring <ringno> error: IOCB */
3382 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3383 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3384 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3385 pring->ringno,
92d7f7b0
JS
3386 irsp->un.ulpWord[0],
3387 irsp->un.ulpWord[1],
3388 irsp->un.ulpWord[2],
3389 irsp->un.ulpWord[3],
3390 irsp->un.ulpWord[4],
3391 irsp->un.ulpWord[5],
d7c255b2
JS
3392 *(uint32_t *)&irsp->un1,
3393 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3394 }
3395
3396 switch (type) {
3397 case LPFC_ABORT_IOCB:
3398 case LPFC_SOL_IOCB:
3399 /*
3400 * Idle exchange closed via ABTS from port. No iocb
3401 * resources need to be recovered.
3402 */
3403 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3404 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3405 "0333 IOCB cmd 0x%x"
dca9479b 3406 " processed. Skipping"
92d7f7b0 3407 " completion\n",
dca9479b 3408 irsp->ulpCommand);
dea3101e 3409 break;
3410 }
3411
604a3e30
JB
3412 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3413 &rspiocbq);
0f65ff68
JS
3414 if (unlikely(!cmdiocbq))
3415 break;
3416 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3417 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3418 if (cmdiocbq->iocb_cmpl) {
3419 spin_unlock_irqrestore(&phba->hbalock, iflag);
3420 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3421 &rspiocbq);
3422 spin_lock_irqsave(&phba->hbalock, iflag);
3423 }
dea3101e 3424 break;
a4bc3379 3425 case LPFC_UNSOL_IOCB:
2e0fef85 3426 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3427 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3428 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3429 break;
dea3101e 3430 default:
3431 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3432 char adaptermsg[LPFC_MAX_ADPTMSG];
3433 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3434 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3435 MAX_MSG_DATA);
898eb71c
JP
3436 dev_warn(&((phba->pcidev)->dev),
3437 "lpfc%d: %s\n",
dea3101e 3438 phba->brd_no, adaptermsg);
3439 } else {
3440 /* Unknown IOCB command */
3441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3442 "0334 Unknown IOCB command "
92d7f7b0 3443 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3444 type, irsp->ulpCommand,
92d7f7b0
JS
3445 irsp->ulpStatus,
3446 irsp->ulpIoTag,
3447 irsp->ulpContext);
dea3101e 3448 }
3449 break;
3450 }
3451
3452 /*
3453 * The response IOCB has been processed. Update the ring
3454 * pointer in SLIM. If the port response put pointer has not
3455 * been updated, sync the pgp->rspPutInx and fetch the new port
3456 * response put pointer.
3457 */
7e56aa25
JS
3458 writel(pring->sli.sli3.rspidx,
3459 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3460
7e56aa25 3461 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3462 portRspPut = le32_to_cpu(pgp->rspPutInx);
3463 }
3464
3465 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3466 pring->stats.iocb_rsp_full++;
3467 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3468 writel(status, phba->CAregaddr);
3469 readl(phba->CAregaddr);
3470 }
3471 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3472 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3473 pring->stats.iocb_cmd_empty++;
3474
3475 /* Force update of the local copy of cmdGetInx */
7e56aa25 3476 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3477 lpfc_sli_resume_iocb(phba, pring);
3478
3479 if ((pring->lpfc_sli_cmd_available))
3480 (pring->lpfc_sli_cmd_available) (phba, pring);
3481
3482 }
3483
45ed1190 3484 phba->fcp_ring_in_use = 0;
2e0fef85 3485 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3486 return rc;
3487}
3488
e59058c4 3489/**
3772a991
JS
3490 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3491 * @phba: Pointer to HBA context object.
3492 * @pring: Pointer to driver SLI ring object.
3493 * @rspiocbp: Pointer to driver response IOCB object.
3494 *
3495 * This function is called from the worker thread when there is a slow-path
3496 * response IOCB to process. This function chains all the response iocbs until
3497 * seeing the iocb with the LE bit set. The function will call
3498 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3499 * completion of a command iocb. The function will call the
3500 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3501 * The function frees the resources or calls the completion handler if this
3502 * iocb is an abort completion. The function returns NULL when the response
3503 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3504 * this function shall chain the iocb on to the iocb_continueq and return the
3505 * response iocb passed in.
3506 **/
3507static struct lpfc_iocbq *
3508lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3509 struct lpfc_iocbq *rspiocbp)
3510{
3511 struct lpfc_iocbq *saveq;
3512 struct lpfc_iocbq *cmdiocbp;
3513 struct lpfc_iocbq *next_iocb;
3514 IOCB_t *irsp = NULL;
3515 uint32_t free_saveq;
3516 uint8_t iocb_cmd_type;
3517 lpfc_iocb_type type;
3518 unsigned long iflag;
3519 int rc;
3520
3521 spin_lock_irqsave(&phba->hbalock, iflag);
3522 /* First add the response iocb to the iocb_continueq list */
3523 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3524 pring->iocb_continueq_cnt++;
3525
70f23fd6 3526 /* Now, determine whether the list is completed for processing */
3772a991
JS
3527 irsp = &rspiocbp->iocb;
3528 if (irsp->ulpLe) {
3529 /*
3530 * By default, the driver expects to free all resources
3531 * associated with this iocb completion.
3532 */
3533 free_saveq = 1;
3534 saveq = list_get_first(&pring->iocb_continueq,
3535 struct lpfc_iocbq, list);
3536 irsp = &(saveq->iocb);
3537 list_del_init(&pring->iocb_continueq);
3538 pring->iocb_continueq_cnt = 0;
3539
3540 pring->stats.iocb_rsp++;
3541
3542 /*
3543 * If resource errors are reported from the HBA, reduce
3544 * the queue depths of the SCSI devices.
3545 */
3546 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3547 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3548 IOERR_NO_RESOURCES)) {
3772a991
JS
3549 spin_unlock_irqrestore(&phba->hbalock, iflag);
3550 phba->lpfc_rampdown_queue_depth(phba);
3551 spin_lock_irqsave(&phba->hbalock, iflag);
3552 }
3553
3554 if (irsp->ulpStatus) {
3555 /* Rsp ring <ringno> error: IOCB */
3556 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3557 "0328 Rsp Ring %d error: "
3558 "IOCB Data: "
3559 "x%x x%x x%x x%x "
3560 "x%x x%x x%x x%x "
3561 "x%x x%x x%x x%x "
3562 "x%x x%x x%x x%x\n",
3563 pring->ringno,
3564 irsp->un.ulpWord[0],
3565 irsp->un.ulpWord[1],
3566 irsp->un.ulpWord[2],
3567 irsp->un.ulpWord[3],
3568 irsp->un.ulpWord[4],
3569 irsp->un.ulpWord[5],
3570 *(((uint32_t *) irsp) + 6),
3571 *(((uint32_t *) irsp) + 7),
3572 *(((uint32_t *) irsp) + 8),
3573 *(((uint32_t *) irsp) + 9),
3574 *(((uint32_t *) irsp) + 10),
3575 *(((uint32_t *) irsp) + 11),
3576 *(((uint32_t *) irsp) + 12),
3577 *(((uint32_t *) irsp) + 13),
3578 *(((uint32_t *) irsp) + 14),
3579 *(((uint32_t *) irsp) + 15));
3580 }
3581
3582 /*
3583 * Fetch the IOCB command type and call the correct completion
3584 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3585 * get freed back to the lpfc_iocb_list by the discovery
3586 * kernel thread.
3587 */
3588 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3589 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3590 switch (type) {
3591 case LPFC_SOL_IOCB:
3592 spin_unlock_irqrestore(&phba->hbalock, iflag);
3593 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3594 spin_lock_irqsave(&phba->hbalock, iflag);
3595 break;
3596
3597 case LPFC_UNSOL_IOCB:
3598 spin_unlock_irqrestore(&phba->hbalock, iflag);
3599 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3600 spin_lock_irqsave(&phba->hbalock, iflag);
3601 if (!rc)
3602 free_saveq = 0;
3603 break;
3604
3605 case LPFC_ABORT_IOCB:
3606 cmdiocbp = NULL;
3607 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3608 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3609 saveq);
3610 if (cmdiocbp) {
3611 /* Call the specified completion routine */
3612 if (cmdiocbp->iocb_cmpl) {
3613 spin_unlock_irqrestore(&phba->hbalock,
3614 iflag);
3615 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3616 saveq);
3617 spin_lock_irqsave(&phba->hbalock,
3618 iflag);
3619 } else
3620 __lpfc_sli_release_iocbq(phba,
3621 cmdiocbp);
3622 }
3623 break;
3624
3625 case LPFC_UNKNOWN_IOCB:
3626 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3627 char adaptermsg[LPFC_MAX_ADPTMSG];
3628 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3629 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3630 MAX_MSG_DATA);
3631 dev_warn(&((phba->pcidev)->dev),
3632 "lpfc%d: %s\n",
3633 phba->brd_no, adaptermsg);
3634 } else {
3635 /* Unknown IOCB command */
3636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3637 "0335 Unknown IOCB "
3638 "command Data: x%x "
3639 "x%x x%x x%x\n",
3640 irsp->ulpCommand,
3641 irsp->ulpStatus,
3642 irsp->ulpIoTag,
3643 irsp->ulpContext);
3644 }
3645 break;
3646 }
3647
3648 if (free_saveq) {
3649 list_for_each_entry_safe(rspiocbp, next_iocb,
3650 &saveq->list, list) {
61f35bff 3651 list_del_init(&rspiocbp->list);
3772a991
JS
3652 __lpfc_sli_release_iocbq(phba, rspiocbp);
3653 }
3654 __lpfc_sli_release_iocbq(phba, saveq);
3655 }
3656 rspiocbp = NULL;
3657 }
3658 spin_unlock_irqrestore(&phba->hbalock, iflag);
3659 return rspiocbp;
3660}
3661
3662/**
3663 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3664 * @phba: Pointer to HBA context object.
3665 * @pring: Pointer to driver SLI ring object.
3666 * @mask: Host attention register mask for this ring.
3667 *
3772a991
JS
3668 * This routine wraps the actual slow_ring event process routine from the
3669 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3670 **/
3772a991 3671void
2e0fef85
JS
3672lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3673 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3674{
3675 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3676}
3677
3678/**
3679 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3680 * @phba: Pointer to HBA context object.
3681 * @pring: Pointer to driver SLI ring object.
3682 * @mask: Host attention register mask for this ring.
3683 *
3684 * This function is called from the worker thread when there is a ring event
3685 * for non-fcp rings. The caller does not hold any lock. The function
3686 * removes each response iocb in the response ring and calls the handle
3687 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3688 **/
3689static void
3690lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3691 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3692{
34b02dcd 3693 struct lpfc_pgp *pgp;
dea3101e 3694 IOCB_t *entry;
3695 IOCB_t *irsp = NULL;
3696 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3697 uint32_t portRspPut, portRspMax;
dea3101e 3698 unsigned long iflag;
3772a991 3699 uint32_t status;
dea3101e 3700
34b02dcd 3701 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3702 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3703 pring->stats.iocb_event++;
3704
dea3101e 3705 /*
3706 * The next available response entry should never exceed the maximum
3707 * entries. If it does, treat it as an adapter hardware error.
3708 */
7e56aa25 3709 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3710 portRspPut = le32_to_cpu(pgp->rspPutInx);
3711 if (portRspPut >= portRspMax) {
3712 /*
025dfdaf 3713 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3714 * rsp ring <portRspMax>
3715 */
ed957684 3716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3717 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3718 "is bigger than rsp ring %d\n",
e8b62011 3719 pring->ringno, portRspPut, portRspMax);
dea3101e 3720
2e0fef85
JS
3721 phba->link_state = LPFC_HBA_ERROR;
3722 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3723
3724 phba->work_hs = HS_FFER3;
3725 lpfc_handle_eratt(phba);
3726
3772a991 3727 return;
dea3101e 3728 }
3729
3730 rmb();
7e56aa25 3731 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3732 /*
3733 * Build a completion list and call the appropriate handler.
3734 * The process is to get the next available response iocb, get
3735 * a free iocb from the list, copy the response data into the
3736 * free iocb, insert it into the continuation list, and update
3737 * the next response index to SLIM. This process makes response
3738 * iocbs in the ring available to DMA as fast as possible but
3739 * pays a penalty for a copy operation. Since the iocb is
3740 * only 32 bytes, this penalty is considered small relative to
3741 * the PCI reads for register values and a SLIM write. When
3742 * the ulpLe field is set, the entire command has been
3743 * received.
3744 */
ed957684
JS
3745 entry = lpfc_resp_iocb(phba, pring);
3746
858c9f6c 3747 phba->last_completion_time = jiffies;
2e0fef85 3748 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3749 if (rspiocbp == NULL) {
3750 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3751 "completion.\n", __func__);
dea3101e 3752 break;
3753 }
3754
ed957684
JS
3755 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3756 phba->iocb_rsp_size);
dea3101e 3757 irsp = &rspiocbp->iocb;
3758
7e56aa25
JS
3759 if (++pring->sli.sli3.rspidx >= portRspMax)
3760 pring->sli.sli3.rspidx = 0;
dea3101e 3761
a58cbd52
JS
3762 if (pring->ringno == LPFC_ELS_RING) {
3763 lpfc_debugfs_slow_ring_trc(phba,
3764 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3765 *(((uint32_t *) irsp) + 4),
3766 *(((uint32_t *) irsp) + 6),
3767 *(((uint32_t *) irsp) + 7));
3768 }
3769
7e56aa25
JS
3770 writel(pring->sli.sli3.rspidx,
3771 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3772
3772a991
JS
3773 spin_unlock_irqrestore(&phba->hbalock, iflag);
3774 /* Handle the response IOCB */
3775 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3776 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3777
3778 /*
3779 * If the port response put pointer has not been updated, sync
3780 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3781 * response put pointer.
3782 */
7e56aa25 3783 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3784 portRspPut = le32_to_cpu(pgp->rspPutInx);
3785 }
7e56aa25 3786 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3787
92d7f7b0 3788 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3789 /* At least one response entry has been freed */
3790 pring->stats.iocb_rsp_full++;
3791 /* SET RxRE_RSP in Chip Att register */
3792 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3793 writel(status, phba->CAregaddr);
3794 readl(phba->CAregaddr); /* flush */
3795 }
3796 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3797 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3798 pring->stats.iocb_cmd_empty++;
3799
3800 /* Force update of the local copy of cmdGetInx */
7e56aa25 3801 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3802 lpfc_sli_resume_iocb(phba, pring);
3803
3804 if ((pring->lpfc_sli_cmd_available))
3805 (pring->lpfc_sli_cmd_available) (phba, pring);
3806
3807 }
3808
2e0fef85 3809 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3810 return;
dea3101e 3811}
3812
4f774513
JS
3813/**
3814 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3815 * @phba: Pointer to HBA context object.
3816 * @pring: Pointer to driver SLI ring object.
3817 * @mask: Host attention register mask for this ring.
3818 *
3819 * This function is called from the worker thread when there is a pending
3820 * ELS response iocb on the driver internal slow-path response iocb worker
3821 * queue. The caller does not hold any lock. The function removes each
3822 * response iocb from the response worker queue and calls the handle
3823 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3824 **/
3825static void
3826lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3827 struct lpfc_sli_ring *pring, uint32_t mask)
3828{
3829 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3830 struct hbq_dmabuf *dmabuf;
3831 struct lpfc_cq_event *cq_event;
4f774513 3832 unsigned long iflag;
0ef01a2d 3833 int count = 0;
4f774513 3834
45ed1190
JS
3835 spin_lock_irqsave(&phba->hbalock, iflag);
3836 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3837 spin_unlock_irqrestore(&phba->hbalock, iflag);
3838 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3839 /* Get the response iocb from the head of work queue */
3840 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3841 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3842 cq_event, struct lpfc_cq_event, list);
4f774513 3843 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3844
3845 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3846 case CQE_CODE_COMPL_WQE:
3847 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3848 cq_event);
45ed1190
JS
3849 /* Translate ELS WCQE to response IOCBQ */
3850 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3851 irspiocbq);
3852 if (irspiocbq)
3853 lpfc_sli_sp_handle_rspiocb(phba, pring,
3854 irspiocbq);
0ef01a2d 3855 count++;
4d9ab994
JS
3856 break;
3857 case CQE_CODE_RECEIVE:
7851fe2c 3858 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3859 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3860 cq_event);
3861 lpfc_sli4_handle_received_buffer(phba, dmabuf);
0ef01a2d 3862 count++;
4d9ab994
JS
3863 break;
3864 default:
3865 break;
3866 }
0ef01a2d
JS
3867
3868 /* Limit the number of events to 64 to avoid soft lockups */
3869 if (count == 64)
3870 break;
4f774513
JS
3871 }
3872}
3873
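/*
 * Illustrative sketch only (not part of the driver): the handler above caps
 * each pass at 64 queued events so a burst of slow-path completions cannot
 * hold the worker thread long enough to trigger a soft lockup.  The helper
 * below shows the same bounded-drain pattern; the name and the handler
 * callback are hypothetical.
 */
static void lpfc_example_bounded_drain(struct lpfc_hba *phba,
				       struct list_head *evt_list,
				       void (*handler)(struct lpfc_hba *,
						       struct lpfc_cq_event *))
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int count = 0;

	while (count < 64) {
		/* Dequeue one event under the lock */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (list_empty(evt_list)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			break;
		}
		list_remove_head(evt_list, cq_event,
				 struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		/* Process the event outside the lock, then count it */
		handler(phba, cq_event);
		count++;
	}
}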
e59058c4 3874/**
3621a710 3875 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3876 * @phba: Pointer to HBA context object.
3877 * @pring: Pointer to driver SLI ring object.
3878 *
3879 * This function aborts all iocbs in the given ring and frees all the iocb
3880 * objects in txq. This function issues an abort iocb for all the iocb commands
3881 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3882 * the return of this function. The caller is not required to hold any locks.
3883 **/
2e0fef85 3884void
dea3101e 3885lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3886{
2534ba75 3887 LIST_HEAD(completions);
dea3101e 3888 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3889
92d7f7b0
JS
3890 if (pring->ringno == LPFC_ELS_RING) {
3891 lpfc_fabric_abort_hba(phba);
3892 }
3893
dea3101e 3894 /* Error everything on txq and txcmplq
3895 * First do the txq.
3896 */
db55fba8
JS
3897 if (phba->sli_rev >= LPFC_SLI_REV4) {
3898 spin_lock_irq(&pring->ring_lock);
3899 list_splice_init(&pring->txq, &completions);
3900 pring->txq_cnt = 0;
3901 spin_unlock_irq(&pring->ring_lock);
dea3101e 3902
db55fba8
JS
3903 spin_lock_irq(&phba->hbalock);
3904 /* Next issue ABTS for everything on the txcmplq */
3905 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3906 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3907 spin_unlock_irq(&phba->hbalock);
3908 } else {
3909 spin_lock_irq(&phba->hbalock);
3910 list_splice_init(&pring->txq, &completions);
3911 pring->txq_cnt = 0;
dea3101e 3912
db55fba8
JS
3913 /* Next issue ABTS for everything on the txcmplq */
3914 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3915 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3916 spin_unlock_irq(&phba->hbalock);
3917 }
dea3101e 3918
a257bf90
JS
3919 /* Cancel all the IOCBs from the completions list */
3920 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3921 IOERR_SLI_ABORTED);
dea3101e 3922}
3923
895427bd
JS
3924/**
3925 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3926 * @phba: Pointer to HBA context object.
3927 * @pring: Pointer to driver SLI ring object.
3928 *
3929 * This function aborts all iocbs in the given ring and frees all the iocb
3930 * objects in txq. This function issues an abort iocb for all the iocb commands
3931 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3932 * the return of this function. The caller is not required to hold any locks.
3933 **/
3934void
3935lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3936{
3937 LIST_HEAD(completions);
3938 struct lpfc_iocbq *iocb, *next_iocb;
3939
3940 if (pring->ringno == LPFC_ELS_RING)
3941 lpfc_fabric_abort_hba(phba);
3942
3943 spin_lock_irq(&phba->hbalock);
3944 /* Next issue ABTS for everything on the txcmplq */
3945 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3946 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3947 spin_unlock_irq(&phba->hbalock);
3948}
3949
3950
db55fba8
JS
3951/**
3952 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3953 * @phba: Pointer to HBA context object.
3954 * @pring: Pointer to driver SLI ring object.
3955 *
3956 * This function aborts all iocbs in FCP rings and frees all the iocb
3957 * objects in txq. This function issues an abort iocb for all the iocb commands
3958 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3959 * the return of this function. The caller is not required to hold any locks.
3960 **/
3961void
3962lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3963{
3964 struct lpfc_sli *psli = &phba->sli;
3965 struct lpfc_sli_ring *pring;
3966 uint32_t i;
3967
3968 /* Look on all the FCP Rings for the iotag */
3969 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec
JS
3970 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3971 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
db55fba8
JS
3972 lpfc_sli_abort_iocb_ring(phba, pring);
3973 }
3974 } else {
895427bd 3975 pring = &psli->sli3_ring[LPFC_FCP_RING];
db55fba8
JS
3976 lpfc_sli_abort_iocb_ring(phba, pring);
3977 }
3978}
3979
895427bd
JS
3980/**
3981 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3982 * @phba: Pointer to HBA context object.
3983 *
3984 * This function aborts all wqes in NVME rings. This function issues an
3985 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3986 * the txcmplq are not guaranteed to complete before the return of this
3987 * function. The caller is not required to hold any locks.
3988 **/
3989void
3990lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3991{
3992 struct lpfc_sli_ring *pring;
3993 uint32_t i;
3994
cdb42bec
JS
3995 if ((phba->sli_rev < LPFC_SLI_REV4) ||
3996 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
895427bd
JS
3997 return;
3998
3999 /* Abort all IO on each NVME ring. */
cdb42bec
JS
4000 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4001 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
895427bd
JS
4002 lpfc_sli_abort_wqe_ring(phba, pring);
4003 }
4004}
4005
db55fba8 4006
a8e497d5 4007/**
3621a710 4008 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
4009 * @phba: Pointer to HBA context object.
4010 *
4011 * This function flushes all iocbs in the fcp ring and frees all the iocb
4012 * objects in txq and txcmplq. This function will not issue abort iocbs
4013 * for all the iocb commands in txcmplq; they will just be returned with
4014 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
4015 * slot has been permanently disabled.
4016 **/
4017void
4018lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4019{
4020 LIST_HEAD(txq);
4021 LIST_HEAD(txcmplq);
a8e497d5
JS
4022 struct lpfc_sli *psli = &phba->sli;
4023 struct lpfc_sli_ring *pring;
db55fba8 4024 uint32_t i;
c1dd9111 4025 struct lpfc_iocbq *piocb, *next_iocb;
a8e497d5
JS
4026
4027 spin_lock_irq(&phba->hbalock);
4f2e66c6
JS
4028 /* Indicate the I/O queues are flushed */
4029 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
4030 spin_unlock_irq(&phba->hbalock);
4031
db55fba8
JS
4032 /* Look on all the FCP Rings for the iotag */
4033 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec
JS
4034 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4035 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
db55fba8
JS
4036
4037 spin_lock_irq(&pring->ring_lock);
4038 /* Retrieve everything on txq */
4039 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4040 list_for_each_entry_safe(piocb, next_iocb,
4041 &pring->txcmplq, list)
4042 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4043 /* Retrieve everything on the txcmplq */
4044 list_splice_init(&pring->txcmplq, &txcmplq);
4045 pring->txq_cnt = 0;
4046 pring->txcmplq_cnt = 0;
4047 spin_unlock_irq(&pring->ring_lock);
4048
4049 /* Flush the txq */
4050 lpfc_sli_cancel_iocbs(phba, &txq,
4051 IOSTAT_LOCAL_REJECT,
4052 IOERR_SLI_DOWN);
4053 /* Flush the txcmplq */
4054 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4055 IOSTAT_LOCAL_REJECT,
4056 IOERR_SLI_DOWN);
4057 }
4058 } else {
895427bd 4059 pring = &psli->sli3_ring[LPFC_FCP_RING];
a8e497d5 4060
db55fba8
JS
4061 spin_lock_irq(&phba->hbalock);
4062 /* Retrieve everything on txq */
4063 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4064 list_for_each_entry_safe(piocb, next_iocb,
4065 &pring->txcmplq, list)
4066 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4067 /* Retrieve everything on the txcmplq */
4068 list_splice_init(&pring->txcmplq, &txcmplq);
4069 pring->txq_cnt = 0;
4070 pring->txcmplq_cnt = 0;
4071 spin_unlock_irq(&phba->hbalock);
4072
4073 /* Flush the txq */
4074 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4075 IOERR_SLI_DOWN);
4076 /* Flush the txcmplq */
4077 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4078 IOERR_SLI_DOWN);
4079 }
a8e497d5
JS
4080}
4081
895427bd
JS
4082/**
4083 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4084 * @phba: Pointer to HBA context object.
4085 *
4086 * This function flushes all wqes in the nvme rings and frees all resources
4087 * in the txcmplq. This function does not issue abort wqes for the IO
4088 * commands in txcmplq; they will just be returned with
4089 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
4090 * slot has been permanently disabled.
4091 **/
4092void
4093lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4094{
4095 LIST_HEAD(txcmplq);
4096 struct lpfc_sli_ring *pring;
4097 uint32_t i;
c1dd9111 4098 struct lpfc_iocbq *piocb, *next_iocb;
895427bd 4099
cdb42bec
JS
4100 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4101 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
895427bd
JS
4102 return;
4103
4104 /* Hint to other driver operations that a flush is in progress. */
4105 spin_lock_irq(&phba->hbalock);
4106 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4107 spin_unlock_irq(&phba->hbalock);
4108
4109 /* Cycle through all NVME rings and complete each IO with
4110 * a local driver reason code. This is a flush so no
4111 * abort exchange to FW.
4112 */
cdb42bec
JS
4113 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4114 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
895427bd 4115
895427bd 4116 spin_lock_irq(&pring->ring_lock);
c1dd9111
JS
4117 list_for_each_entry_safe(piocb, next_iocb,
4118 &pring->txcmplq, list)
4119 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4120 /* Retrieve everything on the txcmplq */
895427bd
JS
4121 list_splice_init(&pring->txcmplq, &txcmplq);
4122 pring->txcmplq_cnt = 0;
4123 spin_unlock_irq(&pring->ring_lock);
4124
4125 /* Flush the txcmplq */
4126 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4127 IOSTAT_LOCAL_REJECT,
4128 IOERR_SLI_DOWN);
4129 }
4130}
4131
e59058c4 4132/**
3772a991 4133 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
4134 * @phba: Pointer to HBA context object.
4135 * @mask: Bit mask to be checked.
4136 *
4137 * This function reads the host status register and compares it
4138 * with the provided bit mask to check if the HBA completed
4139 * the restart. This function will wait in a loop for the
4140 * HBA to complete the restart. If the HBA does not restart within
4141 * 15 iterations, the function will reset the HBA again. The
4142 * function returns 1 when the HBA fails to restart; otherwise it
4143 * returns zero.
4144 **/
3772a991
JS
4145static int
4146lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 4147{
41415862
JW
4148 uint32_t status;
4149 int i = 0;
4150 int retval = 0;
dea3101e 4151
41415862 4152 /* Read the HBA Host Status Register */
9940b97b
JS
4153 if (lpfc_readl(phba->HSregaddr, &status))
4154 return 1;
dea3101e 4155
41415862
JW
4156 /*
4157 * Check status register every 100ms for 5 retries, then every
4158 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4159 * every 2.5 sec for 4.
4160 * Break out of the loop if errors occurred during init.
4161 */
4162 while (((status & mask) != mask) &&
4163 !(status & HS_FFERM) &&
4164 i++ < 20) {
dea3101e 4165
41415862
JW
4166 if (i <= 5)
4167 msleep(10);
4168 else if (i <= 10)
4169 msleep(500);
4170 else
4171 msleep(2500);
dea3101e 4172
41415862 4173 if (i == 15) {
2e0fef85 4174 /* Do post */
92d7f7b0 4175 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
4176 lpfc_sli_brdrestart(phba);
4177 }
4178 /* Read the HBA Host Status Register */
9940b97b
JS
4179 if (lpfc_readl(phba->HSregaddr, &status)) {
4180 retval = 1;
4181 break;
4182 }
41415862 4183 }
dea3101e 4184
41415862
JW
4185 /* Check to see if any errors occurred during init */
4186 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
4187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4188 "2751 Adapter failed to restart, "
4189 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4190 status,
4191 readl(phba->MBslimaddr + 0xa8),
4192 readl(phba->MBslimaddr + 0xac));
2e0fef85 4193 phba->link_state = LPFC_HBA_ERROR;
41415862 4194 retval = 1;
dea3101e 4195 }
dea3101e 4196
41415862
JW
4197 return retval;
4198}
dea3101e 4199
da0436e9
JS
4200/**
4201 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4202 * @phba: Pointer to HBA context object.
4203 * @mask: Bit mask to be checked.
4204 *
4205 * This function checks the host status register to see if the HBA is
4206 * ready. This function will wait in a loop for the HBA to become ready.
4207 * If the HBA is not ready, the function will reset the HBA PCI
4208 * function again. The function returns 1 when the HBA fails to become
4209 * ready; otherwise it returns zero.
4210 **/
4211static int
4212lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4213{
4214 uint32_t status;
4215 int retval = 0;
4216
4217 /* Read the HBA Host Status Register */
4218 status = lpfc_sli4_post_status_check(phba);
4219
4220 if (status) {
4221 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4222 lpfc_sli_brdrestart(phba);
4223 status = lpfc_sli4_post_status_check(phba);
4224 }
4225
4226 /* Check to see if any errors occurred during init */
4227 if (status) {
4228 phba->link_state = LPFC_HBA_ERROR;
4229 retval = 1;
4230 } else
4231 phba->sli4_hba.intr_enable = 0;
4232
4233 return retval;
4234}
4235
4236/**
4237 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4238 * @phba: Pointer to HBA context object.
4239 * @mask: Bit mask to be checked.
4240 *
4241 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4242 * from the API jump table function pointer from the lpfc_hba struct.
4243 **/
4244int
4245lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4246{
4247 return phba->lpfc_sli_brdready(phba, mask);
4248}
4249
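/*
 * Illustrative sketch only (not part of the driver): callers check for
 * restart completion through the jump-table wrapper above rather than
 * calling the SLI3/SLI4 routine directly.  The mask shown is an example;
 * on SLI3 hardware readiness is indicated by the HS_FFRDY and HS_MBRDY
 * bits of the host status register (see lpfc_sli_chipset_init() below).
 */
static inline int lpfc_example_wait_for_ready(struct lpfc_hba *phba)
{
	/* Returns 0 when the HBA came ready, 1 on failure */
	return lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
}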
9290831f
JS
4250#define BARRIER_TEST_PATTERN (0xdeadbeef)
4251
e59058c4 4252/**
3621a710 4253 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
4254 * @phba: Pointer to HBA context object.
4255 *
1b51197d
JS
4256 * This function is called before resetting an HBA. It is called with the
4257 * hbalock held and requests the HBA to quiesce DMA activity before the reset.
e59058c4 4258 **/
2e0fef85 4259void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 4260{
65a29c16
JS
4261 uint32_t __iomem *resp_buf;
4262 uint32_t __iomem *mbox_buf;
9290831f 4263 volatile uint32_t mbox;
9940b97b 4264 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
4265 int i;
4266 uint8_t hdrtype;
4267
1c2ba475
JT
4268 lockdep_assert_held(&phba->hbalock);
4269
9290831f
JS
4270 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4271 if (hdrtype != 0x80 ||
4272 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4273 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4274 return;
4275
4276 /*
4277 * Tell the other part of the chip to suspend temporarily all
4278 * its DMA activity.
4279 */
65a29c16 4280 resp_buf = phba->MBslimaddr;
9290831f
JS
4281
4282 /* Disable the error attention */
9940b97b
JS
4283 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4284 return;
9290831f
JS
4285 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4286 readl(phba->HCregaddr); /* flush */
2e0fef85 4287 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 4288
9940b97b
JS
4289 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4290 return;
4291 if (ha_copy & HA_ERATT) {
9290831f
JS
4292 /* Clear Chip error bit */
4293 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4294 phba->pport->stopped = 1;
9290831f
JS
4295 }
4296
4297 mbox = 0;
4298 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4299 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4300
4301 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 4302 mbox_buf = phba->MBslimaddr;
9290831f
JS
4303 writel(mbox, mbox_buf);
4304
9940b97b
JS
4305 for (i = 0; i < 50; i++) {
4306 if (lpfc_readl((resp_buf + 1), &resp_data))
4307 return;
4308 if (resp_data != ~(BARRIER_TEST_PATTERN))
4309 mdelay(1);
4310 else
4311 break;
4312 }
4313 resp_data = 0;
4314 if (lpfc_readl((resp_buf + 1), &resp_data))
4315 return;
4316 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 4317 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 4318 phba->pport->stopped)
9290831f
JS
4319 goto restore_hc;
4320 else
4321 goto clear_errat;
4322 }
4323
4324 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
4325 resp_data = 0;
4326 for (i = 0; i < 500; i++) {
4327 if (lpfc_readl(resp_buf, &resp_data))
4328 return;
4329 if (resp_data != mbox)
4330 mdelay(1);
4331 else
4332 break;
4333 }
9290831f
JS
4334
4335clear_errat:
4336
9940b97b
JS
4337 while (++i < 500) {
4338 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4339 return;
4340 if (!(ha_copy & HA_ERATT))
4341 mdelay(1);
4342 else
4343 break;
4344 }
9290831f
JS
4345
4346 if (readl(phba->HAregaddr) & HA_ERATT) {
4347 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4348 phba->pport->stopped = 1;
9290831f
JS
4349 }
4350
4351restore_hc:
2e0fef85 4352 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
4353 writel(hc_copy, phba->HCregaddr);
4354 readl(phba->HCregaddr); /* flush */
4355}
4356
e59058c4 4357/**
3621a710 4358 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
4359 * @phba: Pointer to HBA context object.
4360 *
4361 * This function issues a kill_board mailbox command and waits for
4362 * the error attention interrupt. This function is called for stopping
4363 * the firmware processing. The caller is not required to hold any
4364 * locks. This function calls the lpfc_hba_down_post function to free
4365 * any pending commands after the kill. The function will return 1 when it
4366 * fails to kill the board; otherwise it will return 0.
4367 **/
41415862 4368int
2e0fef85 4369lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
4370{
4371 struct lpfc_sli *psli;
4372 LPFC_MBOXQ_t *pmb;
4373 uint32_t status;
4374 uint32_t ha_copy;
4375 int retval;
4376 int i = 0;
dea3101e 4377
41415862 4378 psli = &phba->sli;
dea3101e 4379
41415862 4380 /* Kill HBA */
ed957684 4381 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
4382 "0329 Kill HBA Data: x%x x%x\n",
4383 phba->pport->port_state, psli->sli_flag);
41415862 4384
98c9ea5c
JS
4385 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4386 if (!pmb)
41415862 4387 return 1;
41415862
JW
4388
4389 /* Disable the error attention */
2e0fef85 4390 spin_lock_irq(&phba->hbalock);
9940b97b
JS
4391 if (lpfc_readl(phba->HCregaddr, &status)) {
4392 spin_unlock_irq(&phba->hbalock);
4393 mempool_free(pmb, phba->mbox_mem_pool);
4394 return 1;
4395 }
41415862
JW
4396 status &= ~HC_ERINT_ENA;
4397 writel(status, phba->HCregaddr);
4398 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
4399 phba->link_flag |= LS_IGNORE_ERATT;
4400 spin_unlock_irq(&phba->hbalock);
41415862
JW
4401
4402 lpfc_kill_board(phba, pmb);
4403 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4404 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4405
4406 if (retval != MBX_SUCCESS) {
4407 if (retval != MBX_BUSY)
4408 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
4409 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4410 "2752 KILL_BOARD command failed retval %d\n",
4411 retval);
2e0fef85
JS
4412 spin_lock_irq(&phba->hbalock);
4413 phba->link_flag &= ~LS_IGNORE_ERATT;
4414 spin_unlock_irq(&phba->hbalock);
41415862
JW
4415 return 1;
4416 }
4417
f4b4c68f
JS
4418 spin_lock_irq(&phba->hbalock);
4419 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4420 spin_unlock_irq(&phba->hbalock);
9290831f 4421
41415862
JW
4422 mempool_free(pmb, phba->mbox_mem_pool);
4423
4424 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4425 * attention every 100ms for 3 seconds. If we don't get ERATT after
4426 * 3 seconds we still set HBA_ERROR state because the status of the
4427 * board is now undefined.
4428 */
9940b97b
JS
4429 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4430 return 1;
41415862
JW
4431 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4432 mdelay(100);
9940b97b
JS
4433 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4434 return 1;
41415862
JW
4435 }
4436
4437 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
4438 if (ha_copy & HA_ERATT) {
4439 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4440 phba->pport->stopped = 1;
9290831f 4441 }
2e0fef85 4442 spin_lock_irq(&phba->hbalock);
41415862 4443 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 4444 psli->mbox_active = NULL;
2e0fef85
JS
4445 phba->link_flag &= ~LS_IGNORE_ERATT;
4446 spin_unlock_irq(&phba->hbalock);
41415862 4447
41415862 4448 lpfc_hba_down_post(phba);
2e0fef85 4449 phba->link_state = LPFC_HBA_ERROR;
41415862 4450
2e0fef85 4451 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 4452}
4453
e59058c4 4454/**
3772a991 4455 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
4456 * @phba: Pointer to HBA context object.
4457 *
4458 * This function resets the HBA by writing HC_INITFF to the control
4459 * register. After the HBA resets, this function resets all the iocb ring
4460 * indices. This function disables PCI layer parity checking during
4461 * the reset.
4462 * This function returns 0 on success and -EIO if the PCI command register cannot be read.
4463 * The caller is not required to hold any locks.
4464 **/
41415862 4465int
2e0fef85 4466lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 4467{
41415862 4468 struct lpfc_sli *psli;
dea3101e 4469 struct lpfc_sli_ring *pring;
41415862 4470 uint16_t cfg_value;
dea3101e 4471 int i;
dea3101e 4472
41415862 4473 psli = &phba->sli;
dea3101e 4474
41415862
JW
4475 /* Reset HBA */
4476 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4477 "0325 Reset HBA Data: x%x x%x\n",
4492b739
JS
4478 (phba->pport) ? phba->pport->port_state : 0,
4479 psli->sli_flag);
dea3101e 4480
4481 /* perform board reset */
4482 phba->fc_eventTag = 0;
4d9ab994 4483 phba->link_events = 0;
4492b739
JS
4484 if (phba->pport) {
4485 phba->pport->fc_myDID = 0;
4486 phba->pport->fc_prevDID = 0;
4487 }
dea3101e 4488
41415862 4489 /* Turn off parity checking and serr during the physical reset */
32a93100
JS
4490 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4491 return -EIO;
4492
41415862
JW
4493 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4494 (cfg_value &
4495 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4496
3772a991
JS
4497 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4498
41415862
JW
4499 /* Now toggle INITFF bit in the Host Control Register */
4500 writel(HC_INITFF, phba->HCregaddr);
4501 mdelay(1);
4502 readl(phba->HCregaddr); /* flush */
4503 writel(0, phba->HCregaddr);
4504 readl(phba->HCregaddr); /* flush */
4505
4506 /* Restore PCI cmd register */
4507 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 4508
4509 /* Initialize relevant SLI info */
41415862 4510 for (i = 0; i < psli->num_rings; i++) {
895427bd 4511 pring = &psli->sli3_ring[i];
dea3101e 4512 pring->flag = 0;
7e56aa25
JS
4513 pring->sli.sli3.rspidx = 0;
4514 pring->sli.sli3.next_cmdidx = 0;
4515 pring->sli.sli3.local_getidx = 0;
4516 pring->sli.sli3.cmdidx = 0;
dea3101e 4517 pring->missbufcnt = 0;
4518 }
dea3101e 4519
2e0fef85 4520 phba->link_state = LPFC_WARM_START;
41415862
JW
4521 return 0;
4522}
4523
e59058c4 4524/**
da0436e9
JS
4525 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4526 * @phba: Pointer to HBA context object.
4527 *
4528 * This function resets a SLI4 HBA. This function disables PCI layer parity
4529 * checking while it resets the device. The caller is not required to hold
4530 * any locks.
4531 *
4532 * This function returns 0 on success and a nonzero value on failure.
4533 **/
4534int
4535lpfc_sli4_brdreset(struct lpfc_hba *phba)
4536{
4537 struct lpfc_sli *psli = &phba->sli;
4538 uint16_t cfg_value;
0293635e 4539 int rc = 0;
da0436e9
JS
4540
4541 /* Reset HBA */
4542 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0293635e
JS
4543 "0295 Reset HBA Data: x%x x%x x%x\n",
4544 phba->pport->port_state, psli->sli_flag,
4545 phba->hba_flag);
da0436e9
JS
4546
4547 /* perform board reset */
4548 phba->fc_eventTag = 0;
4d9ab994 4549 phba->link_events = 0;
da0436e9
JS
4550 phba->pport->fc_myDID = 0;
4551 phba->pport->fc_prevDID = 0;
4552
da0436e9
JS
4553 spin_lock_irq(&phba->hbalock);
4554 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4555 phba->fcf.fcf_flag = 0;
da0436e9
JS
4556 spin_unlock_irq(&phba->hbalock);
4557
0293635e
JS
4558 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4559 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4560 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4561 return rc;
4562 }
4563
da0436e9
JS
4564 /* Now physically reset the device */
4565 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4566 "0389 Performing PCI function reset!\n");
be858b65
JS
4567
4568 /* Turn off parity checking and serr during the physical reset */
32a93100
JS
4569 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4570 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4571 "3205 PCI read Config failed\n");
4572 return -EIO;
4573 }
4574
be858b65
JS
4575 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4576 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4577
88318816 4578 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4579 rc = lpfc_pci_function_reset(phba);
da0436e9 4580
be858b65
JS
4581 /* Restore PCI cmd register */
4582 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4583
27b01b82 4584 return rc;
da0436e9
JS
4585}
4586
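/*
 * Illustrative sketch only (not part of the driver): both reset paths above
 * use the same save/mask/restore pattern on the PCI command register so the
 * reset itself cannot raise parity or SERR errors.  The helper name is
 * hypothetical and the reset step is elided.
 */
static int lpfc_example_mask_parity_for_reset(struct pci_dev *pdev)
{
	uint16_t cfg_value;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cfg_value))
		return -EIO;

	/* Turn off parity checking and SERR while the board is reset */
	pci_write_config_word(pdev, PCI_COMMAND,
			      cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

	/* ... perform the board or PCI function reset here ... */

	/* Restore the original PCI command register value */
	pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
	return 0;
}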
4587/**
4588 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4589 * @phba: Pointer to HBA context object.
4590 *
4591 * This function is called in the SLI initialization code path to
4592 * restart the HBA. The caller is not required to hold any lock.
4593 * This function writes the MBX_RESTART mailbox command to the SLIM and
4594 * resets the HBA. At the end of the function, it calls the lpfc_hba_down_post
4595 * function to free any pending commands. The function enables
4596 * POST only during the first initialization. The function returns zero.
4597 * The function does not guarantee completion of the MBX_RESTART mailbox
4598 * command before the return of this function.
4599 **/
da0436e9
JS
4600static int
4601lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4602{
4603 MAILBOX_t *mb;
4604 struct lpfc_sli *psli;
41415862
JW
4605 volatile uint32_t word0;
4606 void __iomem *to_slim;
0d878419 4607 uint32_t hba_aer_enabled;
41415862 4608
2e0fef85 4609 spin_lock_irq(&phba->hbalock);
41415862 4610
0d878419
JS
4611 /* Take PCIe device Advanced Error Reporting (AER) state */
4612 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4613
41415862
JW
4614 psli = &phba->sli;
4615
4616 /* Restart HBA */
4617 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4618 "0337 Restart HBA Data: x%x x%x\n",
4492b739
JS
4619 (phba->pport) ? phba->pport->port_state : 0,
4620 psli->sli_flag);
41415862
JW
4621
4622 word0 = 0;
4623 mb = (MAILBOX_t *) &word0;
4624 mb->mbxCommand = MBX_RESTART;
4625 mb->mbxHc = 1;
4626
9290831f
JS
4627 lpfc_reset_barrier(phba);
4628
41415862
JW
4629 to_slim = phba->MBslimaddr;
4630 writel(*(uint32_t *) mb, to_slim);
4631 readl(to_slim); /* flush */
4632
4633 /* Only skip post after fc_ffinit is completed */
4492b739 4634 if (phba->pport && phba->pport->port_state)
41415862 4635 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4636 else
41415862 4637 word0 = 0; /* This is really setting up word1 */
65a29c16 4638 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4639 writel(*(uint32_t *) mb, to_slim);
4640 readl(to_slim); /* flush */
dea3101e 4641
41415862 4642 lpfc_sli_brdreset(phba);
4492b739
JS
4643 if (phba->pport)
4644 phba->pport->stopped = 0;
2e0fef85 4645 phba->link_state = LPFC_INIT_START;
da0436e9 4646 phba->hba_flag = 0;
2e0fef85 4647 spin_unlock_irq(&phba->hbalock);
41415862 4648
64ba8818 4649 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4650 psli->stats_start = ktime_get_seconds();
64ba8818 4651
eaf15d5b
JS
4652 /* Give the INITFF and Post time to settle. */
4653 mdelay(100);
41415862 4654
0d878419
JS
4655 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4656 if (hba_aer_enabled)
4657 pci_disable_pcie_error_reporting(phba->pcidev);
4658
41415862 4659 lpfc_hba_down_post(phba);
dea3101e 4660
4661 return 0;
4662}
4663
da0436e9
JS
4664/**
4665 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4666 * @phba: Pointer to HBA context object.
4667 *
4668 * This function is called in the SLI initialization code path to restart
4669 * a SLI4 HBA. The caller is not required to hold any lock.
4670 * At the end of the function, it calls lpfc_hba_down_post function to
4671 * free any pending commands.
4672 **/
4673static int
4674lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4675{
4676 struct lpfc_sli *psli = &phba->sli;
75baf696 4677 uint32_t hba_aer_enabled;
27b01b82 4678 int rc;
da0436e9
JS
4679
4680 /* Restart HBA */
4681 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4682 "0296 Restart HBA Data: x%x x%x\n",
4683 phba->pport->port_state, psli->sli_flag);
4684
75baf696
JS
4685 /* Take PCIe device Advanced Error Reporting (AER) state */
4686 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4687
27b01b82 4688 rc = lpfc_sli4_brdreset(phba);
5a9eeff5
JS
4689 if (rc)
4690 return rc;
da0436e9
JS
4691
4692 spin_lock_irq(&phba->hbalock);
4693 phba->pport->stopped = 0;
4694 phba->link_state = LPFC_INIT_START;
4695 phba->hba_flag = 0;
4696 spin_unlock_irq(&phba->hbalock);
4697
4698 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4699 psli->stats_start = ktime_get_seconds();
da0436e9 4700
75baf696
JS
4701 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4702 if (hba_aer_enabled)
4703 pci_disable_pcie_error_reporting(phba->pcidev);
4704
da0436e9 4705 lpfc_hba_down_post(phba);
569dbe84 4706 lpfc_sli4_queue_destroy(phba);
da0436e9 4707
27b01b82 4708 return rc;
da0436e9
JS
4709}
4710
4711/**
4712 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4713 * @phba: Pointer to HBA context object.
4714 *
4715 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4716 * API jump table function pointer from the lpfc_hba struct.
4717**/
4718int
4719lpfc_sli_brdrestart(struct lpfc_hba *phba)
4720{
4721 return phba->lpfc_sli_brdrestart(phba);
4722}
4723
e59058c4 4724/**
3621a710 4725 * lpfc_sli_chipset_init - Wait for the HBA to come ready after a restart
4726 * @phba: Pointer to HBA context object.
4727 *
4728 * This function is called after a HBA restart to wait for successful
4729 * restart of the HBA. Successful restart of the HBA is indicated by
4730 * HS_FFRDY and HS_MBRDY bits. If the HBA does not come ready after 150
4731 * iterations, the function will restart the HBA again. The function returns
4732 * zero if HBA successfully restarted else returns negative error code.
4733 **/
4492b739 4734int
dea3101e 4735lpfc_sli_chipset_init(struct lpfc_hba *phba)
4736{
4737 uint32_t status, i = 0;
4738
4739 /* Read the HBA Host Status Register */
4740 if (lpfc_readl(phba->HSregaddr, &status))
4741 return -EIO;
dea3101e 4742
4743 /* Check status register to see what current state is */
4744 i = 0;
4745 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4746
4747 /* Check every 10ms for 10 retries, then every 100ms for 90
4748 * retries, then every 1 sec for 50 retries, for a total of
4749 * ~60 seconds before resetting the board again and checking
4750 * every 1 sec for 50 more retries. Waiting up to 60 seconds
4751 * for board ready is required for the Falcon FIPS zeroization
4752 * to complete; any board reset in between restarts zeroization
4753 * and further delays board readiness.
dea3101e 4754 */
dcf2a4e0 4755 if (i++ >= 200) {
dea3101e 4756 /* Adapter failed to init, timeout, status reg
4757 <status> */
ed957684 4758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4759 "0436 Adapter failed to init, "
4760 "timeout, status reg x%x, "
4761 "FW Data: A8 x%x AC x%x\n", status,
4762 readl(phba->MBslimaddr + 0xa8),
4763 readl(phba->MBslimaddr + 0xac));
2e0fef85 4764 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4765 return -ETIMEDOUT;
4766 }
4767
4768 /* Check to see if any errors occurred during init */
4769 if (status & HS_FFERM) {
4770 /* ERROR: During chipset initialization */
4771 /* Adapter failed to init, chipset, status reg
4772 <status> */
ed957684 4773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4774 "0437 Adapter failed to init, "
4775 "chipset, status reg x%x, "
4776 "FW Data: A8 x%x AC x%x\n", status,
4777 readl(phba->MBslimaddr + 0xa8),
4778 readl(phba->MBslimaddr + 0xac));
2e0fef85 4779 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4780 return -EIO;
4781 }
4782
dcf2a4e0 4783 if (i <= 10)
dea3101e 4784 msleep(10);
4785 else if (i <= 100)
4786 msleep(100);
4787 else
4788 msleep(1000);
dea3101e 4789
4790 if (i == 150) {
4791 /* Do post */
92d7f7b0 4792 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4793 lpfc_sli_brdrestart(phba);
dea3101e 4794 }
4795 /* Read the HBA Host Status Register */
4796 if (lpfc_readl(phba->HSregaddr, &status))
4797 return -EIO;
dea3101e 4798 }
4799
4800 /* Check to see if any errors occurred during init */
4801 if (status & HS_FFERM) {
4802 /* ERROR: During chipset initialization */
4803 /* Adapter failed to init, chipset, status reg <status> */
ed957684 4804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 4805 "0438 Adapter failed to init, chipset, "
4806 "status reg x%x, "
4807 "FW Data: A8 x%x AC x%x\n", status,
4808 readl(phba->MBslimaddr + 0xa8),
4809 readl(phba->MBslimaddr + 0xac));
2e0fef85 4810 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4811 return -EIO;
4812 }
4813
4814 /* Clear all interrupt enable conditions */
4815 writel(0, phba->HCregaddr);
4816 readl(phba->HCregaddr); /* flush */
4817
4818 /* setup host attn register */
4819 writel(0xffffffff, phba->HAregaddr);
4820 readl(phba->HAregaddr); /* flush */
4821 return 0;
4822}
4823
e59058c4 4824/**
3621a710 4825 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4826 *
4827 * This function calculates and returns the number of HBQs required to be
4828 * configured.
4829 **/
78b2d852 4830int
4831lpfc_sli_hbq_count(void)
4832{
92d7f7b0 4833 return ARRAY_SIZE(lpfc_hbq_defs);
4834}
4835
e59058c4 4836/**
3621a710 4837 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4838 *
4839 * This function adds the number of hbq entries in every HBQ to get
4840 * the total number of hbq entries required for the HBA and returns
4841 * the total count.
4842 **/
4843static int
4844lpfc_sli_hbq_entry_count(void)
4845{
4846 int hbq_count = lpfc_sli_hbq_count();
4847 int count = 0;
4848 int i;
4849
4850 for (i = 0; i < hbq_count; ++i)
92d7f7b0 4851 count += lpfc_hbq_defs[i]->entry_count;
4852 return count;
4853}
4854
e59058c4 4855/**
3621a710 4856 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4857 *
4858 * This function calculates amount of memory required for all hbq entries
4859 * to be configured and returns the total memory required.
4860 **/
dea3101e 4861int
4862lpfc_sli_hbq_size(void)
4863{
4864 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4865}
4866
e59058c4 4867/**
3621a710 4868 * lpfc_sli_hbq_setup - configure and initialize HBQs
4869 * @phba: Pointer to HBA context object.
4870 *
4871 * This function is called during the SLI initialization to configure
4872 * all the HBQs and post buffers to the HBQ. The caller is not
4873 * required to hold any locks. This function will return zero if successful
4874 * else it will return negative error code.
4875 **/
4876static int
4877lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4878{
4879 int hbq_count = lpfc_sli_hbq_count();
4880 LPFC_MBOXQ_t *pmb;
4881 MAILBOX_t *pmbox;
4882 uint32_t hbqno;
4883 uint32_t hbq_entry_index;
ed957684 4884
4885 /* Get a Mailbox buffer to setup mailbox
4886 * commands for HBA initialization
4887 */
4888 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4889
4890 if (!pmb)
4891 return -ENOMEM;
4892
04c68496 4893 pmbox = &pmb->u.mb;
4894
4895 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4896 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 4897 phba->hbq_in_use = 1;
4898
4899 hbq_entry_index = 0;
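	/* Issue a CONFIG_HBQ mailbox command for each HBQ, carving each
	 * queue's entries out of one contiguous entry index space.
	 */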
4900 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4901 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4902 phba->hbqs[hbqno].hbqPutIdx = 0;
4903 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4904 phba->hbqs[hbqno].entry_count =
92d7f7b0 4905 lpfc_hbq_defs[hbqno]->entry_count;
4906 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4907 hbq_entry_index, pmb);
4908 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4909
4910 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4911 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4912 mbxStatus <status>, ring <num> */
4913
4914 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4915 LOG_SLI | LOG_VPORT,
e8b62011 4916 "1805 Adapter failed to init. "
ed957684 4917 "Data: x%x x%x x%x\n",
e8b62011 4918 pmbox->mbxCommand,
4919 pmbox->mbxStatus, hbqno);
4920
4921 phba->link_state = LPFC_HBA_ERROR;
4922 mempool_free(pmb, phba->mbox_mem_pool);
6e7288d9 4923 return -ENXIO;
4924 }
4925 }
4926 phba->hbq_count = hbq_count;
4927
4928 mempool_free(pmb, phba->mbox_mem_pool);
4929
92d7f7b0 4930 /* Initially populate or replenish the HBQs */
4931 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4932 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4933 return 0;
4934}
4935
4936/**
4937 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4938 * @phba: Pointer to HBA context object.
4939 *
4940 * This function is called during the SLI initialization to configure
4941 * all the HBQs and post buffers to the HBQ. The caller is not
4942 * required to hold any locks. This function will return zero if successful
4943 * else it will return negative error code.
4944 **/
4945static int
4946lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4947{
4948 phba->hbq_in_use = 1;
4949 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4950 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4f774513 4951 phba->hbq_count = 1;
895427bd 4952 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4f774513 4953 /* Initially populate or replenish the HBQs */
4954 return 0;
4955}
4956
e59058c4 4957/**
3621a710 4958 * lpfc_sli_config_port - Issue config port mailbox command
4959 * @phba: Pointer to HBA context object.
4960 * @sli_mode: sli mode - 2/3
4961 *
183b8021 4962 * This function is called by the sli initialization code path
4963 * to issue config_port mailbox command. This function restarts the
4964 * HBA firmware and issues a config_port mailbox command to configure
4965 * the SLI interface in the sli mode specified by sli_mode
4966 * variable. The caller is not required to hold any locks.
4967 * The function returns 0 if successful, else returns negative error
4968 * code.
4969 **/
4970int
4971lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e 4972{
4973 LPFC_MBOXQ_t *pmb;
4974 uint32_t resetcount = 0, rc = 0, done = 0;
4975
4976 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4977 if (!pmb) {
2e0fef85 4978 phba->link_state = LPFC_HBA_ERROR;
dea3101e 4979 return -ENOMEM;
4980 }
4981
ed957684 4982 phba->sli_rev = sli_mode;
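	/* Make up to two attempts: restart the HBA, wait for it to come
	 * ready, then issue CONFIG_PORT in the requested SLI mode.
	 */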
dea3101e 4983 while (resetcount < 2 && !done) {
2e0fef85 4984 spin_lock_irq(&phba->hbalock);
1c067a42 4985 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4986 spin_unlock_irq(&phba->hbalock);
92d7f7b0 4987 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 4988 lpfc_sli_brdrestart(phba);
dea3101e 4989 rc = lpfc_sli_chipset_init(phba);
4990 if (rc)
4991 break;
4992
2e0fef85 4993 spin_lock_irq(&phba->hbalock);
1c067a42 4994 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4995 spin_unlock_irq(&phba->hbalock);
dea3101e 4996 resetcount++;
4997
4998 /* Call pre CONFIG_PORT mailbox command initialization. A
4999 * value of 0 means the call was successful. Any other
5000 * nonzero value is a failure, but if ERESTART is returned,
5001 * the driver may reset the HBA and try again.
5002 */
dea3101e 5003 rc = lpfc_config_port_prep(phba);
5004 if (rc == -ERESTART) {
ed957684 5005 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 5006 continue;
34b02dcd 5007 } else if (rc)
dea3101e 5008 break;
6d368e53 5009
2e0fef85 5010 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 5011 lpfc_config_port(phba, pmb);
5012 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5013 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5014 LPFC_SLI3_HBQ_ENABLED |
5015 LPFC_SLI3_CRP_ENABLED |
bc73905a 5016 LPFC_SLI3_DSS_ENABLED);
ed957684 5017 if (rc != MBX_SUCCESS) {
dea3101e 5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 5019 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 5020 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 5021 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 5022 spin_lock_irq(&phba->hbalock);
04c68496 5023 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5024 spin_unlock_irq(&phba->hbalock);
5025 rc = -ENXIO;
5026 } else {
5027 /* Allow asynchronous mailbox command to go through */
5028 spin_lock_irq(&phba->hbalock);
5029 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5030 spin_unlock_irq(&phba->hbalock);
ed957684 5031 done = 1;
5032
5033 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5034 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5035 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5036 "3110 Port did not grant ASABT\n");
04c68496 5037 }
dea3101e 5038 }
5039 if (!done) {
5040 rc = -EINVAL;
5041 goto do_prep_failed;
5042 }
5043 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5044 if (!pmb->u.mb.un.varCfgPort.cMA) {
5045 rc = -ENXIO;
5046 goto do_prep_failed;
5047 }
04c68496 5048 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 5049 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5050 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5051 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5052 phba->max_vpi : phba->max_vports;
5053
5054 } else
5055 phba->max_vpi = 0;
5056 phba->fips_level = 0;
5057 phba->fips_spec_rev = 0;
5058 if (pmb->u.mb.un.varCfgPort.gdss) {
04c68496 5059 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5060 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5061 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5063 "2850 Security Crypto Active. FIPS x%d "
5064 "(Spec Rev: x%d)",
5065 phba->fips_level, phba->fips_spec_rev);
5066 }
5067 if (pmb->u.mb.un.varCfgPort.sec_err) {
5068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5069 "2856 Config Port Security Crypto "
5070 "Error: x%x ",
5071 pmb->u.mb.un.varCfgPort.sec_err);
5072 }
04c68496 5073 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 5074 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 5075 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 5076 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5077
5078 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5079 phba->port_gp = phba->mbox->us.s3_pgp.port;
e2a0a9d6 5080
5081 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5082 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5083 phba->cfg_enable_bg = 0;
5084 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5086 "0443 Adapter did not grant "
5087 "BlockGuard\n");
f44ac12f 5088 }
e2a0a9d6 5089 }
34b02dcd 5090 } else {
8f34f4ce 5091 phba->hbq_get = NULL;
34b02dcd 5092 phba->port_gp = phba->mbox->us.s2.port;
d7c255b2 5093 phba->max_vpi = 0;
ed957684 5094 }
92d7f7b0 5095do_prep_failed:
5096 mempool_free(pmb, phba->mbox_mem_pool);
5097 return rc;
5098}
5099
5100
5101/**
183b8021 5102 * lpfc_sli_hba_setup - SLI initialization function
5103 * @phba: Pointer to HBA context object.
5104 *
5105 * This function is the main SLI initialization function. This function
5106 * is called by the HBA initialization code, HBA reset code and HBA
5107 * error attention handler code. Caller is not required to hold any
5108 * locks. This function issues config_port mailbox command to configure
5109 * the SLI, setup iocb rings and HBQ rings. In the end the function
5110 * calls the config_port_post function to issue init_link mailbox
5111 * command and to start the discovery. The function will return zero
5112 * if successful, else it will return negative error code.
5113 **/
5114int
5115lpfc_sli_hba_setup(struct lpfc_hba *phba)
5116{
5117 uint32_t rc;
5118 int mode = 3, i;
5119 int longs;
ed957684 5120
12247e81 5121 switch (phba->cfg_sli_mode) {
ed957684 5122 case 2:
78b2d852 5123 if (phba->cfg_enable_npiv) {
92d7f7b0 5124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
12247e81 5125 "1824 NPIV enabled: Override sli_mode "
92d7f7b0 5126 "parameter (%d) to auto (0).\n",
12247e81 5127 phba->cfg_sli_mode);
5128 break;
5129 }
5130 mode = 2;
5131 break;
5132 case 0:
5133 case 3:
5134 break;
5135 default:
92d7f7b0 5136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5137 "1819 Unrecognized sli_mode parameter: %d.\n",
5138 phba->cfg_sli_mode);
5139
5140 break;
5141 }
b5c53958 5142 phba->fcp_embed_io = 0; /* SLI4 FC support only */
ed957684 5143
5144 rc = lpfc_sli_config_port(phba, mode);
5145
12247e81 5146 if (rc && phba->cfg_sli_mode == 3)
92d7f7b0 5147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5148 "1820 Unable to select SLI-3. "
5149 "Not supported by adapter.\n");
ed957684 5150 if (rc && mode != 2)
9399627f 5151 rc = lpfc_sli_config_port(phba, 2);
5152 else if (rc && mode == 2)
5153 rc = lpfc_sli_config_port(phba, 3);
ed957684 5154 if (rc)
dea3101e 5155 goto lpfc_sli_hba_setup_error;
5156
5157 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5158 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5159 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5160 if (!rc) {
5161 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5162 "2709 This device supports "
5163 "Advanced Error Reporting (AER)\n");
5164 spin_lock_irq(&phba->hbalock);
5165 phba->hba_flag |= HBA_AER_ENABLED;
5166 spin_unlock_irq(&phba->hbalock);
5167 } else {
5168 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5169 "2708 This device does not support "
5170 "Advanced Error Reporting (AER): %d\n",
5171 rc);
5172 phba->cfg_aer_support = 0;
5173 }
5174 }
5175
5176 if (phba->sli_rev == 3) {
5177 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5178 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5179 } else {
5180 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5181 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 5182 phba->sli3_options = 0;
5183 }
5184
5185 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5186 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5187 phba->sli_rev, phba->max_vpi);
ed957684 5188 rc = lpfc_sli_ring_map(phba);
dea3101e 5189
5190 if (rc)
5191 goto lpfc_sli_hba_setup_error;
5192
5193 /* Initialize VPIs. */
5194 if (phba->sli_rev == LPFC_SLI_REV3) {
5195 /*
5196 * The VPI bitmask and physical ID array are allocated
5197 * and initialized once only - at driver load. A port
5198 * reset doesn't need to reinitialize this memory.
5199 */
5200 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5201 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5202 phba->vpi_bmask = kcalloc(longs,
5203 sizeof(unsigned long),
5204 GFP_KERNEL);
5205 if (!phba->vpi_bmask) {
5206 rc = -ENOMEM;
5207 goto lpfc_sli_hba_setup_error;
5208 }
5209
5210 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5211 sizeof(uint16_t),
5212 GFP_KERNEL);
5213 if (!phba->vpi_ids) {
5214 kfree(phba->vpi_bmask);
5215 rc = -ENOMEM;
5216 goto lpfc_sli_hba_setup_error;
5217 }
5218 for (i = 0; i < phba->max_vpi; i++)
5219 phba->vpi_ids[i] = i;
5220 }
5221 }
5222
9399627f 5223 /* Init HBQs */
5224 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5225 rc = lpfc_sli_hbq_setup(phba);
5226 if (rc)
5227 goto lpfc_sli_hba_setup_error;
5228 }
04c68496 5229 spin_lock_irq(&phba->hbalock);
dea3101e 5230 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 5231 spin_unlock_irq(&phba->hbalock);
dea3101e 5232
5233 rc = lpfc_config_port_post(phba);
5234 if (rc)
5235 goto lpfc_sli_hba_setup_error;
5236
5237 return rc;
5238
92d7f7b0 5239lpfc_sli_hba_setup_error:
2e0fef85 5240 phba->link_state = LPFC_HBA_ERROR;
e40a02c1 5241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 5242 "0445 Firmware initialization failed\n");
dea3101e 5243 return rc;
5244}
5245
e59058c4 5246/**
5247 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5248 * @phba: Pointer to HBA context object.
5249 *
5250 * This function issues a dump mailbox command to read config region
5251 * 23, parses the records in the region, and populates the driver's
5252 * data structures.
e59058c4 5253 **/
da0436e9 5254static int
ff78d8f9 5255lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
dea3101e 5256{
ff78d8f9 5257 LPFC_MBOXQ_t *mboxq;
5258 struct lpfc_dmabuf *mp;
5259 struct lpfc_mqe *mqe;
5260 uint32_t data_length;
5261 int rc;
dea3101e 5262
5263 /* Program the default value of vlan_id and fc_map */
5264 phba->valid_vlan = 0;
5265 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5266 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5267 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 5268
5269 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5270 if (!mboxq)
5271 return -ENOMEM;
5272
5273 mqe = &mboxq->u.mqe;
5274 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5275 rc = -ENOMEM;
5276 goto out_free_mboxq;
5277 }
5278
3e1f0718 5279 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5280 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5281
5282 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5283 "(%d):2571 Mailbox cmd x%x Status x%x "
5284 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5285 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5286 "CQ: x%x x%x x%x x%x\n",
5287 mboxq->vport ? mboxq->vport->vpi : 0,
5288 bf_get(lpfc_mqe_command, mqe),
5289 bf_get(lpfc_mqe_status, mqe),
5290 mqe->un.mb_words[0], mqe->un.mb_words[1],
5291 mqe->un.mb_words[2], mqe->un.mb_words[3],
5292 mqe->un.mb_words[4], mqe->un.mb_words[5],
5293 mqe->un.mb_words[6], mqe->un.mb_words[7],
5294 mqe->un.mb_words[8], mqe->un.mb_words[9],
5295 mqe->un.mb_words[10], mqe->un.mb_words[11],
5296 mqe->un.mb_words[12], mqe->un.mb_words[13],
5297 mqe->un.mb_words[14], mqe->un.mb_words[15],
5298 mqe->un.mb_words[16], mqe->un.mb_words[50],
5299 mboxq->mcqe.word0,
5300 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5301 mboxq->mcqe.trailer);
5302
5303 if (rc) {
5304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5305 kfree(mp);
5306 rc = -EIO;
5307 goto out_free_mboxq;
5308 }
5309 data_length = mqe->un.mb_words[5];
a0c87cbd 5310 if (data_length > DMP_RGN23_SIZE) {
5311 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5312 kfree(mp);
5313 rc = -EIO;
5314 goto out_free_mboxq;
d11e31dd 5315 }
dea3101e 5316
5317 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5318 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5319 kfree(mp);
5320 rc = 0;
5321
5322out_free_mboxq:
5323 mempool_free(mboxq, phba->mbox_mem_pool);
5324 return rc;
da0436e9 5325}
5326
5327/**
5328 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5329 * @phba: pointer to lpfc hba data structure.
5330 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5331 * @vpd: pointer to the memory to hold resulting port vpd data.
5332 * @vpd_size: On input, the number of bytes allocated to @vpd.
5333 * On output, the number of data bytes in @vpd.
e59058c4 5334 *
5335 * This routine executes a READ_REV SLI4 mailbox command. In
5336 * addition, this routine gets the port vpd data.
5337 *
5338 * Return codes
af901ca1 5339 * 0 - successful
d439d286 5340 * -ENOMEM - could not allocate memory.
e59058c4 5341 **/
5342static int
5343lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5344 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 5345{
5346 int rc = 0;
5347 uint32_t dma_size;
5348 struct lpfc_dmabuf *dmabuf;
5349 struct lpfc_mqe *mqe;
dea3101e 5350
5351 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5352 if (!dmabuf)
5353 return -ENOMEM;
5354
5355 /*
5356 * Get a DMA buffer for the vpd data resulting from the READ_REV
5357 * mailbox command.
a257bf90 5358 */
da0436e9 5359 dma_size = *vpd_size;
5360 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5361 &dmabuf->phys, GFP_KERNEL);
5362 if (!dmabuf->virt) {
5363 kfree(dmabuf);
5364 return -ENOMEM;
5365 }
5366
5367 /*
5368 * The SLI4 implementation of READ_REV conflicts at word1,
5369 * bits 31:16 and SLI4 adds vpd functionality not present
5370 * in SLI3. This code corrects the conflicts.
1dcb58e5 5371 */
5372 lpfc_read_rev(phba, mboxq);
5373 mqe = &mboxq->u.mqe;
5374 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5375 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5376 mqe->un.read_rev.word1 &= 0x0000FFFF;
5377 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5378 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5379
5380 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5381 if (rc) {
5382 dma_free_coherent(&phba->pcidev->dev, dma_size,
5383 dmabuf->virt, dmabuf->phys);
def9c7a9 5384 kfree(dmabuf);
5385 return -EIO;
5386 }
1dcb58e5 5387
5388 /*
5389 * The available vpd length cannot be bigger than the
5390 * DMA buffer passed to the port. Catch the less than
5391 * case and update the caller's size.
5392 */
5393 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5394 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 5395
5396 memcpy(vpd, dmabuf->virt, *vpd_size);
5397
5398 dma_free_coherent(&phba->pcidev->dev, dma_size,
5399 dmabuf->virt, dmabuf->phys);
5400 kfree(dmabuf);
5401 return 0;
dea3101e 5402}
5403
cd1c8301 5404/**
b3b4f3e1 5405 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5406 * @phba: pointer to lpfc hba data structure.
5407 *
5408 * This routine retrieves the SLI4 device controller attributes (link type,
5409 * link number and BIOS version) for the port this PCI function is attached to.
5410 *
5411 * Return codes
4907cb7b 5412 * 0 - successful
b3b4f3e1 5413 * otherwise - failed to retrieve controller attributes
5414 **/
5415static int
b3b4f3e1 5416lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5417{
5418 LPFC_MBOXQ_t *mboxq;
5419 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5420 struct lpfc_controller_attribute *cntl_attr;
5421 void *virtaddr = NULL;
5422 uint32_t alloclen, reqlen;
5423 uint32_t shdr_status, shdr_add_status;
5424 union lpfc_sli4_cfg_shdr *shdr;
5425 int rc;
5426
5427 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5428 if (!mboxq)
5429 return -ENOMEM;
cd1c8301 5430
b3b4f3e1 5431 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5432 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5433 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5434 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5435 LPFC_SLI4_MBX_NEMBED);
b3b4f3e1 5436
5437 if (alloclen < reqlen) {
5438 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5439 "3084 Allocated DMA memory size (%d) is "
5440 "less than the requested DMA memory size "
5441 "(%d)\n", alloclen, reqlen);
5442 rc = -ENOMEM;
5443 goto out_free_mboxq;
5444 }
5445 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5446 virtaddr = mboxq->sge_array->addr[0];
5447 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5448 shdr = &mbx_cntl_attr->cfg_shdr;
5449 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5450 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5451 if (shdr_status || shdr_add_status || rc) {
5452 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5453 "3085 Mailbox x%x (x%x/x%x) failed, "
5454 "rc:x%x, status:x%x, add_status:x%x\n",
5455 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5456 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5457 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5458 rc, shdr_status, shdr_add_status);
5459 rc = -ENXIO;
5460 goto out_free_mboxq;
5461 }
b3b4f3e1 5462
5463 cntl_attr = &mbx_cntl_attr->cntl_attr;
5464 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5465 phba->sli4_hba.lnk_info.lnk_tp =
5466 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5467 phba->sli4_hba.lnk_info.lnk_no =
5468 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5469
5470 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5471 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5472 sizeof(phba->BIOSVersion));
5473
cd1c8301 5474 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
b3b4f3e1 5475 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
cd1c8301 5476 phba->sli4_hba.lnk_info.lnk_tp,
5477 phba->sli4_hba.lnk_info.lnk_no,
5478 phba->BIOSVersion);
5479out_free_mboxq:
5480 if (rc != MBX_TIMEOUT) {
5481 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5482 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5483 else
5484 mempool_free(mboxq, phba->mbox_mem_pool);
5485 }
5486 return rc;
5487}
5488
5489/**
5490 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5491 * @phba: pointer to lpfc hba data structure.
5492 *
5493 * This routine retrieves the SLI4 device physical port name that this PCI
5494 * function is attached to.
5495 *
5496 * Return codes
5497 * 0 - successful
5498 * otherwise - failed to retrieve physical port name
5499 **/
5500static int
5501lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5502{
5503 LPFC_MBOXQ_t *mboxq;
5504 struct lpfc_mbx_get_port_name *get_port_name;
5505 uint32_t shdr_status, shdr_add_status;
5506 union lpfc_sli4_cfg_shdr *shdr;
5507 char cport_name = 0;
5508 int rc;
5509
5510 /* We assume nothing at this point */
5511 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5512 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5513
5514 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5515 if (!mboxq)
5516 return -ENOMEM;
5517 /* obtain link type and link number via READ_CONFIG */
5518 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5519 lpfc_sli4_read_config(phba);
5520 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5521 goto retrieve_ppname;
5522
5523 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5524 rc = lpfc_sli4_get_ctl_attr(phba);
5525 if (rc)
5526 goto out_free_mboxq;
5527
5528retrieve_ppname:
5529 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5530 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5531 sizeof(struct lpfc_mbx_get_port_name) -
5532 sizeof(struct lpfc_sli4_cfg_mhdr),
5533 LPFC_SLI4_MBX_EMBED);
5534 get_port_name = &mboxq->u.mqe.un.get_port_name;
5535 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5536 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5537 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5538 phba->sli4_hba.lnk_info.lnk_tp);
5539 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5540 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5541 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5542 if (shdr_status || shdr_add_status || rc) {
5543 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5544 "3087 Mailbox x%x (x%x/x%x) failed: "
5545 "rc:x%x, status:x%x, add_status:x%x\n",
5546 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5547 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5548 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5549 rc, shdr_status, shdr_add_status);
5550 rc = -ENXIO;
5551 goto out_free_mboxq;
5552 }
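	/* The GET_PORT_NAME response returns one name character per link
	 * number; pick the character for this function's link.
	 */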
5553 switch (phba->sli4_hba.lnk_info.lnk_no) {
5554 case LPFC_LINK_NUMBER_0:
5555 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5556 &get_port_name->u.response);
5557 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5558 break;
5559 case LPFC_LINK_NUMBER_1:
5560 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5561 &get_port_name->u.response);
5562 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5563 break;
5564 case LPFC_LINK_NUMBER_2:
5565 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5566 &get_port_name->u.response);
5567 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5568 break;
5569 case LPFC_LINK_NUMBER_3:
5570 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5571 &get_port_name->u.response);
5572 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5573 break;
5574 default:
5575 break;
5576 }
5577
5578 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5579 phba->Port[0] = cport_name;
5580 phba->Port[1] = '\0';
5581 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5582 "3091 SLI get port name: %s\n", phba->Port);
5583 }
5584
5585out_free_mboxq:
5586 if (rc != MBX_TIMEOUT) {
5587 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5588 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5589 else
5590 mempool_free(mboxq, phba->mbox_mem_pool);
5591 }
5592 return rc;
5593}
5594
e59058c4 5595/**
5596 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5597 * @phba: pointer to lpfc hba data structure.
e59058c4 5598 *
5599 * This routine is called to explicitly arm the SLI4 device's completion and
5600 * event queues
5601 **/
5602static void
5603lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5604{
895427bd 5605 int qidx;
b71413dd 5606 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
cdb42bec 5607 struct lpfc_sli4_hdw_queue *qp;
da0436e9 5608
5609 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5610 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
b71413dd 5611 if (sli4_hba->nvmels_cq)
5612 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5613 LPFC_QUEUE_REARM);
1ba981fd 5614
5615 qp = sli4_hba->hdwq;
5616 if (sli4_hba->hdwq) {
5617 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5618 sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0,
5619 LPFC_QUEUE_REARM);
5620 sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0,
5621 LPFC_QUEUE_REARM);
cdb42bec 5622 }
1ba981fd 5623
6a828b0f 5624 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++)
5625 sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq,
5626 0, LPFC_QUEUE_REARM);
cdb42bec 5627 }
1ba981fd 5628
5629 if (phba->nvmet_support) {
5630 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5631 sli4_hba->sli4_write_cq_db(phba,
5632 sli4_hba->nvmet_cqset[qidx], 0,
5633 LPFC_QUEUE_REARM);
5634 }
2e90f4b5 5635 }
5636}
5637
5638/**
5639 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5640 * @phba: Pointer to HBA context object.
5641 * @type: The resource extent type.
5642 * @extnt_count: buffer to hold port available extent count.
5643 * @extnt_size: buffer to hold element count per extent.
6d368e53 5644 *
5645 * This function calls the port and retrieves the number of available
5646 * extents and their size for a particular extent type.
5647 *
5648 * Returns: 0 if successful. Nonzero otherwise.
6d368e53 5649 **/
b76f2dc9 5650int
5651lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5652 uint16_t *extnt_count, uint16_t *extnt_size)
5653{
5654 int rc = 0;
5655 uint32_t length;
5656 uint32_t mbox_tmo;
5657 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5658 LPFC_MBOXQ_t *mbox;
5659
5660 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5661 if (!mbox)
5662 return -ENOMEM;
5663
5664 /* Find out how many extents are available for this resource type */
5665 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5666 sizeof(struct lpfc_sli4_cfg_mhdr));
5667 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5668 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5669 length, LPFC_SLI4_MBX_EMBED);
5670
5671 /* Send an extents count of 0 - the GET doesn't use it. */
5672 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5673 LPFC_SLI4_MBX_EMBED);
5674 if (unlikely(rc)) {
5675 rc = -EIO;
5676 goto err_exit;
5677 }
5678
5679 if (!phba->sli4_hba.intr_enable)
5680 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5681 else {
a183a15f 5682 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5683 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5684 }
5685 if (unlikely(rc)) {
5686 rc = -EIO;
5687 goto err_exit;
5688 }
5689
5690 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5691 if (bf_get(lpfc_mbox_hdr_status,
5692 &rsrc_info->header.cfg_shdr.response)) {
5693 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5694 "2930 Failed to get resource extents "
5695 "Status 0x%x Add'l Status 0x%x\n",
5696 bf_get(lpfc_mbox_hdr_status,
5697 &rsrc_info->header.cfg_shdr.response),
5698 bf_get(lpfc_mbox_hdr_add_status,
5699 &rsrc_info->header.cfg_shdr.response));
5700 rc = -EIO;
5701 goto err_exit;
5702 }
5703
5704 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5705 &rsrc_info->u.rsp);
5706 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5707 &rsrc_info->u.rsp);
5708
5709 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5710 "3162 Retrieved extents type-%d from port: count:%d, "
5711 "size:%d\n", type, *extnt_count, *extnt_size);
5712
5713err_exit:
5714 mempool_free(mbox, phba->mbox_mem_pool);
5715 return rc;
5716}
5717
5718/**
5719 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5720 * @phba: Pointer to HBA context object.
5721 * @type: The extent type to check.
5722 *
5723 * This function reads the current available extents from the port and checks
5724 * if the extent count or extent size has changed since the last access.
5725 * Callers use this routine post port reset to understand if there is an
5726 * extent reprovisioning requirement.
5727 *
5728 * Returns:
5729 * -Error: error indicates problem.
5730 * 1: Extent count or size has changed.
5731 * 0: No changes.
5732 **/
5733static int
5734lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5735{
5736 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5737 uint16_t size_diff, rsrc_ext_size;
5738 int rc = 0;
5739 struct lpfc_rsrc_blks *rsrc_entry;
5740 struct list_head *rsrc_blk_list = NULL;
5741
5742 size_diff = 0;
5743 curr_ext_cnt = 0;
5744 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5745 &rsrc_ext_cnt,
5746 &rsrc_ext_size);
5747 if (unlikely(rc))
5748 return -EIO;
5749
5750 switch (type) {
5751 case LPFC_RSC_TYPE_FCOE_RPI:
5752 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5753 break;
5754 case LPFC_RSC_TYPE_FCOE_VPI:
5755 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5756 break;
5757 case LPFC_RSC_TYPE_FCOE_XRI:
5758 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5759 break;
5760 case LPFC_RSC_TYPE_FCOE_VFI:
5761 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5762 break;
5763 default:
5764 break;
5765 }
5766
5767 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5768 curr_ext_cnt++;
5769 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5770 size_diff++;
5771 }
5772
5773 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5774 rc = 1;
5775
5776 return rc;
5777}
5778
5779/**
5780 * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents
5781 * @phba: Pointer to HBA context object.
5782 * @extnt_cnt: number of available extents.
5783 * @type: the extent type (rpi, xri, vfi, vpi).
5784 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5785 * @mbox: pointer to the caller's allocated mailbox structure.
5786 *
5787 * This function executes the extents allocation request. It also
5788 * takes care of the amount of memory needed to allocate or get the
5789 * allocated extents. It is the caller's responsibility to evaluate
5790 * the response.
5791 *
5792 * Returns:
5793 * -Error: Error value describes the condition found.
5794 * 0: if successful
5795 **/
5796static int
8a9d2e80 5797lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5798 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5799{
5800 int rc = 0;
5801 uint32_t req_len;
5802 uint32_t emb_len;
5803 uint32_t alloc_len, mbox_tmo;
5804
5805 /* Calculate the total requested length of the dma memory */
8a9d2e80 5806 req_len = extnt_cnt * sizeof(uint16_t);
5807
5808 /*
5809 * Calculate the size of an embedded mailbox. The uint32_t
5810 * accounts for extents-specific word.
5811 */
5812 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5813 sizeof(uint32_t);
5814
5815 /*
5816 * Presume the allocation and response will fit into an embedded
5817 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5818 */
5819 *emb = LPFC_SLI4_MBX_EMBED;
5820 if (req_len > emb_len) {
8a9d2e80 5821 req_len = extnt_cnt * sizeof(uint16_t) +
5822 sizeof(union lpfc_sli4_cfg_shdr) +
5823 sizeof(uint32_t);
5824 *emb = LPFC_SLI4_MBX_NEMBED;
5825 }
5826
5827 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5828 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5829 req_len, *emb);
5830 if (alloc_len < req_len) {
5831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
b76f2dc9 5832 "2982 Allocated DMA memory size (x%x) is "
5833 "less than the requested DMA memory "
5834 "size (x%x)\n", alloc_len, req_len);
5835 return -ENOMEM;
5836 }
8a9d2e80 5837 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5838 if (unlikely(rc))
5839 return -EIO;
5840
5841 if (!phba->sli4_hba.intr_enable)
5842 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5843 else {
a183a15f 5844 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5845 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5846 }
5847
5848 if (unlikely(rc))
5849 rc = -EIO;
5850 return rc;
5851}
5852
5853/**
5854 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5855 * @phba: Pointer to HBA context object.
5856 * @type: The resource extent type to allocate.
5857 *
5858 * This function allocates the number of elements for the specified
5859 * resource type.
5860 **/
5861static int
5862lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5863{
5864 bool emb = false;
5865 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5866 uint16_t rsrc_id, rsrc_start, j, k;
5867 uint16_t *ids;
5868 int i, rc;
5869 unsigned long longs;
5870 unsigned long *bmask;
5871 struct lpfc_rsrc_blks *rsrc_blks;
5872 LPFC_MBOXQ_t *mbox;
5873 uint32_t length;
5874 struct lpfc_id_range *id_array = NULL;
5875 void *virtaddr = NULL;
5876 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5877 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5878 struct list_head *ext_blk_list;
5879
5880 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5881 &rsrc_cnt,
5882 &rsrc_size);
5883 if (unlikely(rc))
5884 return -EIO;
5885
5886 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5887 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5888 "3009 No available Resource Extents "
5889 "for resource type 0x%x: Count: 0x%x, "
5890 "Size 0x%x\n", type, rsrc_cnt,
5891 rsrc_size);
5892 return -ENOMEM;
5893 }
5894
5895 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5896 "2903 Post resource extents type-0x%x: "
5897 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5898
5899 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5900 if (!mbox)
5901 return -ENOMEM;
5902
8a9d2e80 5903 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5904 if (unlikely(rc)) {
5905 rc = -EIO;
5906 goto err_exit;
5907 }
5908
5909 /*
5910 * Figure out where the response is located. Then get local pointers
5911 * to the response data. The port does not guarantee to respond to
5912 * all extents counts request so update the local variable with the
5913 * allocated count from the port.
5914 */
5915 if (emb == LPFC_SLI4_MBX_EMBED) {
5916 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5917 id_array = &rsrc_ext->u.rsp.id[0];
5918 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5919 } else {
5920 virtaddr = mbox->sge_array->addr[0];
5921 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5922 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5923 id_array = &n_rsrc->id;
5924 }
5925
5926 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5927 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5928
5929 /*
5930 * Based on the resource size and count, correct the base and max
5931 * resource values.
5932 */
5933 length = sizeof(struct lpfc_rsrc_blks);
5934 switch (type) {
5935 case LPFC_RSC_TYPE_FCOE_RPI:
6396bb22 5936 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5937 sizeof(unsigned long),
5938 GFP_KERNEL);
5939 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5940 rc = -ENOMEM;
5941 goto err_exit;
5942 }
6396bb22 5943 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5944 sizeof(uint16_t),
5945 GFP_KERNEL);
5946 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5947 kfree(phba->sli4_hba.rpi_bmask);
5948 rc = -ENOMEM;
5949 goto err_exit;
5950 }
5951
5952 /*
5953 * The next_rpi was initialized with the maximum available
5954 * count but the port may allocate a smaller number. Catch
5955 * that case and update the next_rpi.
5956 */
5957 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5958
5959 /* Initialize local ptrs for common extent processing later. */
5960 bmask = phba->sli4_hba.rpi_bmask;
5961 ids = phba->sli4_hba.rpi_ids;
5962 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5963 break;
5964 case LPFC_RSC_TYPE_FCOE_VPI:
6396bb22 5965 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5966 GFP_KERNEL);
5967 if (unlikely(!phba->vpi_bmask)) {
5968 rc = -ENOMEM;
5969 goto err_exit;
5970 }
6396bb22 5971 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5972 GFP_KERNEL);
5973 if (unlikely(!phba->vpi_ids)) {
5974 kfree(phba->vpi_bmask);
5975 rc = -ENOMEM;
5976 goto err_exit;
5977 }
5978
5979 /* Initialize local ptrs for common extent processing later. */
5980 bmask = phba->vpi_bmask;
5981 ids = phba->vpi_ids;
5982 ext_blk_list = &phba->lpfc_vpi_blk_list;
5983 break;
5984 case LPFC_RSC_TYPE_FCOE_XRI:
6396bb22 5985 phba->sli4_hba.xri_bmask = kcalloc(longs,
5986 sizeof(unsigned long),
5987 GFP_KERNEL);
5988 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5989 rc = -ENOMEM;
5990 goto err_exit;
5991 }
8a9d2e80 5992 phba->sli4_hba.max_cfg_param.xri_used = 0;
6396bb22 5993 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5994 sizeof(uint16_t),
5995 GFP_KERNEL);
5996 if (unlikely(!phba->sli4_hba.xri_ids)) {
5997 kfree(phba->sli4_hba.xri_bmask);
5998 rc = -ENOMEM;
5999 goto err_exit;
6000 }
6001
6002 /* Initialize local ptrs for common extent processing later. */
6003 bmask = phba->sli4_hba.xri_bmask;
6004 ids = phba->sli4_hba.xri_ids;
6005 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6006 break;
6007 case LPFC_RSC_TYPE_FCOE_VFI:
6396bb22 6008 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6009 sizeof(unsigned long),
6010 GFP_KERNEL);
6011 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6012 rc = -ENOMEM;
6013 goto err_exit;
6014 }
6396bb22 6015 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6016 sizeof(uint16_t),
6017 GFP_KERNEL);
6018 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6019 kfree(phba->sli4_hba.vfi_bmask);
6020 rc = -ENOMEM;
6021 goto err_exit;
6022 }
6023
6024 /* Initialize local ptrs for common extent processing later. */
6025 bmask = phba->sli4_hba.vfi_bmask;
6026 ids = phba->sli4_hba.vfi_ids;
6027 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6028 break;
6029 default:
6030 /* Unsupported Opcode. Fail call. */
6031 id_array = NULL;
6032 bmask = NULL;
6033 ids = NULL;
6034 ext_blk_list = NULL;
6035 goto err_exit;
6036 }
6037
6038 /*
6039 * Complete initializing the extent configuration with the
6040 * allocated ids assigned to this function. The bitmask serves
6041 * as an index into the array and manages the available ids. The
6042 * array just stores the ids communicated to the port via the wqes.
6043 */
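	/* Extent ids are packed two per response word: even iterations take
	 * the low halfword of id_array[k], odd iterations the high halfword,
	 * and k advances after each odd iteration.
	 */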
6044 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6045 if ((i % 2) == 0)
6046 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6047 &id_array[k]);
6048 else
6049 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6050 &id_array[k]);
6051
6052 rsrc_blks = kzalloc(length, GFP_KERNEL);
6053 if (unlikely(!rsrc_blks)) {
6054 rc = -ENOMEM;
6055 kfree(bmask);
6056 kfree(ids);
6057 goto err_exit;
6058 }
6059 rsrc_blks->rsrc_start = rsrc_id;
6060 rsrc_blks->rsrc_size = rsrc_size;
6061 list_add_tail(&rsrc_blks->list, ext_blk_list);
6062 rsrc_start = rsrc_id;
895427bd 6063 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5e5b511d 6064 phba->sli4_hba.io_xri_start = rsrc_start +
895427bd 6065 lpfc_sli4_get_iocb_cnt(phba);
895427bd 6066 }
6067
6068 while (rsrc_id < (rsrc_start + rsrc_size)) {
6069 ids[j] = rsrc_id;
6070 rsrc_id++;
6071 j++;
6072 }
6073 /* Entire word processed. Get next word.*/
6074 if ((i % 2) == 1)
6075 k++;
6076 }
6077 err_exit:
6078 lpfc_sli4_mbox_cmd_free(phba, mbox);
6079 return rc;
6080}
6081
6082
6083
6084/**
6085 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6086 * @phba: Pointer to HBA context object.
6087 * @type: the extent's type.
6088 *
6089 * This function deallocates all extents of a particular resource type.
6090 * SLI4 does not allow for deallocating a particular extent range. It
6091 * is the caller's responsibility to release all kernel memory resources.
6092 **/
6093static int
6094lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6095{
6096 int rc;
6097 uint32_t length, mbox_tmo = 0;
6098 LPFC_MBOXQ_t *mbox;
6099 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6100 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6101
6102 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6103 if (!mbox)
6104 return -ENOMEM;
6105
6106 /*
6107 * This function sends an embedded mailbox because it only sends
6108 * the resource type. All extents of this type are released by the
6109 * port.
6110 */
6111 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6112 sizeof(struct lpfc_sli4_cfg_mhdr));
6113 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6114 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6115 length, LPFC_SLI4_MBX_EMBED);
6116
6117 /* Send an extents count of 0 - the dealloc doesn't use it. */
6118 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6119 LPFC_SLI4_MBX_EMBED);
6120 if (unlikely(rc)) {
6121 rc = -EIO;
6122 goto out_free_mbox;
6123 }
6124 if (!phba->sli4_hba.intr_enable)
6125 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6126 else {
a183a15f 6127 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6128 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6129 }
6130 if (unlikely(rc)) {
6131 rc = -EIO;
6132 goto out_free_mbox;
6133 }
6134
6135 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6136 if (bf_get(lpfc_mbox_hdr_status,
6137 &dealloc_rsrc->header.cfg_shdr.response)) {
6138 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6139 "2919 Failed to release resource extents "
6140 "for type %d - Status 0x%x Add'l Status 0x%x. "
6141 "Resource memory not released.\n",
6142 type,
6143 bf_get(lpfc_mbox_hdr_status,
6144 &dealloc_rsrc->header.cfg_shdr.response),
6145 bf_get(lpfc_mbox_hdr_add_status,
6146 &dealloc_rsrc->header.cfg_shdr.response));
6147 rc = -EIO;
6148 goto out_free_mbox;
6149 }
6150
6151 /* Release kernel memory resources for the specific type. */
6152 switch (type) {
6153 case LPFC_RSC_TYPE_FCOE_VPI:
6154 kfree(phba->vpi_bmask);
6155 kfree(phba->vpi_ids);
6156 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6157 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6158 &phba->lpfc_vpi_blk_list, list) {
6159 list_del_init(&rsrc_blk->list);
6160 kfree(rsrc_blk);
6161 }
16a3a208 6162 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6163 break;
6164 case LPFC_RSC_TYPE_FCOE_XRI:
6165 kfree(phba->sli4_hba.xri_bmask);
6166 kfree(phba->sli4_hba.xri_ids);
6167 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6168 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6169 list_del_init(&rsrc_blk->list);
6170 kfree(rsrc_blk);
6171 }
6172 break;
6173 case LPFC_RSC_TYPE_FCOE_VFI:
6174 kfree(phba->sli4_hba.vfi_bmask);
6175 kfree(phba->sli4_hba.vfi_ids);
6176 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6177 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6178 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6179 list_del_init(&rsrc_blk->list);
6180 kfree(rsrc_blk);
6181 }
6182 break;
6183 case LPFC_RSC_TYPE_FCOE_RPI:
6184 /* RPI bitmask and physical id array are cleaned up earlier. */
6185 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6186 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6187 list_del_init(&rsrc_blk->list);
6188 kfree(rsrc_blk);
6189 }
6190 break;
6191 default:
6192 break;
6193 }
6194
6195 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6196
6197 out_free_mbox:
6198 mempool_free(mbox, phba->mbox_mem_pool);
6199 return rc;
6200}
6201
bd4b3e5c 6202static void
6203lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6204 uint32_t feature)
65791f1f 6205{
65791f1f 6206 uint32_t len;
65791f1f 6207
6208 len = sizeof(struct lpfc_mbx_set_feature) -
6209 sizeof(struct lpfc_sli4_cfg_mhdr);
6210 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6211 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6212 LPFC_SLI4_MBX_EMBED);
6213
6214 switch (feature) {
6215 case LPFC_SET_UE_RECOVERY:
6216 bf_set(lpfc_mbx_set_feature_UER,
6217 &mbox->u.mqe.un.set_feature, 1);
6218 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6219 mbox->u.mqe.un.set_feature.param_len = 8;
6220 break;
6221 case LPFC_SET_MDS_DIAGS:
6222 bf_set(lpfc_mbx_set_feature_mds,
6223 &mbox->u.mqe.un.set_feature, 1);
6224 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
ae9e28f3 6225 &mbox->u.mqe.un.set_feature, 1);
6226 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6227 mbox->u.mqe.un.set_feature.param_len = 8;
6228 break;
65791f1f 6229 }
6230
6231 return;
6232}
6233
6234/**
6235 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6236 * @phba: Pointer to HBA context object.
6237 *
6238 * Disable FW logging into host memory on the adapter. To
6239 * be done before reading logs from the host memory.
6240 **/
6241void
6242lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6243{
6244 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6245
6246 ras_fwlog->ras_active = false;
6247
6248 /* Disable FW logging to host memory */
6249 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6250 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6251}
6252
6253/**
6254 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6255 * @phba: Pointer to HBA context object.
6256 *
6257 * This function is called to free memory allocated for RAS FW logging
6258 * support in the driver.
6259 **/
6260void
6261lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6262{
6263 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6264 struct lpfc_dmabuf *dmabuf, *next;
6265
6266 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6267 list_for_each_entry_safe(dmabuf, next,
6268 &ras_fwlog->fwlog_buff_list,
6269 list) {
6270 list_del(&dmabuf->list);
6271 dma_free_coherent(&phba->pcidev->dev,
6272 LPFC_RAS_MAX_ENTRY_SIZE,
6273 dmabuf->virt, dmabuf->phys);
6274 kfree(dmabuf);
6275 }
6276 }
6277
6278 if (ras_fwlog->lwpd.virt) {
6279 dma_free_coherent(&phba->pcidev->dev,
6280 sizeof(uint32_t) * 2,
6281 ras_fwlog->lwpd.virt,
6282 ras_fwlog->lwpd.phys);
6283 ras_fwlog->lwpd.virt = NULL;
6284 }
6285
6286 ras_fwlog->ras_active = false;
6287}
6288
6289/**
6290 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6291 * @phba: Pointer to HBA context object.
6292 * @fwlog_buff_count: Count of buffers to be created.
6293 *
6295 * This routine allocates DMA memory for the Log Write Position Data [LWPD]
6296 * and for the log buffers that are posted to the adapter.
6297 * Buffer count is calculated based on module param ras_fwlog_buffsize.
6298 * Size of each buffer posted to FW is 64K.
6298 **/
6299
6300static int
6301lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6302 uint32_t fwlog_buff_count)
6303{
6304 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6305 struct lpfc_dmabuf *dmabuf;
6306 int rc = 0, i = 0;
6307
6308 /* Initialize List */
6309 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6310
6311 /* Allocate memory for the LWPD */
6312 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6313 sizeof(uint32_t) * 2,
6314 &ras_fwlog->lwpd.phys,
6315 GFP_KERNEL);
6316 if (!ras_fwlog->lwpd.virt) {
cb34990b 6317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6318 "6185 LWPD Memory Alloc Failed\n");
6319
6320 return -ENOMEM;
6321 }
6322
6323 ras_fwlog->fw_buffcount = fwlog_buff_count;
6324 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6325 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6326 GFP_KERNEL);
6327 if (!dmabuf) {
6328 rc = -ENOMEM;
6329 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6330 "6186 Memory Alloc failed FW logging");
6331 goto free_mem;
6332 }
6333
750afb08 6334 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
d2cc9bcd 6335 LPFC_RAS_MAX_ENTRY_SIZE,
750afb08 6336 &dmabuf->phys, GFP_KERNEL);
6337 if (!dmabuf->virt) {
6338 kfree(dmabuf);
6339 rc = -ENOMEM;
6340 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6341 "6187 DMA Alloc Failed FW logging");
6342 goto free_mem;
6343 }
6344 dmabuf->buffer_tag = i;
6345 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6346 }
6347
6348free_mem:
6349 if (rc)
6350 lpfc_sli4_ras_dma_free(phba);
6351
6352 return rc;
6353}
6354
6355/**
6356 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6357 * @phba: pointer to lpfc hba data structure.
6358 * @pmboxq: pointer to the driver internal queue element for mailbox command.
6359 *
6360 * Completion handler for driver's RAS MBX command to the device.
6361 **/
6362static void
6363lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6364{
6365 MAILBOX_t *mb;
6366 union lpfc_sli4_cfg_shdr *shdr;
6367 uint32_t shdr_status, shdr_add_status;
6368 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6369
6370 mb = &pmb->u.mb;
6371
6372 shdr = (union lpfc_sli4_cfg_shdr *)
6373 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6374 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6375 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6376
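	/* A mailbox or CFG header error means FW logging cannot be enabled;
	 * mark RAS unsupported and release the DMA buffers.
	 */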
6377 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
cb34990b 6378 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6379 "6188 FW LOG mailbox "
6380 "completed with status x%x add_status x%x,"
6381 " mbx status x%x\n",
6382 shdr_status, shdr_add_status, mb->mbxStatus);
6383
6384 ras_fwlog->ras_hwsupport = false;
6385 goto disable_ras;
6386 }
6387
6388 ras_fwlog->ras_active = true;
6389 mempool_free(pmb, phba->mbox_mem_pool);
6390
6391 return;
6392
6393disable_ras:
6394 /* Free RAS DMA memory */
6395 lpfc_sli4_ras_dma_free(phba);
6396 mempool_free(pmb, phba->mbox_mem_pool);
6397}
6398
6399/**
6400 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6401 * @phba: pointer to lpfc hba data structure.
6402 * @fwlog_level: Logging verbosity level.
6403 * @fwlog_enable: Enable/Disable logging.
6404 *
6405 * Initialize memory and post mailbox command to enable FW logging in host
6406 * memory.
6407 **/
6408int
6409lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6410 uint32_t fwlog_level,
6411 uint32_t fwlog_enable)
6412{
6413 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6414 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6415 struct lpfc_dmabuf *dmabuf;
6416 LPFC_MBOXQ_t *mbox;
6417 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6418 int rc = 0;
6419
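	/* Convert the ras_fwlog_buffsize module parameter into a total log
	 * size and a count of LPFC_RAS_MAX_ENTRY_SIZE buffers to post.
	 */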
6420 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6421 phba->cfg_ras_fwlog_buffsize);
6422 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6423
6424 /*
6425 * If re-enabling FW logging support use earlier allocated
6426 * DMA buffers while posting MBX command.
6427 **/
6428 if (!ras_fwlog->lwpd.virt) {
6429 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6430 if (rc) {
6431 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
cb34990b 6432 "6189 FW Log Memory Allocation Failed");
6433 return rc;
6434 }
6435 }
6436
6437 /* Setup Mailbox command */
6438 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6439 if (!mbox) {
cb34990b 6440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6441 "6190 RAS MBX Alloc Failed");
6442 rc = -ENOMEM;
6443 goto mem_free;
6444 }
6445
6446 ras_fwlog->fw_loglevel = fwlog_level;
6447 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6448 sizeof(struct lpfc_sli4_cfg_mhdr));
6449
6450 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6451 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6452 len, LPFC_SLI4_MBX_EMBED);
6453
6454 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
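	/* Fill in logging enable, verbosity level, buffer count and the
	 * per-buffer size (in SLI4 pages) for the SET_DIAG_LOG_OPTION request.
	 */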
6455 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6456 fwlog_enable);
6457 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6458 ras_fwlog->fw_loglevel);
6459 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6460 ras_fwlog->fw_buffcount);
6461 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6462 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6463
6464 /* Update DMA buffer address */
6465 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6466 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6467
6468 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6469 putPaddrLow(dmabuf->phys);
6470
6471 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6472 putPaddrHigh(dmabuf->phys);
6473 }
6474
6475 /* Update LWPD address */
6476 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6477 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6478
6479 mbox->vport = phba->pport;
6480 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6481
6482 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6483
6484 if (rc == MBX_NOT_FINISHED) {
6485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6486 "6191 FW-Log Mailbox failed. "
6487 "status %d mbxStatus : x%x", rc,
6488 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6489 mempool_free(mbox, phba->mbox_mem_pool);
6490 rc = -EIO;
6491 goto mem_free;
6492 } else
6493 rc = 0;
6494mem_free:
6495 if (rc)
6496 lpfc_sli4_ras_dma_free(phba);
6497
6498 return rc;
6499}
6500
6501/**
6502 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6503 * @phba: Pointer to HBA context object.
6504 *
6505 * Check if RAS is supported on the adapter and initialize it.
6506 **/
6507void
6508lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6509{
6510 /* Check whether RAS FW logging needs to be enabled */
6511 if (lpfc_check_fwlog_support(phba))
6512 return;
6513
6514 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6515 LPFC_RAS_ENABLE_LOGGING);
6516}
6517
6518/**
6519 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6520 * @phba: Pointer to HBA context object.
6521 *
6522 * This function allocates all SLI4 resource identifiers.
6523 **/
6524int
6525lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6526{
6527 int i, rc, error = 0;
6528 uint16_t count, base;
6529 unsigned long longs;
6530
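	/* Without RPI header pages the full RPI range is provisioned up front */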
6531 if (!phba->sli4_hba.rpi_hdrs_in_use)
6532 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6533 if (phba->sli4_hba.extents_in_use) {
6534 /*
6535 * The port supports resource extents. The XRI, VPI, VFI, RPI
6536 * resource extent count must be read and allocated before
6537 * provisioning the resource id arrays.
6538 */
6539 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6540 LPFC_IDX_RSRC_RDY) {
6541 /*
6542 * Extent-based resources are set - the driver could
6543 * be in a port reset. Figure out if any corrective
6544 * actions need to be taken.
6545 */
6546 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6547 LPFC_RSC_TYPE_FCOE_VFI);
6548 if (rc != 0)
6549 error++;
6550 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6551 LPFC_RSC_TYPE_FCOE_VPI);
6552 if (rc != 0)
6553 error++;
6554 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6555 LPFC_RSC_TYPE_FCOE_XRI);
6556 if (rc != 0)
6557 error++;
6558 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6559 LPFC_RSC_TYPE_FCOE_RPI);
6560 if (rc != 0)
6561 error++;
6562
6563 /*
6564 * It's possible that the number of resources
6565 * provided to this port instance changed between
6566 * resets. Detect this condition and reallocate
6567 * resources. Otherwise, there is no action.
6568 */
6569 if (error) {
6570 lpfc_printf_log(phba, KERN_INFO,
6571 LOG_MBOX | LOG_INIT,
6572 "2931 Detected extent resource "
6573 "change. Reallocating all "
6574 "extents.\n");
6575 rc = lpfc_sli4_dealloc_extent(phba,
6576 LPFC_RSC_TYPE_FCOE_VFI);
6577 rc = lpfc_sli4_dealloc_extent(phba,
6578 LPFC_RSC_TYPE_FCOE_VPI);
6579 rc = lpfc_sli4_dealloc_extent(phba,
6580 LPFC_RSC_TYPE_FCOE_XRI);
6581 rc = lpfc_sli4_dealloc_extent(phba,
6582 LPFC_RSC_TYPE_FCOE_RPI);
6583 } else
6584 return 0;
6585 }
6586
6587 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6588 if (unlikely(rc))
6589 goto err_exit;
6590
6591 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6592 if (unlikely(rc))
6593 goto err_exit;
6594
6595 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6596 if (unlikely(rc))
6597 goto err_exit;
6598
6599 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6600 if (unlikely(rc))
6601 goto err_exit;
6602 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6603 LPFC_IDX_RSRC_RDY);
6604 return rc;
6605 } else {
6606 /*
6607 * The port does not support resource extents. The XRI, VPI,
6608 * VFI, RPI resource ids were determined from READ_CONFIG.
6609 * Just allocate the bitmasks and provision the resource id
6610 * arrays. If a port reset is active, the resources don't
6611 * need any action - just exit.
6612 */
6613 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6614 LPFC_IDX_RSRC_RDY) {
6615 lpfc_sli4_dealloc_resource_identifiers(phba);
6616 lpfc_sli4_remove_rpis(phba);
6617 }
6618 /* RPIs. */
6619 count = phba->sli4_hba.max_cfg_param.max_rpi;
6620 if (count <= 0) {
6621 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6622 "3279 Invalid provisioning of "
6623 "rpi:%d\n", count);
6624 rc = -EINVAL;
6625 goto err_exit;
6626 }
6627 base = phba->sli4_hba.max_cfg_param.rpi_base;
6628 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6629 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6630 sizeof(unsigned long),
6631 GFP_KERNEL);
6632 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6633 rc = -ENOMEM;
6634 goto err_exit;
6635 }
6396bb22 6636 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6637 GFP_KERNEL);
6638 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6639 rc = -ENOMEM;
6640 goto free_rpi_bmask;
6641 }
6642
6643 for (i = 0; i < count; i++)
6644 phba->sli4_hba.rpi_ids[i] = base + i;
6645
6646 /* VPIs. */
6647 count = phba->sli4_hba.max_cfg_param.max_vpi;
6648 if (count <= 0) {
6649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6650 "3280 Invalid provisioning of "
6651 "vpi:%d\n", count);
6652 rc = -EINVAL;
6653 goto free_rpi_ids;
6654 }
6655 base = phba->sli4_hba.max_cfg_param.vpi_base;
6656 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6657 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6658 GFP_KERNEL);
6659 if (unlikely(!phba->vpi_bmask)) {
6660 rc = -ENOMEM;
6661 goto free_rpi_ids;
6662 }
6396bb22 6663 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6664 GFP_KERNEL);
6665 if (unlikely(!phba->vpi_ids)) {
6666 rc = -ENOMEM;
6667 goto free_vpi_bmask;
6668 }
6669
6670 for (i = 0; i < count; i++)
6671 phba->vpi_ids[i] = base + i;
6672
6673 /* XRIs. */
6674 count = phba->sli4_hba.max_cfg_param.max_xri;
6675 if (count <= 0) {
6676 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6677 "3281 Invalid provisioning of "
6678 "xri:%d\n", count);
6679 rc = -EINVAL;
6680 goto free_vpi_ids;
6681 }
6682 base = phba->sli4_hba.max_cfg_param.xri_base;
6683 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6684 phba->sli4_hba.xri_bmask = kcalloc(longs,
6685 sizeof(unsigned long),
6686 GFP_KERNEL);
6687 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6688 rc = -ENOMEM;
6689 goto free_vpi_ids;
6690 }
41899be7 6691 phba->sli4_hba.max_cfg_param.xri_used = 0;
6396bb22 6692 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6693 GFP_KERNEL);
6694 if (unlikely(!phba->sli4_hba.xri_ids)) {
6695 rc = -ENOMEM;
6696 goto free_xri_bmask;
6697 }
6698
6699 for (i = 0; i < count; i++)
6700 phba->sli4_hba.xri_ids[i] = base + i;
6701
6702 /* VFIs. */
6703 count = phba->sli4_hba.max_cfg_param.max_vfi;
6704 if (count <= 0) {
6705 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6706 "3282 Invalid provisioning of "
6707 "vfi:%d\n", count);
6708 rc = -EINVAL;
6709 goto free_xri_ids;
6710 }
6711 base = phba->sli4_hba.max_cfg_param.vfi_base;
6712 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6713 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6714 sizeof(unsigned long),
6715 GFP_KERNEL);
6716 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6717 rc = -ENOMEM;
6718 goto free_xri_ids;
6719 }
6396bb22 6720 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6721 GFP_KERNEL);
6722 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6723 rc = -ENOMEM;
6724 goto free_vfi_bmask;
6725 }
6726
6727 for (i = 0; i < count; i++)
6728 phba->sli4_hba.vfi_ids[i] = base + i;
6729
6730 /*
6731 * Mark all resources ready. An HBA reset doesn't need
6732 * to reset the initialization.
6733 */
6734 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6735 LPFC_IDX_RSRC_RDY);
6736 return 0;
6737 }
6738
6739 free_vfi_bmask:
6740 kfree(phba->sli4_hba.vfi_bmask);
cd60be49 6741 phba->sli4_hba.vfi_bmask = NULL;
6742 free_xri_ids:
6743 kfree(phba->sli4_hba.xri_ids);
cd60be49 6744 phba->sli4_hba.xri_ids = NULL;
6745 free_xri_bmask:
6746 kfree(phba->sli4_hba.xri_bmask);
cd60be49 6747 phba->sli4_hba.xri_bmask = NULL;
6748 free_vpi_ids:
6749 kfree(phba->vpi_ids);
cd60be49 6750 phba->vpi_ids = NULL;
6751 free_vpi_bmask:
6752 kfree(phba->vpi_bmask);
cd60be49 6753 phba->vpi_bmask = NULL;
6754 free_rpi_ids:
6755 kfree(phba->sli4_hba.rpi_ids);
cd60be49 6756 phba->sli4_hba.rpi_ids = NULL;
6757 free_rpi_bmask:
6758 kfree(phba->sli4_hba.rpi_bmask);
cd60be49 6759 phba->sli4_hba.rpi_bmask = NULL;
6760 err_exit:
6761 return rc;
6762}
6763
6764/**
6765 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6766 * @phba: Pointer to HBA context object.
6767 *
6768 * This function frees the resource identifiers (or releases the resource
6769 * extents) that were allocated for each resource type.
6770 **/
6771int
6772lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6773{
6774 if (phba->sli4_hba.extents_in_use) {
6775 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6776 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6777 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6778 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6779 } else {
6780 kfree(phba->vpi_bmask);
16a3a208 6781 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6782 kfree(phba->vpi_ids);
6783 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6784 kfree(phba->sli4_hba.xri_bmask);
6785 kfree(phba->sli4_hba.xri_ids);
6786 kfree(phba->sli4_hba.vfi_bmask);
6787 kfree(phba->sli4_hba.vfi_ids);
6788 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6789 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6790 }
6791
6792 return 0;
6793}
6794
6795/**
6796 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6797 * @phba: Pointer to HBA context object.
6798 * @type: The resource extent type.
6799 * @extnt_count: buffer to hold port extent count response
6800 * @extnt_size: buffer to hold port extent size response.
6801 *
6802 * This function calls the port to read the host allocated extents
6803 * for a particular type.
6804 **/
6805int
6806lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6807 uint16_t *extnt_cnt, uint16_t *extnt_size)
6808{
6809 bool emb;
6810 int rc = 0;
6811 uint16_t curr_blks = 0;
6812 uint32_t req_len, emb_len;
6813 uint32_t alloc_len, mbox_tmo;
6814 struct list_head *blk_list_head;
6815 struct lpfc_rsrc_blks *rsrc_blk;
6816 LPFC_MBOXQ_t *mbox;
6817 void *virtaddr = NULL;
6818 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6819 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6820 union lpfc_sli4_cfg_shdr *shdr;
6821
6822 switch (type) {
6823 case LPFC_RSC_TYPE_FCOE_VPI:
6824 blk_list_head = &phba->lpfc_vpi_blk_list;
6825 break;
6826 case LPFC_RSC_TYPE_FCOE_XRI:
6827 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6828 break;
6829 case LPFC_RSC_TYPE_FCOE_VFI:
6830 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6831 break;
6832 case LPFC_RSC_TYPE_FCOE_RPI:
6833 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6834 break;
6835 default:
6836 return -EIO;
6837 }
6838
6839 /* Count the number of extents currently allocated for this type. */
6840 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6841 if (curr_blks == 0) {
6842 /*
6843 * The GET_ALLOCATED mailbox does not return the size,
6844 * just the count. The size should be just the size
6845 * stored in the current allocated block and all sizes
6846 * for an extent type are the same so set the return
6847 * value now.
6848 */
6849 *extnt_size = rsrc_blk->rsrc_size;
6850 }
6851 curr_blks++;
6852 }
6853
6854 /*
6855 * Calculate the size of an embedded mailbox. The uint32_t
6856 * accounts for extents-specific word.
6857 */
6858 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6859 sizeof(uint32_t);
6860
6861 /*
6862 * Presume the allocation and response will fit into an embedded
6863 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6864 */
6865 emb = LPFC_SLI4_MBX_EMBED;
6866 req_len = emb_len;
6867 if (req_len > emb_len) {
6868 req_len = curr_blks * sizeof(uint16_t) +
6869 sizeof(union lpfc_sli4_cfg_shdr) +
6870 sizeof(uint32_t);
6871 emb = LPFC_SLI4_MBX_NEMBED;
6872 }
6873
6874 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6875 if (!mbox)
6876 return -ENOMEM;
6877 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6878
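	/* Build a GET_ALLOC_RSRC_EXTENT mailbox sized for the current extent count */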
6879 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6880 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6881 req_len, emb);
6882 if (alloc_len < req_len) {
6883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6884 "2983 Allocated DMA memory size (x%x) is "
6885 "less than the requested DMA memory "
6886 "size (x%x)\n", alloc_len, req_len);
6887 rc = -ENOMEM;
6888 goto err_exit;
6889 }
6890 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6891 if (unlikely(rc)) {
6892 rc = -EIO;
6893 goto err_exit;
6894 }
6895
6896 if (!phba->sli4_hba.intr_enable)
6897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6898 else {
a183a15f 6899 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6900 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6901 }
6902
6903 if (unlikely(rc)) {
6904 rc = -EIO;
6905 goto err_exit;
6906 }
6907
6908 /*
6909 * Figure out where the response is located. Then get local pointers
6910 * to the response data. The port does not guarantee to respond to
6911 * all extents counts request so update the local variable with the
6912 * allocated count from the port.
6913 */
6914 if (emb == LPFC_SLI4_MBX_EMBED) {
6915 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6916 shdr = &rsrc_ext->header.cfg_shdr;
6917 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6918 } else {
6919 virtaddr = mbox->sge_array->addr[0];
6920 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6921 shdr = &n_rsrc->cfg_shdr;
6922 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6923 }
6924
6925 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6926 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6927 "2984 Failed to read allocated resources "
6928 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6929 type,
6930 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6931 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6932 rc = -EIO;
6933 goto err_exit;
6934 }
6935 err_exit:
6936 lpfc_sli4_mbox_cmd_free(phba, mbox);
6937 return rc;
6938}
6939
8a9d2e80 6940/**
0ef69968 6941 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
8a9d2e80 6942 * @phba: pointer to lpfc hba data structure.
6944 * @sgl_list: linked list of sgl buffers to post
6945 * @cnt: number of linked list buffers
8a9d2e80 6946 *
895427bd 6947 * This routine walks the list of buffers that have been allocated and
6948 * reposts them to the port using SGL block post. This is needed after a
6949 * pci_function_reset/warm_start or start. It attempts to construct blocks
6950 * of buffer sgls which contains contiguous xris and uses the non-embedded
6951 * SGL block post mailbox commands to post them to the port. For single
6952 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6953 * mailbox command for posting.
6954 *
6955 * Returns: 0 = success, non-zero failure.
6956 **/
6957static int
6958lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6959 struct list_head *sgl_list, int cnt)
6960{
6961 struct lpfc_sglq *sglq_entry = NULL;
6962 struct lpfc_sglq *sglq_entry_next = NULL;
6963 struct lpfc_sglq *sglq_entry_first = NULL;
6964 int status, total_cnt;
6965 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6966 int last_xritag = NO_XRI;
6967 LIST_HEAD(prep_sgl_list);
6968 LIST_HEAD(blck_sgl_list);
6969 LIST_HEAD(allc_sgl_list);
6970 LIST_HEAD(post_sgl_list);
6971 LIST_HEAD(free_sgl_list);
6972
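	/* Move the entire sgl list onto a local list so it can be walked
	 * without holding the sgl list lock.
	 */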
38c20673 6973 spin_lock_irq(&phba->hbalock);
6974 spin_lock(&phba->sli4_hba.sgl_list_lock);
6975 list_splice_init(sgl_list, &allc_sgl_list);
6976 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 6977 spin_unlock_irq(&phba->hbalock);
8a9d2e80 6978
895427bd 6979 total_cnt = cnt;
6980 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6981 &allc_sgl_list, list) {
6982 list_del_init(&sglq_entry->list);
6983 block_cnt++;
6984 if ((last_xritag != NO_XRI) &&
6985 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6986 /* a hole in xri block, form a sgl posting block */
6987 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6988 post_cnt = block_cnt - 1;
6989 /* prepare list for next posting block */
6990 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6991 block_cnt = 1;
6992 } else {
6993 /* prepare list for next posting block */
6994 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6995 /* enough sgls for non-embed sgl mbox command */
6996 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6997 list_splice_init(&prep_sgl_list,
6998 &blck_sgl_list);
6999 post_cnt = block_cnt;
7000 block_cnt = 0;
7001 }
7002 }
7003 num_posted++;
7004
7005 /* keep track of last sgl's xritag */
7006 last_xritag = sglq_entry->sli4_xritag;
7007
7008 /* end of repost sgl list condition for buffers */
7009 if (num_posted == total_cnt) {
7010 if (post_cnt == 0) {
7011 list_splice_init(&prep_sgl_list,
7012 &blck_sgl_list);
7013 post_cnt = block_cnt;
7014 } else if (block_cnt == 1) {
7015 status = lpfc_sli4_post_sgl(phba,
7016 sglq_entry->phys, 0,
7017 sglq_entry->sli4_xritag);
7018 if (!status) {
7019 /* successful, put sgl to posted list */
7020 list_add_tail(&sglq_entry->list,
7021 &post_sgl_list);
7022 } else {
7023 /* Failure, put sgl to free list */
7024 lpfc_printf_log(phba, KERN_WARNING,
7025 LOG_SLI,
895427bd 7026 "3159 Failed to post "
7027 "sgl, xritag:x%x\n",
7028 sglq_entry->sli4_xritag);
7029 list_add_tail(&sglq_entry->list,
7030 &free_sgl_list);
711ea882 7031 total_cnt--;
7032 }
7033 }
7034 }
7035
7036 /* continue until a nembed page worth of sgls */
7037 if (post_cnt == 0)
7038 continue;
7039
7040 /* post the buffer list sgls as a block */
7041 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7042 post_cnt);
7043
7044 if (!status) {
7045 /* success, put sgl list to posted sgl list */
7046 list_splice_init(&blck_sgl_list, &post_sgl_list);
7047 } else {
7048 /* Failure, put sgl list to free sgl list */
7049 sglq_entry_first = list_first_entry(&blck_sgl_list,
7050 struct lpfc_sglq,
7051 list);
7052 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
895427bd 7053 "3160 Failed to post sgl-list, "
7054 "xritag:x%x-x%x\n",
7055 sglq_entry_first->sli4_xritag,
7056 (sglq_entry_first->sli4_xritag +
7057 post_cnt - 1));
7058 list_splice_init(&blck_sgl_list, &free_sgl_list);
711ea882 7059 total_cnt -= post_cnt;
7060 }
7061
7062 /* don't reset xritag due to hole in xri block */
7063 if (block_cnt == 0)
7064 last_xritag = NO_XRI;
7065
895427bd 7066 /* reset sgl post count for next round of posting */
7067 post_cnt = 0;
7068 }
7069
895427bd 7070 /* free the sgls failed to post */
7071 lpfc_free_sgl_list(phba, &free_sgl_list);
7072
895427bd 7073 /* push sgls posted to the available list */
8a9d2e80 7074 if (!list_empty(&post_sgl_list)) {
38c20673 7075 spin_lock_irq(&phba->hbalock);
7076 spin_lock(&phba->sli4_hba.sgl_list_lock);
7077 list_splice_init(&post_sgl_list, sgl_list);
7078 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 7079 spin_unlock_irq(&phba->hbalock);
7080 } else {
7081 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 7082 "3161 Failure to post sgl to port.\n");
7083 return -EIO;
7084 }
7085
7086 /* return the number of XRIs actually posted */
7087 return total_cnt;
7088}
7089
0794d601 7090/**
5e5b511d 7091 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7092 * @phba: pointer to lpfc hba data structure.
7093 *
7094 * This routine walks the list of nvme buffers that have been allocated and
7095 * reposts them to the port using SGL block post. This is needed after a
7096 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7097 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
5e5b511d 7098 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7099 *
7100 * Returns: 0 = success, non-zero failure.
7101 **/
7102int
5e5b511d 7103lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7104{
7105 LIST_HEAD(post_nblist);
7106 int num_posted, rc = 0;
7107
7108 /* get all NVME buffers need to repost to a local list */
5e5b511d 7109 lpfc_io_buf_flush(phba, &post_nblist);
7110
7111 /* post the list of nvme buffer sgls to port if available */
7112 if (!list_empty(&post_nblist)) {
7113 num_posted = lpfc_sli4_post_io_sgl_list(
7114 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7115 /* failed to post any nvme buffer, return error */
7116 if (num_posted == 0)
7117 rc = -EIO;
7118 }
7119 return rc;
7120}
7121
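/**
 * lpfc_set_host_data - Populate the SET_HOST_DATA mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the mailbox object to build.
 *
 * Builds an embedded SET_HOST_DATA mailbox command that reports the host OS
 * and driver version string to the firmware.
 **/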
7122void
7123lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7124{
7125 uint32_t len;
7126
7127 len = sizeof(struct lpfc_mbx_set_host_data) -
7128 sizeof(struct lpfc_sli4_cfg_mhdr);
7129 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7130 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7131 LPFC_SLI4_MBX_EMBED);
7132
7133 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7134 mbox->u.mqe.un.set_host_data.param_len =
7135 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7136 snprintf(mbox->u.mqe.un.set_host_data.data,
7137 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7138 "Linux %s v"LPFC_DRIVER_VERSION,
7139 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7140}
7141
a8cf5dfe 7142int
6c621a22 7143lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
a8cf5dfe 7144 struct lpfc_queue *drq, int count, int idx)
7145{
7146 int rc, i;
7147 struct lpfc_rqe hrqe;
7148 struct lpfc_rqe drqe;
7149 struct lpfc_rqb *rqbp;
411de511 7150 unsigned long flags;
7151 struct rqb_dmabuf *rqb_buffer;
7152 LIST_HEAD(rqb_buf_list);
7153
411de511 7154 spin_lock_irqsave(&phba->hbalock, flags);
7155 rqbp = hrq->rqbp;
7156 for (i = 0; i < count; i++) {
7157 /* IF RQ is already full, don't bother */
7158 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7159 break;
7160 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7161 if (!rqb_buffer)
7162 break;
7163 rqb_buffer->hrq = hrq;
7164 rqb_buffer->drq = drq;
a8cf5dfe 7165 rqb_buffer->idx = idx;
7166 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7167 }
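	/* Post each allocated header/data buffer pair to the HRQ and DRQ */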
7168 while (!list_empty(&rqb_buf_list)) {
7169 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7170 hbuf.list);
7171
7172 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7173 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7174 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7175 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7176 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7177 if (rc < 0) {
7178 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7179 "6421 Cannot post to HRQ %d: %x %x %x "
7180 "DRQ %x %x\n",
7181 hrq->queue_id,
7182 hrq->host_index,
7183 hrq->hba_index,
7184 hrq->entry_count,
7185 drq->host_index,
7186 drq->hba_index);
7187 rqbp->rqb_free_buffer(phba, rqb_buffer);
7188 } else {
7189 list_add_tail(&rqb_buffer->hbuf.list,
7190 &rqbp->rqb_buffer_list);
7191 rqbp->buffer_count++;
7192 }
7193 }
411de511 7194 spin_unlock_irqrestore(&phba->hbalock, flags);
7195 return 1;
7196}
7197
da0436e9 7198/**
183b8021 7199 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7200 * @phba: Pointer to HBA context object.
7201 *
7202 * This function is the main SLI4 device initialization PCI function. This
7203 * function is called by the HBA initialization code, HBA reset code and
7204 * HBA error attention handler code. Caller is not required to hold any
7205 * locks.
7206 **/
7207int
7208lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7209{
c490850a 7210 int rc, i, cnt, len;
7211 LPFC_MBOXQ_t *mboxq;
7212 struct lpfc_mqe *mqe;
7213 uint8_t *vpd;
7214 uint32_t vpd_size;
7215 uint32_t ftr_rsp = 0;
7216 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7217 struct lpfc_vport *vport = phba->pport;
7218 struct lpfc_dmabuf *mp;
2d7dbc4c 7219 struct lpfc_rqb *rqbp;
7220
7221 /* Perform a PCI function reset to start from clean */
7222 rc = lpfc_pci_function_reset(phba);
7223 if (unlikely(rc))
7224 return -ENODEV;
7225
7226 /* Check the HBA Host Status Register for readiness */
7227 rc = lpfc_sli4_post_status_check(phba);
7228 if (unlikely(rc))
7229 return -ENODEV;
7230 else {
7231 spin_lock_irq(&phba->hbalock);
7232 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7233 spin_unlock_irq(&phba->hbalock);
7234 }
7235
7236 /*
7237 * Allocate a single mailbox container for initializing the
7238 * port.
7239 */
7240 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7241 if (!mboxq)
7242 return -ENOMEM;
7243
da0436e9 7244 /* Issue READ_REV to collect vpd and FW information. */
49198b37 7245 vpd_size = SLI4_PAGE_SIZE;
7246 vpd = kzalloc(vpd_size, GFP_KERNEL);
7247 if (!vpd) {
7248 rc = -ENOMEM;
7249 goto out_free_mbox;
7250 }
7251
7252 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7253 if (unlikely(rc)) {
7254 kfree(vpd);
7255 goto out_free_mbox;
7256 }
572709e2 7257
da0436e9 7258 mqe = &mboxq->u.mqe;
f1126688 7259 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
b5c53958 7260 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
76a95d75 7261 phba->hba_flag |= HBA_FCOE_MODE;
7262 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7263 } else {
76a95d75 7264 phba->hba_flag &= ~HBA_FCOE_MODE;
b5c53958 7265 }
7266
7267 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7268 LPFC_DCBX_CEE_MODE)
7269 phba->hba_flag |= HBA_FIP_SUPPORT;
7270 else
7271 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7272
7273 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7274
c31098ce 7275 if (phba->sli_rev != LPFC_SLI_REV4) {
7276 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7277 "0376 READ_REV Error. SLI Level %d "
7278 "FCoE enabled %d\n",
76a95d75 7279 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 7280 rc = -EIO;
7281 kfree(vpd);
7282 goto out_free_mbox;
da0436e9 7283 }
cd1c8301 7284
7285 /*
7286 * Continue initialization with default values even if driver failed
7287 * to read FCoE param config regions, only read parameters if the
7288 * board is FCoE
7289 */
7290 if (phba->hba_flag & HBA_FCOE_MODE &&
7291 lpfc_sli4_read_fcoe_params(phba))
7292 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7293 "2570 Failed to read FCoE parameters\n");
7294
7295 /*
7296 * Retrieve sli4 device physical port name, failure of doing it
7297 * is considered as non-fatal.
7298 */
7299 rc = lpfc_sli4_retrieve_pport_name(phba);
7300 if (!rc)
7301 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7302 "3080 Successful retrieving SLI4 device "
7303 "physical port name: %s.\n", phba->Port);
7304
7305 rc = lpfc_sli4_get_ctl_attr(phba);
7306 if (!rc)
7307 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7308 "8351 Successful retrieving SLI4 device "
7309 "CTL ATTR\n");
7310
7311 /*
7312 * Evaluate the read rev and vpd data. Populate the driver
7313 * state with the results. If this routine fails, the failure
7314 * is not fatal as the driver will use generic values.
7315 */
7316 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7317 if (unlikely(!rc)) {
7318 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7319 "0377 Error %d parsing vpd. "
7320 "Using defaults.\n", rc);
7321 rc = 0;
7322 }
76a95d75 7323 kfree(vpd);
da0436e9 7324
7325 /* Save information as VPD data */
7326 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7327 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7328
7329 /*
7330 * This is because first G7 ASIC doesn't support the standard
7331 * 0x5a NVME cmd descriptor type/subtype
7332 */
7333 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7334 LPFC_SLI_INTF_IF_TYPE_6) &&
7335 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7336 (phba->vpd.rev.smRev == 0) &&
7337 (phba->cfg_nvme_embed_cmd == 1))
7338 phba->cfg_nvme_embed_cmd = 0;
7339
7340 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7341 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7342 &mqe->un.read_rev);
7343 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7344 &mqe->un.read_rev);
7345 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7346 &mqe->un.read_rev);
7347 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7348 &mqe->un.read_rev);
7349 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7350 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7351 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7352 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7353 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7354 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7355 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7356 "(%d):0380 READ_REV Status x%x "
7357 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7358 mboxq->vport ? mboxq->vport->vpi : 0,
7359 bf_get(lpfc_mqe_status, mqe),
7360 phba->vpd.rev.opFwName,
7361 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7362 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9 7363
7364 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7365 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7366 if (phba->pport->cfg_lun_queue_depth > rc) {
7367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7368 "3362 LUN queue depth changed from %d to %d\n",
7369 phba->pport->cfg_lun_queue_depth, rc);
7370 phba->pport->cfg_lun_queue_depth = rc;
7371 }
7372
65791f1f 7373 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7374 LPFC_SLI_INTF_IF_TYPE_0) {
7375 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7377 if (rc == MBX_SUCCESS) {
7378 phba->hba_flag |= HBA_RECOVERABLE_UE;
7379 /* Set 1Sec interval to detect UE */
7380 phba->eratt_poll_interval = 1;
7381 phba->sli4_hba.ue_to_sr = bf_get(
7382 lpfc_mbx_set_feature_UESR,
7383 &mboxq->u.mqe.un.set_feature);
7384 phba->sli4_hba.ue_to_rp = bf_get(
7385 lpfc_mbx_set_feature_UERP,
7386 &mboxq->u.mqe.un.set_feature);
7387 }
7388 }
7389
7390 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7391 /* Enable MDS Diagnostics only if the SLI Port supports it */
7392 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7393 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7394 if (rc != MBX_SUCCESS)
7395 phba->mds_diags_support = 0;
7396 }
572709e2 7397
7398 /*
7399 * Discover the port's supported feature set and match it against the
7400 * hosts requests.
7401 */
7402 lpfc_request_features(phba, mboxq);
7403 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7404 if (unlikely(rc)) {
7405 rc = -EIO;
76a95d75 7406 goto out_free_mbox;
7407 }
7408
7409 /*
7410 * The port must support FCP initiator mode as this is the
7411 * only mode running in the host.
7412 */
7413 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7414 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7415 "0378 No support for fcpi mode.\n");
7416 ftr_rsp++;
7417 }
7418
7419 /* Performance Hints are ONLY for FCoE */
7420 if (phba->hba_flag & HBA_FCOE_MODE) {
7421 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7422 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7423 else
7424 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7425 }
7426
7427 /*
7428 * If the port cannot support the host's requested features
7429 * then turn off the global config parameters to disable the
7430 * feature in the driver. This is not a fatal error.
7431 */
7432 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7433 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7434 phba->cfg_enable_bg = 0;
7435 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
bf08611b 7436 ftr_rsp++;
f44ac12f 7437 }
bf08611b 7438 }
7439
7440 if (phba->max_vpi && phba->cfg_enable_npiv &&
7441 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7442 ftr_rsp++;
7443
7444 if (ftr_rsp) {
7445 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7446 "0379 Feature Mismatch Data: x%08x %08x "
7447 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7448 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7449 phba->cfg_enable_npiv, phba->max_vpi);
7450 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7451 phba->cfg_enable_bg = 0;
7452 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7453 phba->cfg_enable_npiv = 0;
7454 }
7455
7456 /* These SLI3 features are assumed in SLI4 */
7457 spin_lock_irq(&phba->hbalock);
7458 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7459 spin_unlock_irq(&phba->hbalock);
7460
7461 /*
7462 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7463 * calls depends on these resources to complete port setup.
7464 */
7465 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7466 if (rc) {
7467 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7468 "2920 Failed to alloc Resource IDs "
7469 "rc = x%x\n", rc);
7470 goto out_free_mbox;
7471 }
7472
7473 lpfc_set_host_data(phba, mboxq);
7474
7475 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7476 if (rc) {
7477 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7478 "2134 Failed to set host os driver version %x",
7479 rc);
7480 }
7481
da0436e9 7482 /* Read the port's service parameters. */
7483 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7484 if (rc) {
7485 phba->link_state = LPFC_HBA_ERROR;
7486 rc = -ENOMEM;
76a95d75 7487 goto out_free_mbox;
7488 }
7489
7490 mboxq->vport = vport;
7491 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3e1f0718 7492 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7493 if (rc == MBX_SUCCESS) {
7494 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7495 rc = 0;
7496 }
7497
7498 /*
7499 * This memory was allocated by the lpfc_read_sparam routine. Release
7500 * it to the mbuf pool.
7501 */
7502 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7503 kfree(mp);
3e1f0718 7504 mboxq->ctx_buf = NULL;
7505 if (unlikely(rc)) {
7506 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7507 "0382 READ_SPARAM command failed "
7508 "status %d, mbxStatus x%x\n",
7509 rc, bf_get(lpfc_mqe_status, mqe));
7510 phba->link_state = LPFC_HBA_ERROR;
7511 rc = -EIO;
76a95d75 7512 goto out_free_mbox;
7513 }
7514
0558056c 7515 lpfc_update_vport_wwn(vport);
7516
7517 /* Update the fc_host data structures with new wwn. */
7518 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7519 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7520
7521 /* Create all the SLI4 queues */
7522 rc = lpfc_sli4_queue_create(phba);
7523 if (rc) {
7524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7525 "3089 Failed to allocate queues\n");
7526 rc = -ENODEV;
7527 goto out_free_mbox;
7528 }
7529 /* Set up all the queues to the device */
7530 rc = lpfc_sli4_queue_setup(phba);
7531 if (unlikely(rc)) {
7532 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7533 "0381 Error %d during queue setup.\n ", rc);
7534 goto out_stop_timers;
7535 }
7536 /* Initialize the driver internal SLI layer lists. */
7537 lpfc_sli4_setup(phba);
7538 lpfc_sli4_queue_init(phba);
7539
7540 /* update host els xri-sgl sizes and mappings */
7541 rc = lpfc_sli4_els_sgl_update(phba);
7542 if (unlikely(rc)) {
7543 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7544 "1400 Failed to update xri-sgl size and "
7545 "mapping: %d\n", rc);
895427bd 7546 goto out_destroy_queue;
7547 }
7548
8a9d2e80 7549 /* register the els sgl pool to the port */
7550 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7551 phba->sli4_hba.els_xri_cnt);
7552 if (unlikely(rc < 0)) {
7553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7554 "0582 Error %d during els sgl post "
7555 "operation\n", rc);
7556 rc = -ENODEV;
895427bd 7557 goto out_destroy_queue;
8a9d2e80 7558 }
895427bd 7559 phba->sli4_hba.els_xri_cnt = rc;
8a9d2e80 7560
7561 if (phba->nvmet_support) {
7562 /* update host nvmet xri-sgl sizes and mappings */
7563 rc = lpfc_sli4_nvmet_sgl_update(phba);
7564 if (unlikely(rc)) {
7565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7566 "6308 Failed to update nvmet-sgl size "
7567 "and mapping: %d\n", rc);
7568 goto out_destroy_queue;
7569 }
7570
7571 /* register the nvmet sgl pool to the port */
7572 rc = lpfc_sli4_repost_sgl_list(
7573 phba,
7574 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7575 phba->sli4_hba.nvmet_xri_cnt);
7576 if (unlikely(rc < 0)) {
7577 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7578 "3117 Error %d during nvmet "
7579 "sgl post\n", rc);
7580 rc = -ENODEV;
7581 goto out_destroy_queue;
7582 }
7583 phba->sli4_hba.nvmet_xri_cnt = rc;
7584
7585 cnt = phba->cfg_iocb_cnt * 1024;
7586 /* We need 1 iocbq for every SGL, for IO processing */
7587 cnt += phba->sli4_hba.nvmet_xri_cnt;
f358dd0c 7588 } else {
0794d601 7589 /* update host common xri-sgl sizes and mappings */
5e5b511d 7590 rc = lpfc_sli4_io_sgl_update(phba);
7591 if (unlikely(rc)) {
7592 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
0794d601 7593 "6082 Failed to update nvme-sgl size "
7594 "and mapping: %d\n", rc);
7595 goto out_destroy_queue;
7596 }
7597
0794d601 7598 /* register the allocated common sgl pool to the port */
5e5b511d 7599 rc = lpfc_sli4_repost_io_sgl_list(phba);
7600 if (unlikely(rc)) {
7601 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7602 "6116 Error %d during nvme sgl post "
7603 "operation\n", rc);
7604 /* Some NVME buffers were moved to abort nvme list */
7605 /* A pci function reset will repost them */
7606 rc = -ENODEV;
7607 goto out_destroy_queue;
7608 }
6c621a22 7609 cnt = phba->cfg_iocb_cnt * 1024;
7610 }
7611
7612 if (!phba->sli.iocbq_lookup) {
7613 /* Initialize and populate the iocb list per host */
7614 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11e644e2 7615 "2821 initialize iocb list %d total %d\n",
7616 phba->cfg_iocb_cnt, cnt);
7617 rc = lpfc_init_iocb_list(phba, cnt);
7618 if (rc) {
7619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11e644e2 7620 "1413 Failed to init iocb list.\n");
7621 goto out_destroy_queue;
7622 }
7623 }
7624
7625 if (phba->nvmet_support)
7626 lpfc_nvmet_create_targetport(phba);
7627
2d7dbc4c 7628 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7629 /* Post initial buffers to all RQs created */
7630 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7631 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7632 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7633 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7634 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
61f3d4bf 7635 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7636 rqbp->buffer_count = 0;
7637
7638 lpfc_post_rq_buffer(
7639 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7640 phba->sli4_hba.nvmet_mrq_data[i],
2448e484 7641 phba->cfg_nvmet_mrq_post, i);
7642 }
7643 }
7644
7645 /* Post the rpi header region to the device. */
7646 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7647 if (unlikely(rc)) {
7648 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7649 "0393 Error %d during rpi post operation\n",
7650 rc);
7651 rc = -ENODEV;
895427bd 7652 goto out_destroy_queue;
da0436e9 7653 }
97f2ecf1 7654 lpfc_sli4_node_prep(phba);
da0436e9 7655
895427bd 7656 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2d7dbc4c 7657 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7658 /*
7659 * The FC Port needs to register FCFI (index 0)
7660 */
7661 lpfc_reg_fcfi(phba, mboxq);
7662 mboxq->vport = phba->pport;
7663 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7664 if (rc != MBX_SUCCESS)
7665 goto out_unset_queue;
7666 rc = 0;
7667 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7668 &mboxq->u.mqe.un.reg_fcfi);
7669 } else {
7670 /* We are a NVME Target mode with MRQ > 1 */
7671
7672 /* First register the FCFI */
7673 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7674 mboxq->vport = phba->pport;
7675 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7676 if (rc != MBX_SUCCESS)
7677 goto out_unset_queue;
7678 rc = 0;
7679 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7680 &mboxq->u.mqe.un.reg_fcfi_mrq);
7681
7682 /* Next register the MRQs */
7683 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7684 mboxq->vport = phba->pport;
7685 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7686 if (rc != MBX_SUCCESS)
7687 goto out_unset_queue;
7688 rc = 0;
7689 }
7690 /* Check if the port is configured to be disabled */
7691 lpfc_sli_read_link_ste(phba);
7692 }
7693
7694 /* Don't post more new bufs if repost already recovered
7695 * the nvme sgls.
7696 */
7697 if (phba->nvmet_support == 0) {
7698 if (phba->sli4_hba.io_xri_cnt == 0) {
7699 len = lpfc_new_io_buf(
7700 phba, phba->sli4_hba.io_xri_max);
7701 if (len == 0) {
7702 rc = -ENOMEM;
7703 goto out_unset_queue;
7704 }
7705
7706 if (phba->cfg_xri_rebalancing)
7707 lpfc_create_multixri_pools(phba);
7708 }
7709 } else {
7710 phba->cfg_xri_rebalancing = 0;
7711 }
7712
7713 /* Allow asynchronous mailbox command to go through */
7714 spin_lock_irq(&phba->hbalock);
7715 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7716 spin_unlock_irq(&phba->hbalock);
7717
7718 /* Post receive buffers to the device */
7719 lpfc_sli4_rb_setup(phba);
7720
7721 /* Reset HBA FCF states after HBA reset */
7722 phba->fcf.fcf_flag = 0;
7723 phba->fcf.current_rec.flag = 0;
7724
da0436e9 7725 /* Start the ELS watchdog timer */
8fa38513 7726 mod_timer(&vport->els_tmofunc,
256ec0d0 7727 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7728
7729 /* Start heart beat timer */
7730 mod_timer(&phba->hb_tmofunc,
256ec0d0 7731 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7732 phba->hb_outstanding = 0;
7733 phba->last_completion_time = jiffies;
7734
7735 /* start eq_delay heartbeat */
7736 if (phba->cfg_auto_imax)
7737 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7738 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7739
da0436e9 7740 /* Start error attention (ERATT) polling timer */
256ec0d0 7741 mod_timer(&phba->eratt_poll,
65791f1f 7742 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
da0436e9 7743
7744 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7745 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7746 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7747 if (!rc) {
7748 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7749 "2829 This device supports "
7750 "Advanced Error Reporting (AER)\n");
7751 spin_lock_irq(&phba->hbalock);
7752 phba->hba_flag |= HBA_AER_ENABLED;
7753 spin_unlock_irq(&phba->hbalock);
7754 } else {
7755 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7756 "2830 This device does not support "
7757 "Advanced Error Reporting (AER)\n");
7758 phba->cfg_aer_support = 0;
7759 }
0a96e975 7760 rc = 0;
7761 }
7762
7763 /*
7764 * The port is ready, set the host's link state to LINK_DOWN
7765 * in preparation for link interrupts.
7766 */
7767 spin_lock_irq(&phba->hbalock);
7768 phba->link_state = LPFC_LINK_DOWN;
7769
7770 /* Check if physical ports are trunked */
7771 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7772 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7773 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7774 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7775 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7776 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7777 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7778 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
da0436e9 7779 spin_unlock_irq(&phba->hbalock);
1dc5ec24 7780
7781 /* Arm the CQs and then EQs on device */
7782 lpfc_sli4_arm_cqeq_intr(phba);
7783
7784 /* Indicate device interrupt mode */
7785 phba->sli4_hba.intr_enable = 1;
7786
7787 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7788 (phba->hba_flag & LINK_DISABLED)) {
7789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7790 "3103 Adapter Link is disabled.\n");
7791 lpfc_down_link(phba, mboxq);
7792 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7793 if (rc != MBX_SUCCESS) {
7794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7795 "3104 Adapter failed to issue "
7796 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
c490850a 7797 goto out_io_buff_free;
7798 }
7799 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7800 /* don't perform init_link on SLI4 FC port loopback test */
7801 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7802 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7803 if (rc)
c490850a 7804 goto out_io_buff_free;
1b51197d 7805 }
7806 }
7807 mempool_free(mboxq, phba->mbox_mem_pool);
7808 return rc;
7809out_io_buff_free:
7810 /* Free allocated IO Buffers */
7811 lpfc_io_free(phba);
76a95d75 7812out_unset_queue:
da0436e9 7813 /* Unset all the queues set up in this routine when error out */
7814 lpfc_sli4_queue_unset(phba);
7815out_destroy_queue:
6c621a22 7816 lpfc_free_iocb_list(phba);
5350d872 7817 lpfc_sli4_queue_destroy(phba);
da0436e9 7818out_stop_timers:
5350d872 7819 lpfc_stop_hba_timers(phba);
7820out_free_mbox:
7821 mempool_free(mboxq, phba->mbox_mem_pool);
7822 return rc;
7823}
7824
7825/**
7826 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7827 * @ptr: context object - pointer to hba structure.
7828 *
7829 * This is the callback function for mailbox timer. The mailbox
7830 * timer is armed when a new mailbox command is issued and the timer
7831 * is deleted when the mailbox completes. The function is called by
7832 * the kernel timer code when a mailbox does not complete within
7833 * expected time. This function wakes up the worker thread to
7834 * process the mailbox timeout and returns. All the processing is
7835 * done by the worker thread function lpfc_mbox_timeout_handler.
7836 **/
7837void
f22eb4d3 7838lpfc_mbox_timeout(struct timer_list *t)
da0436e9 7839{
f22eb4d3 7840 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7841 unsigned long iflag;
7842 uint32_t tmo_posted;
7843
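	/* Post a WORKER_MBOX_TMO event to the worker thread only if one is
	 * not already pending.
	 */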
7844 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7845 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7846 if (!tmo_posted)
7847 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7848 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7849
7850 if (!tmo_posted)
7851 lpfc_worker_wake_up(phba);
7852 return;
7853}
7854
7855/**
7856 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7857 * are pending
7858 * @phba: Pointer to HBA context object.
7859 *
7860 * This function checks if any mailbox completions are present on the mailbox
7861 * completion queue.
7862 **/
3bb11fc5 7863static bool
7864lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7865{
7866
7867 uint32_t idx;
7868 struct lpfc_queue *mcq;
7869 struct lpfc_mcqe *mcqe;
7870 bool pending_completions = false;
7365f6fd 7871 uint8_t qe_valid;
7872
7873 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7874 return false;
7875
7876 /* Check for completions on mailbox completion queue */
7877
7878 mcq = phba->sli4_hba.mbx_cq;
7879 idx = mcq->hba_index;
7365f6fd 7880 qe_valid = mcq->qe_valid;
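	/* Walk the valid, unprocessed CQEs looking for a completed, non-async
	 * mailbox entry.
	 */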
7881 while (bf_get_le32(lpfc_cqe_valid,
7882 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7883 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7884 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7885 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7886 pending_completions = true;
7887 break;
7888 }
7889 idx = (idx + 1) % mcq->entry_count;
7890 if (mcq->hba_index == idx)
7891 break;
7892
7893 /* if the index wrapped around, toggle the valid bit */
7894 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7895 qe_valid = (qe_valid) ? 0 : 1;
7896 }
7897 return pending_completions;
7898
7899}
7900
7901/**
7902 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7903 * that were missed.
7904 * @phba: Pointer to HBA context object.
7905 *
7906 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7907 * may be missed, causing erroneous mailbox timeouts to occur. This function
7908 * checks to see if mbox completions are on the mailbox completion queue
7909 * and will process all the completions associated with the eq for the
7910 * mailbox completion queue.
7911 **/
7912bool
7913lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7914{
b71413dd 7915 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7916 uint32_t eqidx;
7917 struct lpfc_queue *fpeq = NULL;
7918 bool mbox_pending;
7919
7920 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7921 return false;
7922
7923 /* Find the eq associated with the mcq */
7924
cdb42bec 7925 if (sli4_hba->hdwq)
6a828b0f 7926 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++)
cdb42bec 7927 if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
b71413dd 7928 sli4_hba->mbx_cq->assoc_qid) {
cdb42bec 7929 fpeq = sli4_hba->hdwq[eqidx].hba_eq;
7930 break;
7931 }
7932 if (!fpeq)
7933 return false;
7934
7935 /* Turn off interrupts from this EQ */
7936
b71413dd 7937 sli4_hba->sli4_eq_clr_intr(fpeq);
7938
7939 /* Check to see if a mbox completion is pending */
7940
7941 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7942
7943 /*
7944 * If a mbox completion is pending, process all the events on EQ
7945 * associated with the mbox completion queue (this could include
7946 * mailbox commands, async events, els commands, receive queue data
7947 * and fcp commands)
7948 */
7949
7950 if (mbox_pending)
7951 /* process and rearm the EQ */
7952 lpfc_sli4_process_eq(phba, fpeq);
7953 else
7954 /* Always clear and re-arm the EQ */
7955 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7956
7957 return mbox_pending;
7958
7959}
7960
7961/**
7962 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7963 * @phba: Pointer to HBA context object.
7964 *
7965 * This function is called from worker thread when a mailbox command times out.
7966 * The caller is not required to hold any locks. This function will reset the
7967 * HBA and recover all the pending commands.
7968 **/
7969void
7970lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7971{
7972 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7973 MAILBOX_t *mb = NULL;
7974
da0436e9 7975 struct lpfc_sli *psli = &phba->sli;
da0436e9 7976
7977 /* If the mailbox completed, process the completion and return */
7978 if (lpfc_sli4_process_missed_mbox_completions(phba))
7979 return;
7980
7981 if (pmbox != NULL)
7982 mb = &pmbox->u.mb;
7983 /* Check the pmbox pointer first. There is a race condition
7984 * between the mbox timeout handler getting executed in the
7985 * worklist and the mailbox actually completing. When this
7986 * race condition occurs, the mbox_active will be NULL.
7987 */
7988 spin_lock_irq(&phba->hbalock);
7989 if (pmbox == NULL) {
7990 lpfc_printf_log(phba, KERN_WARNING,
7991 LOG_MBOX | LOG_SLI,
7992 "0353 Active Mailbox cleared - mailbox timeout "
7993 "exiting\n");
7994 spin_unlock_irq(&phba->hbalock);
7995 return;
7996 }
7997
7998 /* Mbox cmd <mbxCommand> timeout */
7999 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8000 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
8001 mb->mbxCommand,
8002 phba->pport->port_state,
8003 phba->sli.sli_flag,
8004 phba->sli.mbox_active);
8005 spin_unlock_irq(&phba->hbalock);
8006
8007 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8008 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 8009 * it to fail all outstanding SCSI IO.
da0436e9
JS
8010 */
8011 spin_lock_irq(&phba->pport->work_port_lock);
8012 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8013 spin_unlock_irq(&phba->pport->work_port_lock);
8014 spin_lock_irq(&phba->hbalock);
8015 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 8016 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
da0436e9
JS
8017 spin_unlock_irq(&phba->hbalock);
8018
db55fba8 8019 lpfc_sli_abort_fcp_rings(phba);
da0436e9
JS
8020
8021 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8022 "0345 Resetting board due to mailbox timeout\n");
8023
8024 /* Reset the HBA device */
8025 lpfc_reset_hba(phba);
8026}
8027
8028/**
8029 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8030 * @phba: Pointer to HBA context object.
8031 * @pmbox: Pointer to mailbox object.
 8032 * @flag: Flag indicating how the mailbox needs to be processed.
8033 *
8034 * This function is called by discovery code and HBA management code
8035 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8036 * function gets the hbalock to protect the data structures.
8037 * The mailbox command can be submitted in polling mode, in which case
8038 * this function will wait in a polling loop for the completion of the
8039 * mailbox.
 8040 * If the mailbox is submitted in no_wait mode (not polling), the
 8041 * function will submit the command and return immediately without waiting
8042 * for the mailbox completion. The no_wait is supported only when HBA
8043 * is in SLI2/SLI3 mode - interrupts are enabled.
8044 * The SLI interface allows only one mailbox pending at a time. If the
8045 * mailbox is issued in polling mode and there is already a mailbox
8046 * pending, then the function will return an error. If the mailbox is issued
8047 * in NO_WAIT mode and there is a mailbox pending already, the function
8048 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 8049 * The sli layer owns the mailbox object until the completion of the mailbox
 8050 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 8051 * return codes, the caller owns the mailbox command after the return of
8052 * the function.
e59058c4 8053 **/
3772a991
JS
8054static int
8055lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8056 uint32_t flag)
dea3101e 8057{
bf07bdea 8058 MAILBOX_t *mbx;
2e0fef85 8059 struct lpfc_sli *psli = &phba->sli;
dea3101e 8060 uint32_t status, evtctr;
9940b97b 8061 uint32_t ha_copy, hc_copy;
dea3101e 8062 int i;
09372820 8063 unsigned long timeout;
dea3101e 8064 unsigned long drvr_flag = 0;
34b02dcd 8065 uint32_t word0, ldata;
dea3101e 8066 void __iomem *to_slim;
58da1ffb
JS
8067 int processing_queue = 0;
8068
8069 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8070 if (!pmbox) {
8568a4d2 8071 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 8072 /* processing mbox queue from intr_handler */
3772a991
JS
8073 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8074 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8075 return MBX_SUCCESS;
8076 }
58da1ffb 8077 processing_queue = 1;
58da1ffb
JS
8078 pmbox = lpfc_mbox_get(phba);
8079 if (!pmbox) {
8080 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8081 return MBX_SUCCESS;
8082 }
8083 }
dea3101e 8084
ed957684 8085 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 8086 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 8087 if(!pmbox->vport) {
58da1ffb 8088 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 8089 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 8090 LOG_MBOX | LOG_VPORT,
e8b62011 8091 "1806 Mbox x%x failed. No vport\n",
3772a991 8092 pmbox->u.mb.mbxCommand);
ed957684 8093 dump_stack();
58da1ffb 8094 goto out_not_finished;
ed957684
JS
8095 }
8096 }
8097
8d63f375 8098 /* If the PCI channel is in offline state, do not post mbox. */
58da1ffb
JS
8099 if (unlikely(pci_channel_offline(phba->pcidev))) {
8100 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8101 goto out_not_finished;
8102 }
8d63f375 8103
a257bf90
JS
8104 /* If HBA has a deferred error attention, fail the iocb. */
8105 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8106 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8107 goto out_not_finished;
8108 }
8109
dea3101e 8110 psli = &phba->sli;
92d7f7b0 8111
bf07bdea 8112 mbx = &pmbox->u.mb;
dea3101e 8113 status = MBX_SUCCESS;
8114
2e0fef85
JS
8115 if (phba->link_state == LPFC_HBA_ERROR) {
8116 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
8117
8118 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8119 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8120 "(%d):0311 Mailbox command x%x cannot "
8121 "issue Data: x%x x%x\n",
8122 pmbox->vport ? pmbox->vport->vpi : 0,
8123 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 8124 goto out_not_finished;
41415862
JW
8125 }
8126
bf07bdea 8127 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9940b97b
JS
8128 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8129 !(hc_copy & HC_MBINT_ENA)) {
8130 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8131 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3772a991
JS
8132 "(%d):2528 Mailbox command x%x cannot "
8133 "issue Data: x%x x%x\n",
8134 pmbox->vport ? pmbox->vport->vpi : 0,
8135 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9940b97b
JS
8136 goto out_not_finished;
8137 }
9290831f
JS
8138 }
8139
dea3101e 8140 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8141 /* Polling for a mbox command when another one is already active
8142 * is not allowed in SLI. Also, the driver must have established
8143 * SLI2 mode to queue and process multiple mbox commands.
8144 */
8145
8146 if (flag & MBX_POLL) {
2e0fef85 8147 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8148
8149 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8150 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8151 "(%d):2529 Mailbox command x%x "
8152 "cannot issue Data: x%x x%x\n",
8153 pmbox->vport ? pmbox->vport->vpi : 0,
8154 pmbox->u.mb.mbxCommand,
8155 psli->sli_flag, flag);
58da1ffb 8156 goto out_not_finished;
dea3101e 8157 }
8158
3772a991 8159 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 8160 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8161 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8162 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8163 "(%d):2530 Mailbox command x%x "
8164 "cannot issue Data: x%x x%x\n",
8165 pmbox->vport ? pmbox->vport->vpi : 0,
8166 pmbox->u.mb.mbxCommand,
8167 psli->sli_flag, flag);
58da1ffb 8168 goto out_not_finished;
dea3101e 8169 }
8170
dea3101e 8171 /* Another mailbox command is still being processed, queue this
8172 * command to be processed later.
8173 */
8174 lpfc_mbox_put(phba, pmbox);
8175
8176 /* Mbox cmd issue - BUSY */
ed957684 8177 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8178 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 8179 "x%x x%x x%x x%x\n",
92d7f7b0 8180 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
e92974f6
JS
8181 mbx->mbxCommand,
8182 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8183 psli->sli_flag, flag);
dea3101e 8184
8185 psli->slistat.mbox_busy++;
2e0fef85 8186 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8187
858c9f6c
JS
8188 if (pmbox->vport) {
8189 lpfc_debugfs_disc_trc(pmbox->vport,
8190 LPFC_DISC_TRC_MBOX_VPORT,
8191 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8192 (uint32_t)mbx->mbxCommand,
8193 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8194 }
8195 else {
8196 lpfc_debugfs_disc_trc(phba->pport,
8197 LPFC_DISC_TRC_MBOX,
8198 "MBOX Bsy: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8199 (uint32_t)mbx->mbxCommand,
8200 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8201 }
8202
2e0fef85 8203 return MBX_BUSY;
dea3101e 8204 }
8205
dea3101e 8206 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8207
8208 /* If we are not polling, we MUST be in SLI2 mode */
8209 if (flag != MBX_POLL) {
3772a991 8210 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 8211 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 8212 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8213 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8214 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8215 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8216 "(%d):2531 Mailbox command x%x "
8217 "cannot issue Data: x%x x%x\n",
8218 pmbox->vport ? pmbox->vport->vpi : 0,
8219 pmbox->u.mb.mbxCommand,
8220 psli->sli_flag, flag);
58da1ffb 8221 goto out_not_finished;
dea3101e 8222 }
8223 /* timeout active mbox command */
256ec0d0
JS
8224 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8225 1000);
8226 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e 8227 }
8228
8229 /* Mailbox cmd <cmd> issue */
ed957684 8230 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8231 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 8232 "x%x\n",
e8b62011 8233 pmbox->vport ? pmbox->vport->vpi : 0,
e92974f6
JS
8234 mbx->mbxCommand,
8235 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8236 psli->sli_flag, flag);
dea3101e 8237
bf07bdea 8238 if (mbx->mbxCommand != MBX_HEARTBEAT) {
858c9f6c
JS
8239 if (pmbox->vport) {
8240 lpfc_debugfs_disc_trc(pmbox->vport,
8241 LPFC_DISC_TRC_MBOX_VPORT,
8242 "MBOX Send vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8243 (uint32_t)mbx->mbxCommand,
8244 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8245 }
8246 else {
8247 lpfc_debugfs_disc_trc(phba->pport,
8248 LPFC_DISC_TRC_MBOX,
8249 "MBOX Send: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8250 (uint32_t)mbx->mbxCommand,
8251 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8252 }
8253 }
8254
dea3101e 8255 psli->slistat.mbox_cmd++;
8256 evtctr = psli->slistat.mbox_event;
8257
8258 /* next set own bit for the adapter and copy over command word */
bf07bdea 8259 mbx->mbxOwner = OWN_CHIP;
dea3101e 8260
3772a991 8261 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7a470277
JS
8262 /* Populate mbox extension offset word. */
8263 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 8264 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8265 = (uint8_t *)phba->mbox_ext
8266 - (uint8_t *)phba->mbox;
8267 }
8268
8269 /* Copy the mailbox extension data */
3e1f0718
JS
8270 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8271 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8272 (uint8_t *)phba->mbox_ext,
8273 pmbox->in_ext_byte_len);
7a470277
JS
8274 }
8275 /* Copy command data to host SLIM area */
bf07bdea 8276 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 8277 } else {
7a470277
JS
8278 /* Populate mbox extension offset word. */
8279 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 8280 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8281 = MAILBOX_HBA_EXT_OFFSET;
8282
8283 /* Copy the mailbox extension data */
3e1f0718 8284 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
7a470277
JS
8285 lpfc_memcpy_to_slim(phba->MBslimaddr +
8286 MAILBOX_HBA_EXT_OFFSET,
3e1f0718 8287 pmbox->ctx_buf, pmbox->in_ext_byte_len);
7a470277 8288
895427bd 8289 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8290 /* copy command data into host mbox for cmpl */
895427bd
JS
8291 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8292 MAILBOX_CMD_SIZE);
dea3101e 8293
8294 /* First copy mbox command data to HBA SLIM, skip past first
8295 word */
8296 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 8297 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e 8298 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8299
8300 /* Next copy over first word, with mbxOwner set */
bf07bdea 8301 ldata = *((uint32_t *)mbx);
dea3101e 8302 to_slim = phba->MBslimaddr;
8303 writel(ldata, to_slim);
8304 readl(to_slim); /* flush */
8305
895427bd 8306 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8307 /* switch over to host mailbox */
3772a991 8308 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 8309 }
8310
8311 wmb();
dea3101e 8312
8313 switch (flag) {
8314 case MBX_NOWAIT:
09372820 8315 /* Set up reference to mailbox command */
dea3101e 8316 psli->mbox_active = pmbox;
09372820
JS
8317 /* Interrupt board to do it */
8318 writel(CA_MBATT, phba->CAregaddr);
8319 readl(phba->CAregaddr); /* flush */
8320 /* Don't wait for it to finish, just return */
dea3101e 8321 break;
8322
8323 case MBX_POLL:
09372820 8324 /* Set up null reference to mailbox command */
dea3101e 8325 psli->mbox_active = NULL;
09372820
JS
8326 /* Interrupt board to do it */
8327 writel(CA_MBATT, phba->CAregaddr);
8328 readl(phba->CAregaddr); /* flush */
8329
3772a991 8330 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8331 /* First read mbox status word */
34b02dcd 8332 word0 = *((uint32_t *)phba->mbox);
dea3101e 8333 word0 = le32_to_cpu(word0);
8334 } else {
8335 /* First read mbox status word */
9940b97b
JS
8336 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8337 spin_unlock_irqrestore(&phba->hbalock,
8338 drvr_flag);
8339 goto out_not_finished;
8340 }
dea3101e 8341 }
8342
8343 /* Read the HBA Host Attention Register */
9940b97b
JS
8344 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8345 spin_unlock_irqrestore(&phba->hbalock,
8346 drvr_flag);
8347 goto out_not_finished;
8348 }
a183a15f
JS
8349 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8350 1000) + jiffies;
09372820 8351 i = 0;
dea3101e 8352 /* Wait for command to complete */
41415862
JW
8353 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8354 (!(ha_copy & HA_MBATT) &&
2e0fef85 8355 (phba->link_state > LPFC_WARM_START))) {
09372820 8356 if (time_after(jiffies, timeout)) {
dea3101e 8357 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8358 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 8359 drvr_flag);
58da1ffb 8360 goto out_not_finished;
dea3101e 8361 }
8362
8363 /* Check if we took a mbox interrupt while we were
8364 polling */
8365 if (((word0 & OWN_CHIP) != OWN_CHIP)
8366 && (evtctr != psli->slistat.mbox_event))
8367 break;
8368
09372820
JS
8369 if (i++ > 10) {
8370 spin_unlock_irqrestore(&phba->hbalock,
8371 drvr_flag);
8372 msleep(1);
8373 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8374 }
dea3101e 8375
3772a991 8376 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8377 /* First copy command data */
34b02dcd 8378 word0 = *((uint32_t *)phba->mbox);
dea3101e 8379 word0 = le32_to_cpu(word0);
bf07bdea 8380 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 8381 MAILBOX_t *slimmb;
34b02dcd 8382 uint32_t slimword0;
dea3101e 8383 /* Check real SLIM for any errors */
8384 slimword0 = readl(phba->MBslimaddr);
8385 slimmb = (MAILBOX_t *) & slimword0;
8386 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8387 && slimmb->mbxStatus) {
8388 psli->sli_flag &=
3772a991 8389 ~LPFC_SLI_ACTIVE;
dea3101e 8390 word0 = slimword0;
8391 }
8392 }
8393 } else {
8394 /* First copy command data */
8395 word0 = readl(phba->MBslimaddr);
8396 }
8397 /* Read the HBA Host Attention Register */
9940b97b
JS
8398 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8399 spin_unlock_irqrestore(&phba->hbalock,
8400 drvr_flag);
8401 goto out_not_finished;
8402 }
dea3101e 8403 }
8404
3772a991 8405 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8406 /* copy results back to user */
2ea259ee
JS
8407 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8408 MAILBOX_CMD_SIZE);
7a470277 8409 /* Copy the mailbox extension data */
3e1f0718 8410 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
7a470277 8411 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
3e1f0718 8412 pmbox->ctx_buf,
7a470277
JS
8413 pmbox->out_ext_byte_len);
8414 }
dea3101e 8415 } else {
8416 /* First copy command data */
bf07bdea 8417 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
2ea259ee 8418 MAILBOX_CMD_SIZE);
7a470277 8419 /* Copy the mailbox extension data */
3e1f0718
JS
8420 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8421 lpfc_memcpy_from_slim(
8422 pmbox->ctx_buf,
7a470277
JS
8423 phba->MBslimaddr +
8424 MAILBOX_HBA_EXT_OFFSET,
8425 pmbox->out_ext_byte_len);
dea3101e 8426 }
8427 }
8428
8429 writel(HA_MBATT, phba->HAregaddr);
8430 readl(phba->HAregaddr); /* flush */
8431
8432 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 8433 status = mbx->mbxStatus;
dea3101e 8434 }
8435
2e0fef85
JS
8436 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8437 return status;
58da1ffb
JS
8438
8439out_not_finished:
8440 if (processing_queue) {
da0436e9 8441 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
58da1ffb
JS
8442 lpfc_mbox_cmpl_put(phba, pmbox);
8443 }
8444 return MBX_NOT_FINISHED;
dea3101e 8445}
8446
f1126688
JS
8447/**
8448 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8449 * @phba: Pointer to HBA context object.
8450 *
8451 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8452 * the driver internal pending mailbox queue. It will then try to wait out the
 8453 * possible outstanding mailbox command before returning.
8454 *
8455 * Returns:
8456 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8457 * the outstanding mailbox command timed out.
8458 **/
8459static int
8460lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8461{
8462 struct lpfc_sli *psli = &phba->sli;
f1126688 8463 int rc = 0;
a183a15f 8464 unsigned long timeout = 0;
f1126688
JS
8465
8466 /* Mark the asynchronous mailbox command posting as blocked */
8467 spin_lock_irq(&phba->hbalock);
8468 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
f1126688
JS
8469 /* Determine how long we might wait for the active mailbox
8470 * command to be gracefully completed by firmware.
8471 */
a183a15f
JS
8472 if (phba->sli.mbox_active)
8473 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8474 phba->sli.mbox_active) *
8475 1000) + jiffies;
8476 spin_unlock_irq(&phba->hbalock);
8477
e8d3c3b1
JS
8478 /* Make sure the mailbox is really active */
8479 if (timeout)
8480 lpfc_sli4_process_missed_mbox_completions(phba);
8481
f1126688
JS
 8482	/* Wait for the outstanding mailbox command to complete */
8483 while (phba->sli.mbox_active) {
8484 /* Check active mailbox complete status every 2ms */
8485 msleep(2);
8486 if (time_after(jiffies, timeout)) {
 8487			/* Timeout, mark the outstanding cmd not complete */
8488 rc = 1;
8489 break;
8490 }
8491 }
8492
 8493	/* Cannot cleanly block async mailbox command, fail it */
8494 if (rc) {
8495 spin_lock_irq(&phba->hbalock);
8496 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8497 spin_unlock_irq(&phba->hbalock);
8498 }
8499 return rc;
8500}
8501
8502/**
 8503 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8504 * @phba: Pointer to HBA context object.
8505 *
 8506 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8507 * commands from the driver internal pending mailbox queue. It makes sure
8508 * that there is no outstanding mailbox command before resuming posting
 8509 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8510 * mailbox command, it will try to wait it out before resuming asynchronous
8511 * mailbox command posting.
8512 **/
8513static void
8514lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8515{
8516 struct lpfc_sli *psli = &phba->sli;
8517
8518 spin_lock_irq(&phba->hbalock);
8519 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8520 /* Asynchronous mailbox posting is not blocked, do nothing */
8521 spin_unlock_irq(&phba->hbalock);
8522 return;
8523 }
8524
 8525	/* The outstanding synchronous mailbox command is guaranteed to be done,
 8526	 * whether it completed successfully or timed out; after a timeout the
 8527	 * outstanding mailbox command is always removed, so just unblock async
 8528	 * mailbox command posting and resume.
 8529	 */
8530 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8531 spin_unlock_irq(&phba->hbalock);
8532
 8533	/* wake up worker thread to post asynchronous mailbox command */
8534 lpfc_worker_wake_up(phba);
8535}
8536
2d843edc
JS
8537/**
8538 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8539 * @phba: Pointer to HBA context object.
8540 * @mboxq: Pointer to mailbox object.
8541 *
8542 * The function waits for the bootstrap mailbox register ready bit from
8543 * port for twice the regular mailbox command timeout value.
8544 *
8545 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8546 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8547 **/
8548static int
8549lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8550{
8551 uint32_t db_ready;
8552 unsigned long timeout;
8553 struct lpfc_register bmbx_reg;
8554
8555 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8556 * 1000) + jiffies;
8557
8558 do {
8559 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8560 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8561 if (!db_ready)
e2ffe4d5 8562 mdelay(2);
2d843edc
JS
8563
8564 if (time_after(jiffies, timeout))
8565 return MBXERR_ERROR;
8566 } while (!db_ready);
8567
8568 return 0;
8569}
8570
da0436e9
JS
8571/**
8572 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8573 * @phba: Pointer to HBA context object.
8574 * @mboxq: Pointer to mailbox object.
8575 *
8576 * The function posts a mailbox to the port. The mailbox is expected
 8577 * to be completely filled in and ready for the port to operate on it.
8578 * This routine executes a synchronous completion operation on the
8579 * mailbox by polling for its completion.
8580 *
8581 * The caller must not be holding any locks when calling this routine.
8582 *
8583 * Returns:
8584 * MBX_SUCCESS - mailbox posted successfully
8585 * Any of the MBX error values.
8586 **/
8587static int
8588lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8589{
8590 int rc = MBX_SUCCESS;
8591 unsigned long iflag;
da0436e9
JS
8592 uint32_t mcqe_status;
8593 uint32_t mbx_cmnd;
da0436e9
JS
8594 struct lpfc_sli *psli = &phba->sli;
8595 struct lpfc_mqe *mb = &mboxq->u.mqe;
8596 struct lpfc_bmbx_create *mbox_rgn;
8597 struct dma_address *dma_address;
da0436e9
JS
8598
8599 /*
8600 * Only one mailbox can be active to the bootstrap mailbox region
8601 * at a time and there is no queueing provided.
8602 */
8603 spin_lock_irqsave(&phba->hbalock, iflag);
8604 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8605 spin_unlock_irqrestore(&phba->hbalock, iflag);
8606 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8607 "(%d):2532 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8608 "cannot issue Data: x%x x%x\n",
8609 mboxq->vport ? mboxq->vport->vpi : 0,
8610 mboxq->u.mb.mbxCommand,
a183a15f
JS
8611 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8612 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8613 psli->sli_flag, MBX_POLL);
8614 return MBXERR_ERROR;
8615 }
8616 /* The server grabs the token and owns it until release */
8617 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8618 phba->sli.mbox_active = mboxq;
8619 spin_unlock_irqrestore(&phba->hbalock, iflag);
8620
2d843edc
JS
 8621	/* wait for bootstrap mbox register readiness */
8622 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8623 if (rc)
8624 goto exit;
da0436e9
JS
8625 /*
8626 * Initialize the bootstrap memory region to avoid stale data areas
8627 * in the mailbox post. Then copy the caller's mailbox contents to
8628 * the bmbx mailbox region.
8629 */
8630 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8631 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
48f8fdb4
JS
8632 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8633 sizeof(struct lpfc_mqe));
da0436e9
JS
8634
8635 /* Post the high mailbox dma address to the port and wait for ready. */
8636 dma_address = &phba->sli4_hba.bmbx.dma_address;
8637 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8638
2d843edc
JS
8639 /* wait for bootstrap mbox register for hi-address write done */
8640 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8641 if (rc)
8642 goto exit;
da0436e9
JS
8643
8644 /* Post the low mailbox dma address to the port. */
8645 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 8646
2d843edc
JS
8647 /* wait for bootstrap mbox register for low address write done */
8648 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8649 if (rc)
8650 goto exit;
da0436e9
JS
8651
8652 /*
8653 * Read the CQ to ensure the mailbox has completed.
8654 * If so, update the mailbox status so that the upper layers
8655 * can complete the request normally.
8656 */
48f8fdb4
JS
8657 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8658 sizeof(struct lpfc_mqe));
da0436e9 8659 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
48f8fdb4
JS
8660 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8661 sizeof(struct lpfc_mcqe));
da0436e9 8662 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
0558056c
JS
8663 /*
8664 * When the CQE status indicates a failure and the mailbox status
8665 * indicates success then copy the CQE status into the mailbox status
8666 * (and prefix it with x4000).
8667 */
da0436e9 8668 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
0558056c
JS
8669 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8670 bf_set(lpfc_mqe_status, mb,
8671 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 8672 rc = MBXERR_ERROR;
d7c47992
JS
8673 } else
8674 lpfc_sli4_swap_str(phba, mboxq);
da0436e9
JS
8675
8676 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8677 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
da0436e9
JS
8678 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8679 " x%x x%x CQ: x%x x%x x%x x%x\n",
a183a15f
JS
8680 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8681 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8682 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8683 bf_get(lpfc_mqe_status, mb),
8684 mb->un.mb_words[0], mb->un.mb_words[1],
8685 mb->un.mb_words[2], mb->un.mb_words[3],
8686 mb->un.mb_words[4], mb->un.mb_words[5],
8687 mb->un.mb_words[6], mb->un.mb_words[7],
8688 mb->un.mb_words[8], mb->un.mb_words[9],
8689 mb->un.mb_words[10], mb->un.mb_words[11],
8690 mb->un.mb_words[12], mboxq->mcqe.word0,
8691 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8692 mboxq->mcqe.trailer);
8693exit:
 8694	/* We are holding the token, no need for a lock when releasing it */
8695 spin_lock_irqsave(&phba->hbalock, iflag);
8696 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8697 phba->sli.mbox_active = NULL;
8698 spin_unlock_irqrestore(&phba->hbalock, iflag);
8699 return rc;
8700}
8701
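/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a condensed view of how the three static helpers above cooperate when a
 * polled (MBX_POLL) mailbox must be issued while interrupt-driven posting
 * is enabled.  lpfc_sli_issue_mbox_s4() below performs the same
 * block / post-sync / unblock sequence with full logging; error handling
 * here is reduced to the essentials.
 */
static int lpfc_sli4_sync_mbox_sketch(struct lpfc_hba *phba,
				      LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Quiesce asynchronous posting; a non-zero return means the
	 * currently active mailbox command never completed in time.
	 */
	if (lpfc_sli4_async_mbox_block(phba))
		return MBX_NOT_FINISHED;

	/* Post through the bootstrap mailbox region and poll for the
	 * completion, then resume asynchronous posting.
	 */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
	lpfc_sli4_async_mbox_unblock(phba);
	return rc;
}
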
8702/**
8703 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8704 * @phba: Pointer to HBA context object.
 8705 * @mboxq: Pointer to mailbox object.
 8706 * @flag: Flag indicating how the mailbox needs to be processed.
8707 *
8708 * This function is called by discovery code and HBA management code to submit
8709 * a mailbox command to firmware with SLI-4 interface spec.
8710 *
8711 * Return codes the caller owns the mailbox command after the return of the
8712 * function.
8713 **/
8714static int
8715lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8716 uint32_t flag)
8717{
8718 struct lpfc_sli *psli = &phba->sli;
8719 unsigned long iflags;
8720 int rc;
8721
b76f2dc9
JS
8722 /* dump from issue mailbox command if setup */
8723 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8724
8fa38513
JS
8725 rc = lpfc_mbox_dev_check(phba);
8726 if (unlikely(rc)) {
8727 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8728 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8fa38513
JS
8729 "cannot issue Data: x%x x%x\n",
8730 mboxq->vport ? mboxq->vport->vpi : 0,
8731 mboxq->u.mb.mbxCommand,
a183a15f
JS
8732 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8733 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8fa38513
JS
8734 psli->sli_flag, flag);
8735 goto out_not_finished;
8736 }
8737
da0436e9
JS
8738 /* Detect polling mode and jump to a handler */
8739 if (!phba->sli4_hba.intr_enable) {
8740 if (flag == MBX_POLL)
8741 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8742 else
8743 rc = -EIO;
8744 if (rc != MBX_SUCCESS)
0558056c 8745 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 8746 "(%d):2541 Mailbox command x%x "
cc459f19
JS
8747 "(x%x/x%x) failure: "
8748 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8749 "Data: x%x x%x\n,",
da0436e9
JS
8750 mboxq->vport ? mboxq->vport->vpi : 0,
8751 mboxq->u.mb.mbxCommand,
a183a15f
JS
8752 lpfc_sli_config_mbox_subsys_get(phba,
8753 mboxq),
8754 lpfc_sli_config_mbox_opcode_get(phba,
8755 mboxq),
cc459f19
JS
8756 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8757 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8758 bf_get(lpfc_mcqe_ext_status,
8759 &mboxq->mcqe),
da0436e9
JS
8760 psli->sli_flag, flag);
8761 return rc;
8762 } else if (flag == MBX_POLL) {
f1126688
JS
8763 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8764 "(%d):2542 Try to issue mailbox command "
7365f6fd 8765 "x%x (x%x/x%x) synchronously ahead of async "
f1126688 8766 "mailbox command queue: x%x x%x\n",
da0436e9
JS
8767 mboxq->vport ? mboxq->vport->vpi : 0,
8768 mboxq->u.mb.mbxCommand,
a183a15f
JS
8769 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8770 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 8771 psli->sli_flag, flag);
f1126688
JS
8772 /* Try to block the asynchronous mailbox posting */
8773 rc = lpfc_sli4_async_mbox_block(phba);
8774 if (!rc) {
8775 /* Successfully blocked, now issue sync mbox cmd */
8776 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8777 if (rc != MBX_SUCCESS)
cc459f19 8778 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 8779 LOG_MBOX | LOG_SLI,
cc459f19
JS
8780 "(%d):2597 Sync Mailbox command "
8781 "x%x (x%x/x%x) failure: "
8782 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8783 "Data: x%x x%x\n,",
8784 mboxq->vport ? mboxq->vport->vpi : 0,
a183a15f
JS
8785 mboxq->u.mb.mbxCommand,
8786 lpfc_sli_config_mbox_subsys_get(phba,
8787 mboxq),
8788 lpfc_sli_config_mbox_opcode_get(phba,
8789 mboxq),
cc459f19
JS
8790 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8791 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8792 bf_get(lpfc_mcqe_ext_status,
8793 &mboxq->mcqe),
a183a15f 8794 psli->sli_flag, flag);
f1126688
JS
8795 /* Unblock the async mailbox posting afterward */
8796 lpfc_sli4_async_mbox_unblock(phba);
8797 }
8798 return rc;
da0436e9
JS
8799 }
8800
 8801	/* Now, interrupt mode asynchronous mailbox command */
8802 rc = lpfc_mbox_cmd_check(phba, mboxq);
8803 if (rc) {
8804 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8805 "(%d):2543 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8806 "cannot issue Data: x%x x%x\n",
8807 mboxq->vport ? mboxq->vport->vpi : 0,
8808 mboxq->u.mb.mbxCommand,
a183a15f
JS
8809 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8810 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8811 psli->sli_flag, flag);
8812 goto out_not_finished;
8813 }
da0436e9
JS
8814
8815 /* Put the mailbox command to the driver internal FIFO */
8816 psli->slistat.mbox_busy++;
8817 spin_lock_irqsave(&phba->hbalock, iflags);
8818 lpfc_mbox_put(phba, mboxq);
8819 spin_unlock_irqrestore(&phba->hbalock, iflags);
8820 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8821 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 8822 "x%x (x%x/x%x) x%x x%x x%x\n",
da0436e9
JS
8823 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8824 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
a183a15f
JS
8825 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8826 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8827 phba->pport->port_state,
8828 psli->sli_flag, MBX_NOWAIT);
8829 /* Wake up worker thread to transport mailbox command from head */
8830 lpfc_worker_wake_up(phba);
8831
8832 return MBX_BUSY;
8833
8834out_not_finished:
8835 return MBX_NOT_FINISHED;
8836}
8837
8838/**
8839 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8840 * @phba: Pointer to HBA context object.
8841 *
8842 * This function is called by worker thread to send a mailbox command to
8843 * SLI4 HBA firmware.
8844 *
8845 **/
8846int
8847lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8848{
8849 struct lpfc_sli *psli = &phba->sli;
8850 LPFC_MBOXQ_t *mboxq;
8851 int rc = MBX_SUCCESS;
8852 unsigned long iflags;
8853 struct lpfc_mqe *mqe;
8854 uint32_t mbx_cmnd;
8855
 8856	/* Check interrupt mode before posting async mailbox command */
8857 if (unlikely(!phba->sli4_hba.intr_enable))
8858 return MBX_NOT_FINISHED;
8859
8860 /* Check for mailbox command service token */
8861 spin_lock_irqsave(&phba->hbalock, iflags);
8862 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8863 spin_unlock_irqrestore(&phba->hbalock, iflags);
8864 return MBX_NOT_FINISHED;
8865 }
8866 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8867 spin_unlock_irqrestore(&phba->hbalock, iflags);
8868 return MBX_NOT_FINISHED;
8869 }
8870 if (unlikely(phba->sli.mbox_active)) {
8871 spin_unlock_irqrestore(&phba->hbalock, iflags);
8872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8873 "0384 There is pending active mailbox cmd\n");
8874 return MBX_NOT_FINISHED;
8875 }
8876 /* Take the mailbox command service token */
8877 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8878
8879 /* Get the next mailbox command from head of queue */
8880 mboxq = lpfc_mbox_get(phba);
8881
8882 /* If no more mailbox command waiting for post, we're done */
8883 if (!mboxq) {
8884 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8885 spin_unlock_irqrestore(&phba->hbalock, iflags);
8886 return MBX_SUCCESS;
8887 }
8888 phba->sli.mbox_active = mboxq;
8889 spin_unlock_irqrestore(&phba->hbalock, iflags);
8890
8891 /* Check device readiness for posting mailbox command */
8892 rc = lpfc_mbox_dev_check(phba);
8893 if (unlikely(rc))
8894 /* Driver clean routine will clean up pending mailbox */
8895 goto out_not_finished;
8896
8897 /* Prepare the mbox command to be posted */
8898 mqe = &mboxq->u.mqe;
8899 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8900
8901 /* Start timer for the mbox_tmo and log some mailbox post messages */
8902 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 8903 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
da0436e9
JS
8904
8905 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8906 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
da0436e9
JS
8907 "x%x x%x\n",
8908 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
a183a15f
JS
8909 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8910 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8911 phba->pport->port_state, psli->sli_flag);
8912
8913 if (mbx_cmnd != MBX_HEARTBEAT) {
8914 if (mboxq->vport) {
8915 lpfc_debugfs_disc_trc(mboxq->vport,
8916 LPFC_DISC_TRC_MBOX_VPORT,
8917 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8918 mbx_cmnd, mqe->un.mb_words[0],
8919 mqe->un.mb_words[1]);
8920 } else {
8921 lpfc_debugfs_disc_trc(phba->pport,
8922 LPFC_DISC_TRC_MBOX,
8923 "MBOX Send: cmd:x%x mb:x%x x%x",
8924 mbx_cmnd, mqe->un.mb_words[0],
8925 mqe->un.mb_words[1]);
8926 }
8927 }
8928 psli->slistat.mbox_cmd++;
8929
8930 /* Post the mailbox command to the port */
8931 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8932 if (rc != MBX_SUCCESS) {
8933 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8934 "(%d):2533 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8935 "cannot issue Data: x%x x%x\n",
8936 mboxq->vport ? mboxq->vport->vpi : 0,
8937 mboxq->u.mb.mbxCommand,
a183a15f
JS
8938 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8939 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8940 psli->sli_flag, MBX_NOWAIT);
8941 goto out_not_finished;
8942 }
8943
8944 return rc;
8945
8946out_not_finished:
8947 spin_lock_irqsave(&phba->hbalock, iflags);
d7069f09
JS
8948 if (phba->sli.mbox_active) {
8949 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8950 __lpfc_mbox_cmpl_put(phba, mboxq);
8951 /* Release the token */
8952 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8953 phba->sli.mbox_active = NULL;
8954 }
da0436e9
JS
8955 spin_unlock_irqrestore(&phba->hbalock, iflags);
8956
8957 return MBX_NOT_FINISHED;
8958}
8959
8960/**
8961 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8962 * @phba: Pointer to HBA context object.
8963 * @pmbox: Pointer to mailbox object.
 8965 * @flag: Flag indicating how the mailbox needs to be processed.
8965 *
8966 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8967 * the API jump table function pointer from the lpfc_hba struct.
8968 *
8969 * Return codes the caller owns the mailbox command after the return of the
8970 * function.
8971 **/
8972int
8973lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8974{
8975 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8976}
8977
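/*
 * Illustrative sketch only (hypothetical caller, not part of the driver):
 * issuing a mailbox command in polling mode through the wrapper above and
 * honoring the ownership rule documented for lpfc_sli_issue_mbox_s3() --
 * on MBX_BUSY/MBX_SUCCESS the SLI layer owns the mailbox until it
 * completes.  In poll mode the command has completed (or failed) by the
 * time the call returns, so the caller frees the mailbox unless the
 * command timed out, in which case the timeout handler may still
 * reference it.  The command-specific setup is elided.
 */
static int lpfc_sketch_issue_mbox_poll(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	/* ... fill in pmb->u.mb (SLI3) or pmb->u.mqe (SLI4) here ... */

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);

	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
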
8978/**
25985edc 8979 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
da0436e9
JS
8980 * @phba: The hba struct for which this call is being executed.
8981 * @dev_grp: The HBA PCI-Device group number.
8982 *
8983 * This routine sets up the mbox interface API function jump table in @phba
8984 * struct.
8985 * Returns: 0 - success, -ENODEV - failure.
8986 **/
8987int
8988lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8989{
8990
8991 switch (dev_grp) {
8992 case LPFC_PCI_DEV_LP:
8993 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8994 phba->lpfc_sli_handle_slow_ring_event =
8995 lpfc_sli_handle_slow_ring_event_s3;
8996 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8997 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8998 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8999 break;
9000 case LPFC_PCI_DEV_OC:
9001 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9002 phba->lpfc_sli_handle_slow_ring_event =
9003 lpfc_sli_handle_slow_ring_event_s4;
9004 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9005 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9006 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9007 break;
9008 default:
9009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9010 "1420 Invalid HBA PCI-device group: 0x%x\n",
9011 dev_grp);
9012 return -ENODEV;
9013 break;
9014 }
9015 return 0;
9016}
9017
e59058c4 9018/**
3621a710 9019 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
9020 * @phba: Pointer to HBA context object.
9021 * @pring: Pointer to driver SLI ring object.
9022 * @piocb: Pointer to address of newly added command iocb.
9023 *
9024 * This function is called with hbalock held to add a command
9025 * iocb to the txq when SLI layer cannot submit the command iocb
9026 * to the ring.
9027 **/
2a9bf3d0 9028void
92d7f7b0 9029__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9030 struct lpfc_iocbq *piocb)
dea3101e 9031{
1c2ba475 9032 lockdep_assert_held(&phba->hbalock);
dea3101e 9033 /* Insert the caller's iocb in the txq tail for later processing. */
9034 list_add_tail(&piocb->list, &pring->txq);
dea3101e 9035}
9036
e59058c4 9037/**
3621a710 9038 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
9039 * @phba: Pointer to HBA context object.
9040 * @pring: Pointer to driver SLI ring object.
9041 * @piocb: Pointer to address of newly added command iocb.
9042 *
9043 * This function is called with hbalock held before a new
9044 * iocb is submitted to the firmware. This function checks
9045 * txq to flush the iocbs in txq to Firmware before
9046 * submitting new iocbs to the Firmware.
9047 * If there are iocbs in the txq which need to be submitted
9048 * to firmware, lpfc_sli_next_iocb returns the first element
9049 * of the txq after dequeuing it from txq.
9050 * If there is no iocb in the txq then the function will return
9051 * *piocb and *piocb is set to NULL. Caller needs to check
9052 * *piocb to find if there are more commands in the txq.
9053 **/
dea3101e 9054static struct lpfc_iocbq *
9055lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9056 struct lpfc_iocbq **piocb)
dea3101e 9057{
9058 struct lpfc_iocbq * nextiocb;
9059
1c2ba475
JT
9060 lockdep_assert_held(&phba->hbalock);
9061
dea3101e 9062 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9063 if (!nextiocb) {
9064 nextiocb = *piocb;
9065 *piocb = NULL;
9066 }
9067
9068 return nextiocb;
9069}
9070
e59058c4 9071/**
3772a991 9072 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 9073 * @phba: Pointer to HBA context object.
3772a991 9074 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
9075 * @piocb: Pointer to command iocb.
9076 * @flag: Flag indicating if this command can be put into txq.
9077 *
3772a991
JS
9078 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9079 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9080 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9081 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9082 * this function allows only iocbs for posting buffers. This function finds
9083 * next available slot in the command ring and posts the command to the
9084 * available slot and writes the port attention register to request HBA start
9085 * processing new iocb. If there is no slot available in the ring and
9086 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9087 * the function returns IOCB_BUSY.
e59058c4 9088 *
3772a991
JS
9089 * This function is called with hbalock held. The function will return success
 9090 * after it successfully submits the iocb to firmware or after adding it to the
9091 * txq.
e59058c4 9092 **/
98c9ea5c 9093static int
3772a991 9094__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 9095 struct lpfc_iocbq *piocb, uint32_t flag)
9096{
9097 struct lpfc_iocbq *nextiocb;
9098 IOCB_t *iocb;
895427bd 9099 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea3101e 9100
1c2ba475
JT
9101 lockdep_assert_held(&phba->hbalock);
9102
92d7f7b0
JS
9103 if (piocb->iocb_cmpl && (!piocb->vport) &&
9104 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9105 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9106 lpfc_printf_log(phba, KERN_ERR,
9107 LOG_SLI | LOG_VPORT,
e8b62011 9108 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
9109 piocb->iocb.ulpCommand);
9110 dump_stack();
9111 return IOCB_ERROR;
9112 }
9113
9114
8d63f375
LV
9115 /* If the PCI channel is in offline state, do not post iocbs. */
9116 if (unlikely(pci_channel_offline(phba->pcidev)))
9117 return IOCB_ERROR;
9118
a257bf90
JS
9119 /* If HBA has a deferred error attention, fail the iocb. */
9120 if (unlikely(phba->hba_flag & DEFER_ERATT))
9121 return IOCB_ERROR;
9122
dea3101e 9123 /*
9124 * We should never get an IOCB if we are in a < LINK_DOWN state
9125 */
2e0fef85 9126 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 9127 return IOCB_ERROR;
9128
9129 /*
 9130	 * Check to see if we are blocking IOCB processing because of an
0b727fea 9131 * outstanding event.
dea3101e 9132 */
0b727fea 9133 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 9134 goto iocb_busy;
9135
2e0fef85 9136 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 9137 /*
2680eeaa 9138 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 9139 * can be issued if the link is not up.
9140 */
9141 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
9142 case CMD_GEN_REQUEST64_CR:
9143 case CMD_GEN_REQUEST64_CX:
9144 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9145 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 9146 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
9147 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9148 MENLO_TRANSPORT_TYPE))
9149
9150 goto iocb_busy;
9151 break;
dea3101e 9152 case CMD_QUE_RING_BUF_CN:
9153 case CMD_QUE_RING_BUF64_CN:
dea3101e 9154 /*
9155 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9156 * completion, iocb_cmpl MUST be 0.
9157 */
9158 if (piocb->iocb_cmpl)
9159 piocb->iocb_cmpl = NULL;
9160 /*FALLTHROUGH*/
9161 case CMD_CREATE_XRI_CR:
2680eeaa
JS
9162 case CMD_CLOSE_XRI_CN:
9163 case CMD_CLOSE_XRI_CX:
dea3101e 9164 break;
9165 default:
9166 goto iocb_busy;
9167 }
9168
9169 /*
9170 * For FCP commands, we must be in a state where we can process link
9171 * attention events.
9172 */
895427bd 9173 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
92d7f7b0 9174 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 9175 goto iocb_busy;
92d7f7b0 9176 }
dea3101e 9177
dea3101e 9178 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9179 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9180 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9181
9182 if (iocb)
9183 lpfc_sli_update_ring(phba, pring);
9184 else
9185 lpfc_sli_update_full_ring(phba, pring);
9186
9187 if (!piocb)
9188 return IOCB_SUCCESS;
9189
9190 goto out_busy;
9191
9192 iocb_busy:
9193 pring->stats.iocb_cmd_delay++;
9194
9195 out_busy:
9196
9197 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 9198 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 9199 return IOCB_SUCCESS;
9200 }
9201
9202 return IOCB_BUSY;
9203}
9204
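/*
 * Illustrative sketch only (hypothetical caller, not part of the driver):
 * submitting an already-prepared command iocb on the ELS ring through the
 * public wrapper lpfc_sli_issue_iocb(), which dispatches to the _s3 or _s4
 * variant via the API jump table.  With SLI_IOCB_RET_IOCB set, a full ring
 * returns IOCB_BUSY and the iocb stays with the caller; without the flag
 * the iocb would be parked on the txq and IOCB_SUCCESS returned, as the
 * out_busy path above shows.
 */
static int lpfc_sketch_submit_els_iocb(struct lpfc_hba *phba,
				       struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		return -EBUSY;	/* caller keeps the iocb and may retry */

	return (rc == IOCB_ERROR) ? -EIO : 0;
}
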
3772a991 9205/**
4f774513
JS
9206 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9207 * @phba: Pointer to HBA context object.
 9208 * @piocbq: Pointer to command iocb.
9209 * @sglq: Pointer to the scatter gather queue object.
9210 *
9211 * This routine converts the bpl or bde that is in the IOCB
9212 * to a sgl list for the sli4 hardware. The physical address
9213 * of the bpl/bde is converted back to a virtual address.
9214 * If the IOCB contains a BPL then the list of BDE's is
9215 * converted to sli4_sge's. If the IOCB contains a single
9216 * BDE then it is converted to a single sli_sge.
 9217 * The IOCB is still in cpu endianness so the contents of
9218 * the bpl can be used without byte swapping.
9219 *
9220 * Returns valid XRI = Success, NO_XRI = Failure.
9221**/
9222static uint16_t
9223lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9224 struct lpfc_sglq *sglq)
3772a991 9225{
4f774513
JS
9226 uint16_t xritag = NO_XRI;
9227 struct ulp_bde64 *bpl = NULL;
9228 struct ulp_bde64 bde;
9229 struct sli4_sge *sgl = NULL;
1b51197d 9230 struct lpfc_dmabuf *dmabuf;
4f774513
JS
9231 IOCB_t *icmd;
9232 int numBdes = 0;
9233 int i = 0;
63e801ce
JS
9234 uint32_t offset = 0; /* accumulated offset in the sg request list */
9235 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 9236
4f774513
JS
9237 if (!piocbq || !sglq)
9238 return xritag;
9239
9240 sgl = (struct sli4_sge *)sglq->sgl;
9241 icmd = &piocbq->iocb;
6b5151fd
JS
9242 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9243 return sglq->sli4_xritag;
4f774513
JS
9244 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9245 numBdes = icmd->un.genreq64.bdl.bdeSize /
9246 sizeof(struct ulp_bde64);
9247 /* The addrHigh and addrLow fields within the IOCB
9248 * have not been byteswapped yet so there is no
9249 * need to swap them back.
9250 */
1b51197d
JS
9251 if (piocbq->context3)
9252 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9253 else
9254 return xritag;
4f774513 9255
1b51197d 9256 bpl = (struct ulp_bde64 *)dmabuf->virt;
4f774513
JS
9257 if (!bpl)
9258 return xritag;
9259
9260 for (i = 0; i < numBdes; i++) {
9261 /* Should already be byte swapped. */
28baac74
JS
9262 sgl->addr_hi = bpl->addrHigh;
9263 sgl->addr_lo = bpl->addrLow;
9264
0558056c 9265 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9266 if ((i+1) == numBdes)
9267 bf_set(lpfc_sli4_sge_last, sgl, 1);
9268 else
9269 bf_set(lpfc_sli4_sge_last, sgl, 0);
28baac74
JS
9270 /* swap the size field back to the cpu so we
9271 * can assign it to the sgl.
9272 */
9273 bde.tus.w = le32_to_cpu(bpl->tus.w);
9274 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
63e801ce
JS
9275 /* The offsets in the sgl need to be accumulated
9276 * separately for the request and reply lists.
9277 * The request is always first, the reply follows.
9278 */
9279 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9280 /* add up the reply sg entries */
9281 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9282 inbound++;
9283 /* first inbound? reset the offset */
9284 if (inbound == 1)
9285 offset = 0;
9286 bf_set(lpfc_sli4_sge_offset, sgl, offset);
f9bb2da1
JS
9287 bf_set(lpfc_sli4_sge_type, sgl,
9288 LPFC_SGE_TYPE_DATA);
63e801ce
JS
9289 offset += bde.tus.f.bdeSize;
9290 }
546fc854 9291 sgl->word2 = cpu_to_le32(sgl->word2);
4f774513
JS
9292 bpl++;
9293 sgl++;
9294 }
9295 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9296 /* The addrHigh and addrLow fields of the BDE have not
9297 * been byteswapped yet so they need to be swapped
9298 * before putting them in the sgl.
9299 */
9300 sgl->addr_hi =
9301 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9302 sgl->addr_lo =
9303 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 9304 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9305 bf_set(lpfc_sli4_sge_last, sgl, 1);
9306 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
9307 sgl->sge_len =
9308 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
9309 }
9310 return sglq->sli4_xritag;
3772a991 9311}
92d7f7b0 9312
e59058c4 9313/**
4f774513 9314 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 9315 * @phba: Pointer to HBA context object.
4f774513
JS
 9316 * @iocbq: Pointer to command iocb.
9317 * @wqe: Pointer to the work queue entry.
e59058c4 9318 *
4f774513
JS
9319 * This routine converts the iocb command to its Work Queue Entry
9320 * equivalent. The wqe pointer should not have any fields set when
9321 * this routine is called because it will memcpy over them.
9322 * This routine does not set the CQ_ID or the WQEC bits in the
9323 * wqe.
e59058c4 9324 *
4f774513 9325 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 9326 **/
cf5bf97e 9327static int
4f774513 9328lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
205e8240 9329 union lpfc_wqe128 *wqe)
cf5bf97e 9330{
5ffc266e 9331 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
9332 uint8_t ct = 0;
9333 uint32_t fip;
9334 uint32_t abort_tag;
9335 uint8_t command_type = ELS_COMMAND_NON_FIP;
9336 uint8_t cmnd;
9337 uint16_t xritag;
dcf2a4e0
JS
9338 uint16_t abrt_iotag;
9339 struct lpfc_iocbq *abrtiocbq;
4f774513 9340 struct ulp_bde64 *bpl = NULL;
f0d9bccc 9341 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
9342 int numBdes, i;
9343 struct ulp_bde64 bde;
c31098ce 9344 struct lpfc_nodelist *ndlp;
ff78d8f9 9345 uint32_t *pcmd;
1b51197d 9346 uint32_t if_type;
4f774513 9347
45ed1190 9348 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 9349 /* The fcp commands will set command type */
0c287589 9350 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 9351 command_type = FCP_COMMAND;
c868595d 9352 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
9353 command_type = ELS_COMMAND_FIP;
9354 else
9355 command_type = ELS_COMMAND_NON_FIP;
9356
b5c53958
JS
9357 if (phba->fcp_embed_io)
9358 memset(wqe, 0, sizeof(union lpfc_wqe128));
4f774513
JS
9359 /* Some of the fields are in the right position already */
9360 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
ae9e28f3
JS
9361 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9362 /* The ct field has moved so reset */
9363 wqe->generic.wqe_com.word7 = 0;
9364 wqe->generic.wqe_com.word10 = 0;
9365 }
b5c53958
JS
9366
9367 abort_tag = (uint32_t) iocbq->iotag;
9368 xritag = iocbq->sli4_xritag;
4f774513
JS
9369 /* words0-2 bpl convert bde */
9370 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
9371 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9372 sizeof(struct ulp_bde64);
4f774513
JS
9373 bpl = (struct ulp_bde64 *)
9374 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9375 if (!bpl)
9376 return IOCB_ERROR;
cf5bf97e 9377
4f774513
JS
9378 /* Should already be byte swapped. */
9379 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9380 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9381 /* swap the size field back to the cpu so we
9382 * can assign it to the sgl.
9383 */
9384 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
9385 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9386 total_len = 0;
9387 for (i = 0; i < numBdes; i++) {
9388 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9389 total_len += bde.tus.f.bdeSize;
9390 }
4f774513 9391 } else
5ffc266e 9392 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 9393
4f774513
JS
9394 iocbq->iocb.ulpIoTag = iocbq->iotag;
9395 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 9396
4f774513
JS
9397 switch (iocbq->iocb.ulpCommand) {
9398 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
9399 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9400 ndlp = iocbq->context_un.ndlp;
9401 else
9402 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
9403 if (!iocbq->iocb.ulpLe) {
9404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9405 "2007 Only Limited Edition cmd Format"
9406 " supported 0x%x\n",
9407 iocbq->iocb.ulpCommand);
9408 return IOCB_ERROR;
9409 }
ff78d8f9 9410
5ffc266e 9411 wqe->els_req.payload_len = xmit_len;
4f774513
JS
 9412		/* Els_request64 has a TMO */
9413 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9414 iocbq->iocb.ulpTimeout);
9415 /* Need a VF for word 4 set the vf bit*/
9416 bf_set(els_req64_vf, &wqe->els_req, 0);
9417 /* And a VFID for word 12 */
9418 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 9419 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
9420 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9421 iocbq->iocb.ulpContext);
9422 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9423 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 9424 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 9425 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
9426 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9427 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
9428 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9429 iocbq->context2)->virt);
1b51197d
JS
9430 if_type = bf_get(lpfc_sli_intf_if_type,
9431 &phba->sli4_hba.sli_intf);
27d6ac0a 9432 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 9433 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 9434 *pcmd == ELS_CMD_SCR ||
6b5151fd 9435 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 9436 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
9437 *pcmd == ELS_CMD_PLOGI)) {
9438 bf_set(els_req64_sp, &wqe->els_req, 1);
9439 bf_set(els_req64_sid, &wqe->els_req,
9440 iocbq->vport->fc_myDID);
939723a4
JS
9441 if ((*pcmd == ELS_CMD_FLOGI) &&
9442 !(phba->fc_topology ==
9443 LPFC_TOPOLOGY_LOOP))
9444 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
9445 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9446 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 9447 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 9448 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
9449 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9450 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9451 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9452 }
c868595d 9453 }
6d368e53
JS
9454 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9455 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
9456 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9457 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9458 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9459 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9460 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9461 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
af22741c 9462 wqe->els_req.max_response_payload_len = total_len - xmit_len;
7851fe2c 9463 break;
5ffc266e 9464 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
9465 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9466 iocbq->iocb.un.ulpWord[3]);
9467 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 9468 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
9469 /* The entire sequence is transmitted for this IOCB */
9470 xmit_len = total_len;
9471 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
9472 if (phba->link_flag & LS_LOOPBACK_MODE)
9473 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
5bd5f66c 9474 /* fall through */
4f774513 9475 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
9476 /* word3 iocb=io_tag32 wqe=reserved */
9477 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
9478 /* word4 relative_offset memcpy */
9479 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
9480 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9481 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9482 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9483 LPFC_WQE_IOD_WRITE);
9484 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9485 LPFC_WQE_LENLOC_WORD12);
9486 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
9487 wqe->xmit_sequence.xmit_len = xmit_len;
9488 command_type = OTHER_COMMAND;
7851fe2c 9489 break;
4f774513 9490 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
9491 /* word3 iocb=iotag32 wqe=seq_payload_len */
9492 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
9493 /* word4 iocb=rsvd wqe=rsvd */
9494 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9495 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 9496 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 9497 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
9498 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9499 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9500 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9501 LPFC_WQE_LENLOC_WORD3);
9502 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 9503 break;
4f774513
JS
9504 case CMD_FCP_IWRITE64_CR:
9505 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
9506 /* word3 iocb=iotag wqe=payload_offset_len */
9507 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9508 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9509 xmit_len + sizeof(struct fcp_rsp));
9510 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9511 0);
f0d9bccc
JS
9512 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9513 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9514 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9515 iocbq->iocb.ulpFCP2Rcvy);
9516 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9517 /* Always open the exchange */
f0d9bccc
JS
9518 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9519 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9520 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9521 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9522 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
1ba981fd
JS
9523 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9524 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
c92c841c
JS
9525 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9526 if (iocbq->priority) {
9527 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9528 (iocbq->priority << 1));
9529 } else {
1ba981fd
JS
9530 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9531 (phba->cfg_XLanePriority << 1));
9532 }
9533 }
b5c53958
JS
9534 /* Note, word 10 is already initialized to 0 */
9535
414abe0a
JS
9536 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9537 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9538 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9539 else
9540 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9541
b5c53958 9542 if (phba->fcp_embed_io) {
c490850a 9543 struct lpfc_io_buf *lpfc_cmd;
b5c53958 9544 struct sli4_sge *sgl;
b5c53958
JS
9545 struct fcp_cmnd *fcp_cmnd;
9546 uint32_t *ptr;
9547
9548 /* 128 byte wqe support here */
b5c53958
JS
9549
9550 lpfc_cmd = iocbq->context1;
0794d601 9551 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9552 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9553
9554 /* Word 0-2 - FCP_CMND */
205e8240 9555 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9556 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9557 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9558 wqe->generic.bde.addrHigh = 0;
9559 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9560
205e8240
JS
9561 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9562 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
b5c53958
JS
9563
9564 /* Word 22-29 FCP CMND Payload */
205e8240 9565 ptr = &wqe->words[22];
b5c53958
JS
9566 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9567 }
7851fe2c 9568 break;
4f774513 9569 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
9570 /* word3 iocb=iotag wqe=payload_offset_len */
9571 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9572 bf_set(payload_offset_len, &wqe->fcp_iread,
9573 xmit_len + sizeof(struct fcp_rsp));
9574 bf_set(cmd_buff_len, &wqe->fcp_iread,
9575 0);
f0d9bccc
JS
9576 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9577 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9578 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9579 iocbq->iocb.ulpFCP2Rcvy);
9580 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688 9581 /* Always open the exchange */
f0d9bccc
JS
9582 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9583 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9584 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9585 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9586 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
1ba981fd
JS
9587 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9588 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
c92c841c
JS
9589 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9590 if (iocbq->priority) {
9591 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9592 (iocbq->priority << 1));
9593 } else {
1ba981fd
JS
9594 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9595 (phba->cfg_XLanePriority << 1));
9596 }
9597 }
b5c53958
JS
9598 /* Note, word 10 is already initialized to 0 */
9599
414abe0a
JS
9600 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9601 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9602 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9603 else
9604 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9605
b5c53958 9606 if (phba->fcp_embed_io) {
c490850a 9607 struct lpfc_io_buf *lpfc_cmd;
b5c53958 9608 struct sli4_sge *sgl;
b5c53958
JS
9609 struct fcp_cmnd *fcp_cmnd;
9610 uint32_t *ptr;
9611
9612 /* 128 byte wqe support here */
b5c53958
JS
9613
9614 lpfc_cmd = iocbq->context1;
0794d601 9615 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9616 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9617
9618 /* Word 0-2 - FCP_CMND */
205e8240 9619 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9620 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9621 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9622 wqe->generic.bde.addrHigh = 0;
9623 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9624
205e8240
JS
9625 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9626 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
b5c53958
JS
9627
9628 /* Word 22-29 FCP CMND Payload */
205e8240 9629 ptr = &wqe->words[22];
b5c53958
JS
9630 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9631 }
7851fe2c 9632 break;
4f774513 9633 case CMD_FCP_ICMND64_CR:
0ba4b219
JS
9634 /* word3 iocb=iotag wqe=payload_offset_len */
9635 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9636 bf_set(payload_offset_len, &wqe->fcp_icmd,
9637 xmit_len + sizeof(struct fcp_rsp));
9638 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9639 0);
f0d9bccc 9640 /* word3 iocb=IO_TAG wqe=reserved */
f0d9bccc 9641 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 9642 /* Always open the exchange */
f0d9bccc
JS
9643 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9644 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9645 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9646 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9647 LPFC_WQE_LENLOC_NONE);
2a94aea4
JS
9648 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9649 iocbq->iocb.ulpFCP2Rcvy);
1ba981fd
JS
9650 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9651 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
c92c841c
JS
9652 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9653 if (iocbq->priority) {
9654 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9655 (iocbq->priority << 1));
9656 } else {
1ba981fd
JS
9657 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9658 (phba->cfg_XLanePriority << 1));
9659 }
9660 }
b5c53958
JS
9661 /* Note, word 10 is already initialized to 0 */
9662
9663 if (phba->fcp_embed_io) {
c490850a 9664 struct lpfc_io_buf *lpfc_cmd;
b5c53958 9665 struct sli4_sge *sgl;
b5c53958
JS
9666 struct fcp_cmnd *fcp_cmnd;
9667 uint32_t *ptr;
9668
9669 /* 128 byte wqe support here */
b5c53958
JS
9670
9671 lpfc_cmd = iocbq->context1;
0794d601 9672 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9673 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9674
9675 /* Word 0-2 - FCP_CMND */
205e8240 9676 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9677 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9678 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9679 wqe->generic.bde.addrHigh = 0;
9680 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9681
205e8240
JS
9682 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9683 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
b5c53958
JS
9684
9685 /* Word 22-29 FCP CMND Payload */
205e8240 9686 ptr = &wqe->words[22];
b5c53958
JS
9687 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9688 }
7851fe2c 9689 break;
4f774513 9690 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
9691 /* For this command calculate the xmit length of the
9692 * request bde.
9693 */
9694 xmit_len = 0;
9695 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9696 sizeof(struct ulp_bde64);
9697 for (i = 0; i < numBdes; i++) {
63e801ce 9698 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
9699 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9700 break;
63e801ce
JS
9701 xmit_len += bde.tus.f.bdeSize;
9702 }
f0d9bccc
JS
9703 /* word3 iocb=IO_TAG wqe=request_payload_len */
9704 wqe->gen_req.request_payload_len = xmit_len;
9705 /* word4 iocb=parameter wqe=relative_offset memcpy */
9706 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
9707 /* word6 context tag copied in memcpy */
9708 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9709 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9710 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9711 "2015 Invalid CT %x command 0x%x\n",
9712 ct, iocbq->iocb.ulpCommand);
9713 return IOCB_ERROR;
9714 }
f0d9bccc
JS
9715 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9716 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9717 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9718 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9719 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9720 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9721 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9722 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
af22741c 9723 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
4f774513 9724 command_type = OTHER_COMMAND;
7851fe2c 9725 break;
4f774513 9726 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 9727 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 9728 /* words0-2 BDE memcpy */
f0d9bccc
JS
9729 /* word3 iocb=iotag32 wqe=response_payload_len */
9730 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
9731 /* word4 */
9732 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
9733 /* word5 iocb=rsvd wqe=did */
9734 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
9735 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9736
9737 if_type = bf_get(lpfc_sli_intf_if_type,
9738 &phba->sli4_hba.sli_intf);
27d6ac0a 9739 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
939723a4
JS
9740 if (iocbq->vport->fc_flag & FC_PT2PT) {
9741 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9742 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9743 iocbq->vport->fc_myDID);
9744 if (iocbq->vport->fc_myDID == Fabric_DID) {
9745 bf_set(wqe_els_did,
9746 &wqe->xmit_els_rsp.wqe_dest, 0);
9747 }
9748 }
9749 }
f0d9bccc
JS
9750 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9751 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9752 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9753 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 9754 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 9755 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 9756 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 9757 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
9758 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9759 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9760 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9761 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9762 LPFC_WQE_LENLOC_WORD3);
9763 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
9764 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9765 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
9766 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9767 iocbq->context2)->virt);
9768 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
9769 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9770 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 9771 iocbq->vport->fc_myDID);
939723a4
JS
9772 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9773 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
9774 phba->vpi_ids[phba->pport->vpi]);
9775 }
4f774513 9776 command_type = OTHER_COMMAND;
7851fe2c 9777 break;
4f774513
JS
9778 case CMD_CLOSE_XRI_CN:
9779 case CMD_ABORT_XRI_CN:
9780 case CMD_ABORT_XRI_CX:
9781 /* words 0-2 are reserved; memcpy should have set them to 0 */
9782 /* port will send abts */
dcf2a4e0
JS
9783 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9784 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9785 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9786 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9787 } else
9788 fip = 0;
9789
9790 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 9791 /*
dcf2a4e0
JS
9792 * The link is down, or the command was ELS_FIP
9793 * so the fw does not need to send abts
4f774513
JS
9794 * on the wire.
9795 */
9796 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9797 else
9798 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9799 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
9800 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9801 wqe->abort_cmd.rsrvd5 = 0;
9802 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
9803 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9804 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
9805 /*
9806 * The abort handler will send us CMD_ABORT_XRI_CN or
9807 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9808 */
f0d9bccc
JS
9809 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9810 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9811 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9812 LPFC_WQE_LENLOC_NONE);
4f774513
JS
9813 cmnd = CMD_ABORT_XRI_CX;
9814 command_type = OTHER_COMMAND;
9815 xritag = 0;
7851fe2c 9816 break;
6669f9bb 9817 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 9818 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 9819 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
9820 * we re-construct this WQE here based on information in
9821 * iocbq from scratch.
9822 */
9823 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 9824 /* OX_ID is the same no matter who sent the ABTS to the CT exchange */
6669f9bb 9825 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
9826 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9827 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
9828 LPFC_ABTS_UNSOL_INT) {
9829 /* ABTS sent by initiator to CT exchange, the
9830 * RX_ID field will be filled with the newly
9831 * allocated responder XRI.
9832 */
9833 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9834 iocbq->sli4_xritag);
9835 } else {
9836 /* ABTS sent by responder to CT exchange, the
9837 * RX_ID field will be filled with the responder
9838 * RX_ID from ABTS.
9839 */
9840 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 9841 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 9842 }
6669f9bb
JS
9843 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9844 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
9845
9846 /* Use CT=VPI */
9847 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9848 ndlp->nlp_DID);
9849 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9850 iocbq->iocb.ulpContext);
9851 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 9852 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 9853 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
9854 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9855 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9856 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
9857 /* Overwrite the pre-set command type with OTHER_COMMAND */
9858 command_type = OTHER_COMMAND;
546fc854
JS
9859 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9860 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9861 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9862 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9863 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9864 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9865 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9866 }
9867
7851fe2c 9868 break;
ae9e28f3
JS
9869 case CMD_SEND_FRAME:
9870 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9871 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9872 return 0;
4f774513
JS
9873 case CMD_XRI_ABORTED_CX:
9874 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
9875 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9876 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9877 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9878 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9879 default:
9880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9881 "2014 Invalid command 0x%x\n",
9882 iocbq->iocb.ulpCommand);
9883 return IOCB_ERROR;
7851fe2c 9884 break;
4f774513 9885 }
6d368e53 9886
8012cc38
JS
9887 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9888 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9889 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9890 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9891 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9892 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9893 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9894 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
9895 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9896 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9897 wqe->generic.wqe_com.abort_tag = abort_tag;
9898 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9899 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9900 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9901 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
9902 return 0;
9903}
9904
9905/**
9906 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9907 * @phba: Pointer to HBA context object.
9908 * @ring_number: SLI ring number to issue iocb on.
9909 * @piocb: Pointer to command iocb.
9910 * @flag: Flag indicating if this command can be put into txq.
9911 *
9912 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9913 * an iocb command to an HBA with SLI-4 interface spec.
9914 *
9915 * This function is called with the ring lock held. The function will return
9916 * success after it successfully submits the iocb to firmware or after adding
9917 * it to the txq.
9918 **/
9919static int
9920__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9921 struct lpfc_iocbq *piocb, uint32_t flag)
9922{
9923 struct lpfc_sglq *sglq;
205e8240 9924 union lpfc_wqe128 wqe;
1ba981fd 9925 struct lpfc_queue *wq;
895427bd 9926 struct lpfc_sli_ring *pring;
4f774513 9927
895427bd
JS
9928 /* Get the WQ */
9929 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9930 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
cdb42bec 9931 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
895427bd
JS
9932 } else {
9933 wq = phba->sli4_hba.els_wq;
9934 }
9935
9936 /* Get corresponding ring */
9937 pring = wq->pring;
1c2ba475 9938
b5c53958
JS
9939 /*
9940 * The WQE can be either 64 or 128 bytes.
b5c53958 9941 */
b5c53958 9942
cda7fa18 9943 lockdep_assert_held(&pring->ring_lock);
895427bd 9944
4f774513
JS
9945 if (piocb->sli4_xritag == NO_XRI) {
9946 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 9947 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
9948 sglq = NULL;
9949 else {
0e9bb8d7 9950 if (!list_empty(&pring->txq)) {
2a9bf3d0
JS
9951 if (!(flag & SLI_IOCB_RET_IOCB)) {
9952 __lpfc_sli_ringtx_put(phba,
9953 pring, piocb);
9954 return IOCB_SUCCESS;
9955 } else {
9956 return IOCB_BUSY;
9957 }
9958 } else {
895427bd 9959 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
2a9bf3d0
JS
9960 if (!sglq) {
9961 if (!(flag & SLI_IOCB_RET_IOCB)) {
9962 __lpfc_sli_ringtx_put(phba,
9963 pring,
9964 piocb);
9965 return IOCB_SUCCESS;
9966 } else
9967 return IOCB_BUSY;
9968 }
9969 }
4f774513 9970 }
2ea259ee 9971 } else if (piocb->iocb_flag & LPFC_IO_FCP)
6d368e53
JS
9972 /* These IO's already have an XRI and a mapped sgl. */
9973 sglq = NULL;
2ea259ee 9974 else {
6d368e53
JS
9975 /*
9976 * This is a continuation of a command (CX), so this
4f774513
JS
9977 * sglq is on the active list
9978 */
edccdc17 9979 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
9980 if (!sglq)
9981 return IOCB_ERROR;
9982 }
9983
9984 if (sglq) {
6d368e53 9985 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 9986 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 9987 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
9988 return IOCB_ERROR;
9989 }
9990
205e8240 9991 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
4f774513
JS
9992 return IOCB_ERROR;
9993
205e8240 9994 if (lpfc_sli4_wq_put(wq, &wqe))
895427bd 9995 return IOCB_ERROR;
4f774513
JS
9996 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9997
9998 return 0;
9999}
10000
10001/**
10002 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10003 *
10004 * This routine wraps the actual lockless version of the issue IOCB function
10005 * pointer from the lpfc_hba struct.
10006 *
10007 * Return codes:
b5c53958
JS
10008 * IOCB_ERROR - Error
10009 * IOCB_SUCCESS - Success
10010 * IOCB_BUSY - Busy
4f774513 10011 **/
2a9bf3d0 10012int
4f774513
JS
10013__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10014 struct lpfc_iocbq *piocb, uint32_t flag)
10015{
10016 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10017}
10018
10019/**
25985edc 10020 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
10021 * @phba: The hba struct for which this call is being executed.
10022 * @dev_grp: The HBA PCI-Device group number.
10023 *
10024 * This routine sets up the SLI interface API function jump table in @phba
10025 * struct.
10026 * Returns: 0 - success, -ENODEV - failure.
10027 **/
10028int
10029lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10030{
10031
10032 switch (dev_grp) {
10033 case LPFC_PCI_DEV_LP:
10034 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10035 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10036 break;
10037 case LPFC_PCI_DEV_OC:
10038 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10039 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10040 break;
10041 default:
10042 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10043 "1419 Invalid HBA PCI-device group: 0x%x\n",
10044 dev_grp);
10045 return -ENODEV;
10046 break;
10047 }
10048 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10049 return 0;
10050}
10051
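/*
 * Editorial sketch (not part of the driver source): once the jump table
 * above is populated, common code stays SLI-revision agnostic by calling
 * through the pointers, e.g.
 *
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *
 * which resolves to __lpfc_sli_issue_iocb_s3 on an LPFC_PCI_DEV_LP (SLI-3)
 * HBA and to __lpfc_sli_issue_iocb_s4 on an LPFC_PCI_DEV_OC (SLI-4) HBA.
 */
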
a1efe163 10052/**
895427bd 10053 * lpfc_sli4_calc_ring - Calculates which ring to use
a1efe163 10054 * @phba: Pointer to HBA context object.
a1efe163
JS
10055 * @piocb: Pointer to command iocb.
10056 *
895427bd
JS
10057 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10058 * hba_wqidx, thus we need to calculate the corresponding ring.
a1efe163 10059 * Since ABORTS must go on the same WQ as the command they are
895427bd 10060 * aborting, we use the command's hba_wqidx.
a1efe163 10061 */
895427bd
JS
10062struct lpfc_sli_ring *
10063lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9bd2bff5 10064{
c490850a 10065 struct lpfc_io_buf *lpfc_cmd;
5e5b511d 10066
895427bd 10067 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
cdb42bec 10068 if (unlikely(!phba->sli4_hba.hdwq))
7370d10a
JS
10069 return NULL;
10070 /*
10071 * for abort iocb hba_wqidx should already
10072 * be setup based on what work queue we used.
10073 */
10074 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
c490850a 10075 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
1fbf9742 10076 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
9bd2bff5 10077 }
cdb42bec 10078 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
895427bd
JS
10079 } else {
10080 if (unlikely(!phba->sli4_hba.els_wq))
10081 return NULL;
10082 piocb->hba_wqidx = 0;
10083 return phba->sli4_hba.els_wq->pring;
9bd2bff5 10084 }
9bd2bff5
JS
10085}
10086
4f774513
JS
10087/**
10088 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10089 * @phba: Pointer to HBA context object.
10090 * @ring_number: SLI ring number to issue the iocb on.
10091 * @piocb: Pointer to command iocb.
10092 * @flag: Flag indicating if this command can be put into txq.
10093 *
10094 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10095 * function. It takes the appropriate lock (the ring lock on SLI4, the
10096 * hbalock on SLI2/3), calls __lpfc_sli_issue_iocb, and returns whatever
10097 * __lpfc_sli_issue_iocb returns. This wrapper is used by
10098 * functions which do not hold the lock themselves.
10099 **/
10100int
10101lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10102 struct lpfc_iocbq *piocb, uint32_t flag)
10103{
2a76a283 10104 struct lpfc_sli_ring *pring;
4f774513 10105 unsigned long iflags;
6a828b0f 10106 int rc;
4f774513 10107
7e56aa25 10108 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
10109 pring = lpfc_sli4_calc_ring(phba, piocb);
10110 if (unlikely(pring == NULL))
9bd2bff5 10111 return IOCB_ERROR;
ba20c853 10112
9bd2bff5
JS
10113 spin_lock_irqsave(&pring->ring_lock, iflags);
10114 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10115 spin_unlock_irqrestore(&pring->ring_lock, iflags);
7e56aa25
JS
10116 } else {
10117 /* For now, SLI2/3 will still use hbalock */
10118 spin_lock_irqsave(&phba->hbalock, iflags);
10119 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10120 spin_unlock_irqrestore(&phba->hbalock, iflags);
10121 }
4f774513
JS
10122 return rc;
10123}
10124
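/*
 * Editorial sketch (not part of the driver source): a caller that does not
 * hold the ring lock or hbalock typically issues an IOCB through the wrapper
 * above and handles the three return codes.  my_build_els_iocb() is a
 * hypothetical stand-in for whatever prepared the iocbq, and the failed
 * request is assumed to be returned with lpfc_sli_release_iocbq().
 *
 *	struct lpfc_iocbq *piocb = my_build_els_iocb(vport);
 *	int rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				     SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);	// submit failed
 *	else if (rc == IOCB_BUSY)
 *		;	// no resources; caller may retry later
 *	// IOCB_SUCCESS: the completion handler runs when the WQE completes
 */
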
10125/**
10126 * lpfc_extra_ring_setup - Extra ring setup function
10127 * @phba: Pointer to HBA context object.
10128 *
10129 * This function is called while the driver attaches to the
10130 * HBA to set up the extra ring. The extra ring is used
10131 * only when the driver needs to support target mode functionality
10132 * or IP over FC functionality.
10133 *
895427bd 10134 * This function is called with no lock held. SLI3 only.
4f774513
JS
10135 **/
10136static int
10137lpfc_extra_ring_setup( struct lpfc_hba *phba)
10138{
10139 struct lpfc_sli *psli;
10140 struct lpfc_sli_ring *pring;
10141
10142 psli = &phba->sli;
10143
10144 /* Adjust cmd/rsp ring iocb entries more evenly */
10145
10146 /* Take some away from the FCP ring */
895427bd 10147 pring = &psli->sli3_ring[LPFC_FCP_RING];
7e56aa25
JS
10148 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10149 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10150 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10151 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 10152
a4bc3379 10153 /* and give them to the extra ring */
895427bd 10154 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
a4bc3379 10155
7e56aa25
JS
10156 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10157 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10158 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10159 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
10160
10161 /* Setup default profile for this ring */
10162 pring->iotag_max = 4096;
10163 pring->num_mask = 1;
10164 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
10165 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10166 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
10167 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10168 return 0;
10169}
10170
cb69f7de
JS
10171/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10172 * @phba: Pointer to HBA context object.
10173 * @iocbq: Pointer to iocb object.
10174 *
10175 * The async_event handler calls this routine when it receives
10176 * an ASYNC_STATUS_CN event from the port. The port generates
10177 * this event when an Abort Sequence request to an rport fails
10178 * twice in succession. The abort could be originated by the
10179 * driver or by the port. The ABTS could have been for an ELS
10180 * or FCP IO. The port only generates this event when an ABTS
10181 * fails to complete after one retry.
10182 */
10183static void
10184lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10185 struct lpfc_iocbq *iocbq)
10186{
10187 struct lpfc_nodelist *ndlp = NULL;
10188 uint16_t rpi = 0, vpi = 0;
10189 struct lpfc_vport *vport = NULL;
10190
10191 /* The rpi in the ulpContext is vport-sensitive. */
10192 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10193 rpi = iocbq->iocb.ulpContext;
10194
10195 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10196 "3092 Port generated ABTS async event "
10197 "on vpi %d rpi %d status 0x%x\n",
10198 vpi, rpi, iocbq->iocb.ulpStatus);
10199
10200 vport = lpfc_find_vport_by_vpid(phba, vpi);
10201 if (!vport)
10202 goto err_exit;
10203 ndlp = lpfc_findnode_rpi(vport, rpi);
10204 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10205 goto err_exit;
10206
10207 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10208 lpfc_sli_abts_recover_port(vport, ndlp);
10209 return;
10210
10211 err_exit:
10212 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10213 "3095 Event Context not found, no "
10214 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10215 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10216 vpi, rpi);
10217}
10218
10219/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10220 * @phba: pointer to HBA context object.
10221 * @ndlp: nodelist pointer for the impacted rport.
10222 * @axri: pointer to the wcqe containing the failed exchange.
10223 *
10224 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10225 * port. The port generates this event when an abort exchange request to an
10226 * rport fails twice in succession with no reply. The abort could be originated
10227 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10228 */
10229void
10230lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10231 struct lpfc_nodelist *ndlp,
10232 struct sli4_wcqe_xri_aborted *axri)
10233{
10234 struct lpfc_vport *vport;
5c1db2ac 10235 uint32_t ext_status = 0;
cb69f7de 10236
6b5151fd 10237 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
10238 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10239 "3115 Node Context not found, driver "
10240 "ignoring abts err event\n");
6b5151fd
JS
10241 return;
10242 }
10243
cb69f7de
JS
10244 vport = ndlp->vport;
10245 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10246 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 10247 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8e668af5 10248 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
cb69f7de 10249 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
10250 bf_get(lpfc_wcqe_xa_status, axri),
10251 axri->parameter);
cb69f7de 10252
5c1db2ac
JS
10253 /*
10254 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10255 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10256 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10257 */
e3d2b802 10258 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
10259 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10260 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
10261 lpfc_sli_abts_recover_port(vport, ndlp);
10262}
10263
e59058c4 10264/**
3621a710 10265 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
10266 * @phba: Pointer to HBA context object.
10267 * @pring: Pointer to driver SLI ring object.
10268 * @iocbq: Pointer to iocb object.
10269 *
10270 * This function is called by the slow ring event handler
10271 * function when there is an ASYNC event iocb in the ring.
10272 * This function is called with no lock held.
10273 * Currently this function handles only temperature related
10274 * ASYNC events. The function decodes the temperature sensor
10275 * event message and posts events for the management applications.
10276 **/
98c9ea5c 10277static void
57127f15
JS
10278lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10279 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10280{
10281 IOCB_t *icmd;
10282 uint16_t evt_code;
57127f15
JS
10283 struct temp_event temp_event_data;
10284 struct Scsi_Host *shost;
a257bf90 10285 uint32_t *iocb_w;
57127f15
JS
10286
10287 icmd = &iocbq->iocb;
10288 evt_code = icmd->un.asyncstat.evt_code;
57127f15 10289
cb69f7de
JS
10290 switch (evt_code) {
10291 case ASYNC_TEMP_WARN:
10292 case ASYNC_TEMP_SAFE:
10293 temp_event_data.data = (uint32_t) icmd->ulpContext;
10294 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10295 if (evt_code == ASYNC_TEMP_WARN) {
10296 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10297 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10298 "0347 Adapter is very hot, please take "
10299 "corrective action. temperature : %d Celsius\n",
10300 (uint32_t) icmd->ulpContext);
10301 } else {
10302 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10303 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10304 "0340 Adapter temperature is OK now. "
10305 "temperature : %d Celsius\n",
10306 (uint32_t) icmd->ulpContext);
10307 }
10308
10309 /* Send temperature change event to applications */
10310 shost = lpfc_shost_from_vport(phba->pport);
10311 fc_host_post_vendor_event(shost, fc_get_event_number(),
10312 sizeof(temp_event_data), (char *) &temp_event_data,
10313 LPFC_NL_VENDOR_ID);
10314 break;
10315 case ASYNC_STATUS_CN:
10316 lpfc_sli_abts_err_handler(phba, iocbq);
10317 break;
10318 default:
a257bf90 10319 iocb_w = (uint32_t *) icmd;
cb69f7de 10320 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 10321 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 10322 " evt_code 0x%x\n"
a257bf90
JS
10323 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10324 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10325 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10326 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 10327 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
10328 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10329 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10330 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10331 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10332
cb69f7de 10333 break;
57127f15 10334 }
57127f15
JS
10335}
10336
10337
e59058c4 10338/**
895427bd 10339 * lpfc_sli4_setup - SLI ring setup function
e59058c4
JS
10340 * @phba: Pointer to HBA context object.
10341 *
10342 * lpfc_sli4_setup sets up rings of the SLI interface with
10343 * number of iocbs per ring and iotags. This function is
10344 * called while the driver attaches to the HBA and before the
10345 * interrupts are enabled. So there is no need for locking.
10346 *
10347 * This function always returns 0.
10348 **/
dea3101e 10349int
895427bd
JS
10350lpfc_sli4_setup(struct lpfc_hba *phba)
10351{
10352 struct lpfc_sli_ring *pring;
10353
10354 pring = phba->sli4_hba.els_wq->pring;
10355 pring->num_mask = LPFC_MAX_RING_MASK;
10356 pring->prt[0].profile = 0; /* Mask 0 */
10357 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10358 pring->prt[0].type = FC_TYPE_ELS;
10359 pring->prt[0].lpfc_sli_rcv_unsol_event =
10360 lpfc_els_unsol_event;
10361 pring->prt[1].profile = 0; /* Mask 1 */
10362 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10363 pring->prt[1].type = FC_TYPE_ELS;
10364 pring->prt[1].lpfc_sli_rcv_unsol_event =
10365 lpfc_els_unsol_event;
10366 pring->prt[2].profile = 0; /* Mask 2 */
10367 /* NameServer Inquiry */
10368 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10369 /* NameServer */
10370 pring->prt[2].type = FC_TYPE_CT;
10371 pring->prt[2].lpfc_sli_rcv_unsol_event =
10372 lpfc_ct_unsol_event;
10373 pring->prt[3].profile = 0; /* Mask 3 */
10374 /* NameServer response */
10375 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10376 /* NameServer */
10377 pring->prt[3].type = FC_TYPE_CT;
10378 pring->prt[3].lpfc_sli_rcv_unsol_event =
10379 lpfc_ct_unsol_event;
10380 return 0;
10381}
10382
10383/**
10384 * lpfc_sli_setup - SLI ring setup function
10385 * @phba: Pointer to HBA context object.
10386 *
10387 * lpfc_sli_setup sets up rings of the SLI interface with
10388 * number of iocbs per ring and iotags. This function is
10389 * called while the driver attaches to the HBA and before the
10390 * interrupts are enabled. So there is no need for locking.
10391 *
10392 * This function always returns 0. SLI3 only.
10393 **/
10394int
dea3101e 10395lpfc_sli_setup(struct lpfc_hba *phba)
10396{
ed957684 10397 int i, totiocbsize = 0;
dea3101e 10398 struct lpfc_sli *psli = &phba->sli;
10399 struct lpfc_sli_ring *pring;
10400
2a76a283 10401 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea3101e 10402 psli->sli_flag = 0;
dea3101e 10403
604a3e30
JB
10404 psli->iocbq_lookup = NULL;
10405 psli->iocbq_lookup_len = 0;
10406 psli->last_iotag = 0;
10407
dea3101e 10408 for (i = 0; i < psli->num_rings; i++) {
895427bd 10409 pring = &psli->sli3_ring[i];
dea3101e 10410 switch (i) {
10411 case LPFC_FCP_RING: /* ring 0 - FCP */
10412 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10413 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10414 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10415 pring->sli.sli3.numCiocb +=
10416 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10417 pring->sli.sli3.numRiocb +=
10418 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10419 pring->sli.sli3.numCiocb +=
10420 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10421 pring->sli.sli3.numRiocb +=
10422 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10423 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10424 SLI3_IOCB_CMD_SIZE :
10425 SLI2_IOCB_CMD_SIZE;
7e56aa25 10426 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10427 SLI3_IOCB_RSP_SIZE :
10428 SLI2_IOCB_RSP_SIZE;
dea3101e 10429 pring->iotag_ctr = 0;
10430 pring->iotag_max =
92d7f7b0 10431 (phba->cfg_hba_queue_depth * 2);
dea3101e 10432 pring->fast_iotag = pring->iotag_max;
10433 pring->num_mask = 0;
10434 break;
a4bc3379 10435 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 10436 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10437 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10438 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10439 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10440 SLI3_IOCB_CMD_SIZE :
10441 SLI2_IOCB_CMD_SIZE;
7e56aa25 10442 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10443 SLI3_IOCB_RSP_SIZE :
10444 SLI2_IOCB_RSP_SIZE;
2e0fef85 10445 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 10446 pring->num_mask = 0;
10447 break;
10448 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10449 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10450 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10451 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10452 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10453 SLI3_IOCB_CMD_SIZE :
10454 SLI2_IOCB_CMD_SIZE;
7e56aa25 10455 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10456 SLI3_IOCB_RSP_SIZE :
10457 SLI2_IOCB_RSP_SIZE;
dea3101e 10458 pring->fast_iotag = 0;
10459 pring->iotag_ctr = 0;
10460 pring->iotag_max = 4096;
57127f15
JS
10461 pring->lpfc_sli_rcv_async_status =
10462 lpfc_sli_async_event_handler;
6669f9bb 10463 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 10464 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
10465 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10466 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 10467 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 10468 lpfc_els_unsol_event;
dea3101e 10469 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
10470 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10471 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 10472 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 10473 lpfc_els_unsol_event;
dea3101e 10474 pring->prt[2].profile = 0; /* Mask 2 */
10475 /* NameServer Inquiry */
6a9c52cf 10476 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 10477 /* NameServer */
6a9c52cf 10478 pring->prt[2].type = FC_TYPE_CT;
dea3101e 10479 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 10480 lpfc_ct_unsol_event;
dea3101e 10481 pring->prt[3].profile = 0; /* Mask 3 */
10482 /* NameServer response */
6a9c52cf 10483 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 10484 /* NameServer */
6a9c52cf 10485 pring->prt[3].type = FC_TYPE_CT;
dea3101e 10486 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 10487 lpfc_ct_unsol_event;
dea3101e 10488 break;
10489 }
7e56aa25
JS
10490 totiocbsize += (pring->sli.sli3.numCiocb *
10491 pring->sli.sli3.sizeCiocb) +
10492 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 10493 }
ed957684 10494 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 10495 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
10496 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10497 "SLI2 SLIM Data: x%x x%lx\n",
10498 phba->brd_no, totiocbsize,
10499 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 10500 }
cf5bf97e
JW
10501 if (phba->cfg_multi_ring_support == 2)
10502 lpfc_extra_ring_setup(phba);
dea3101e 10503
10504 return 0;
10505}
10506
e59058c4 10507/**
895427bd 10508 * lpfc_sli4_queue_init - Queue initialization function
e59058c4
JS
10509 * @phba: Pointer to HBA context object.
10510 *
895427bd 10511 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
e59058c4
JS
10512 * ring. This function also initializes ring indices of each ring.
10513 * This function is called during the initialization of the SLI
10514 * interface of an HBA.
10515 * This function is called with no lock held and does not return
10516 * a value.
10517 **/
895427bd
JS
10518void
10519lpfc_sli4_queue_init(struct lpfc_hba *phba)
dea3101e 10520{
10521 struct lpfc_sli *psli;
10522 struct lpfc_sli_ring *pring;
604a3e30 10523 int i;
dea3101e 10524
10525 psli = &phba->sli;
2e0fef85 10526 spin_lock_irq(&phba->hbalock);
dea3101e 10527 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10528 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10529 /* Initialize list headers for txq and txcmplq as double linked lists */
cdb42bec
JS
10530 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10531 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
895427bd
JS
10532 pring->flag = 0;
10533 pring->ringno = LPFC_FCP_RING;
c490850a 10534 pring->txcmplq_cnt = 0;
895427bd
JS
10535 INIT_LIST_HEAD(&pring->txq);
10536 INIT_LIST_HEAD(&pring->txcmplq);
10537 INIT_LIST_HEAD(&pring->iocb_continueq);
10538 spin_lock_init(&pring->ring_lock);
10539 }
10540 pring = phba->sli4_hba.els_wq->pring;
10541 pring->flag = 0;
10542 pring->ringno = LPFC_ELS_RING;
c490850a 10543 pring->txcmplq_cnt = 0;
895427bd
JS
10544 INIT_LIST_HEAD(&pring->txq);
10545 INIT_LIST_HEAD(&pring->txcmplq);
10546 INIT_LIST_HEAD(&pring->iocb_continueq);
10547 spin_lock_init(&pring->ring_lock);
dea3101e 10548
cdb42bec
JS
10549 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10550 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10551 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
c490850a 10552 pring->flag = 0;
cdb42bec 10553 pring->ringno = LPFC_FCP_RING;
c490850a 10554 pring->txcmplq_cnt = 0;
cdb42bec
JS
10555 INIT_LIST_HEAD(&pring->txq);
10556 INIT_LIST_HEAD(&pring->txcmplq);
10557 INIT_LIST_HEAD(&pring->iocb_continueq);
10558 spin_lock_init(&pring->ring_lock);
10559 }
895427bd
JS
10560 pring = phba->sli4_hba.nvmels_wq->pring;
10561 pring->flag = 0;
10562 pring->ringno = LPFC_ELS_RING;
c490850a 10563 pring->txcmplq_cnt = 0;
895427bd
JS
10564 INIT_LIST_HEAD(&pring->txq);
10565 INIT_LIST_HEAD(&pring->txcmplq);
10566 INIT_LIST_HEAD(&pring->iocb_continueq);
10567 spin_lock_init(&pring->ring_lock);
10568 }
10569
10570 spin_unlock_irq(&phba->hbalock);
10571}
10572
10573/**
10574 * lpfc_sli_queue_init - Queue initialization function
10575 * @phba: Pointer to HBA context object.
10576 *
10577 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10578 * ring. This function also initializes ring indices of each ring.
10579 * This function is called during the initialization of the SLI
10580 * interface of an HBA.
10581 * This function is called with no lock held and does not return
10582 * a value.
10583 **/
10584void
10585lpfc_sli_queue_init(struct lpfc_hba *phba)
dea3101e 10586{
10587 struct lpfc_sli *psli;
10588 struct lpfc_sli_ring *pring;
604a3e30 10589 int i;
dea3101e 10590
10591 psli = &phba->sli;
2e0fef85 10592 spin_lock_irq(&phba->hbalock);
dea3101e 10593 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10594 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10595 /* Initialize list headers for txq and txcmplq as double linked lists */
10596 for (i = 0; i < psli->num_rings; i++) {
895427bd 10597 pring = &psli->sli3_ring[i];
dea3101e 10598 pring->ringno = i;
7e56aa25
JS
10599 pring->sli.sli3.next_cmdidx = 0;
10600 pring->sli.sli3.local_getidx = 0;
10601 pring->sli.sli3.cmdidx = 0;
dea3101e 10602 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 10603 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 10604 INIT_LIST_HEAD(&pring->postbufq);
895427bd
JS
10605 pring->flag = 0;
10606 INIT_LIST_HEAD(&pring->txq);
10607 INIT_LIST_HEAD(&pring->txcmplq);
7e56aa25 10608 spin_lock_init(&pring->ring_lock);
dea3101e 10609 }
2e0fef85 10610 spin_unlock_irq(&phba->hbalock);
dea3101e 10611}
10612
04c68496
JS
10613/**
10614 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10615 * @phba: Pointer to HBA context object.
10616 *
10617 * This routine flushes the mailbox command subsystem. It will unconditionally
10618 * flush all the mailbox commands in the three possible stages in the mailbox
10619 * command sub-system: pending mailbox command queue; the outstanding mailbox
10620 * command; and completed mailbox command queue. It is the caller's responsibility
10621 * to make sure that the driver is in the proper state to flush the mailbox
10622 * command sub-system. Namely, the posting of mailbox commands into the
10623 * pending mailbox command queue from the various clients must be stopped;
10624 * either the HBA is in a state that it will never work on the outstanding
10625 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10626 * mailbox command has been completed.
10627 **/
10628static void
10629lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10630{
10631 LIST_HEAD(completions);
10632 struct lpfc_sli *psli = &phba->sli;
10633 LPFC_MBOXQ_t *pmb;
10634 unsigned long iflag;
10635
523128e5
JS
10636 /* Disable softirqs, including timers from obtaining phba->hbalock */
10637 local_bh_disable();
10638
04c68496
JS
10639 /* Flush all the mailbox commands in the mbox system */
10640 spin_lock_irqsave(&phba->hbalock, iflag);
523128e5 10641
04c68496
JS
10642 /* The pending mailbox command queue */
10643 list_splice_init(&phba->sli.mboxq, &completions);
10644 /* The outstanding active mailbox command */
10645 if (psli->mbox_active) {
10646 list_add_tail(&psli->mbox_active->list, &completions);
10647 psli->mbox_active = NULL;
10648 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10649 }
10650 /* The completed mailbox command queue */
10651 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10652 spin_unlock_irqrestore(&phba->hbalock, iflag);
10653
523128e5
JS
10654 /* Enable softirqs again, done with phba->hbalock */
10655 local_bh_enable();
10656
04c68496
JS
10657 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10658 while (!list_empty(&completions)) {
10659 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10660 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10661 if (pmb->mbox_cmpl)
10662 pmb->mbox_cmpl(phba, pmb);
10663 }
10664}
10665
e59058c4 10666/**
3621a710 10667 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
10668 * @vport: Pointer to virtual port object.
10669 *
10670 * lpfc_sli_host_down is called to clean up the resources
10671 * associated with a vport before destroying virtual
10672 * port data structures.
10673 * This function does the following operations:
10674 * - Free discovery resources associated with this virtual
10675 * port.
10676 * - Free iocbs associated with this virtual port in
10677 * the txq.
10678 * - Send abort for all iocb commands associated with this
10679 * vport in txcmplq.
10680 *
10681 * This function is called with no lock held and always returns 1.
10682 **/
92d7f7b0
JS
10683int
10684lpfc_sli_host_down(struct lpfc_vport *vport)
10685{
858c9f6c 10686 LIST_HEAD(completions);
92d7f7b0
JS
10687 struct lpfc_hba *phba = vport->phba;
10688 struct lpfc_sli *psli = &phba->sli;
895427bd 10689 struct lpfc_queue *qp = NULL;
92d7f7b0
JS
10690 struct lpfc_sli_ring *pring;
10691 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
10692 int i;
10693 unsigned long flags = 0;
10694 uint16_t prev_pring_flag;
10695
10696 lpfc_cleanup_discovery_resources(vport);
10697
10698 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0 10699
895427bd
JS
10700 /*
10701 * Error everything on the txq since these iocbs
10702 * have not been given to the FW yet.
10703 * Also issue ABTS for everything on the txcmplq
10704 */
10705 if (phba->sli_rev != LPFC_SLI_REV4) {
10706 for (i = 0; i < psli->num_rings; i++) {
10707 pring = &psli->sli3_ring[i];
10708 prev_pring_flag = pring->flag;
10709 /* Only slow rings */
10710 if (pring->ringno == LPFC_ELS_RING) {
10711 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10712 /* Set the lpfc data pending flag */
10713 set_bit(LPFC_DATA_READY, &phba->data_flags);
10714 }
10715 list_for_each_entry_safe(iocb, next_iocb,
10716 &pring->txq, list) {
10717 if (iocb->vport != vport)
10718 continue;
10719 list_move_tail(&iocb->list, &completions);
10720 }
10721 list_for_each_entry_safe(iocb, next_iocb,
10722 &pring->txcmplq, list) {
10723 if (iocb->vport != vport)
10724 continue;
10725 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10726 }
10727 pring->flag = prev_pring_flag;
10728 }
10729 } else {
10730 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10731 pring = qp->pring;
10732 if (!pring)
92d7f7b0 10733 continue;
895427bd
JS
10734 if (pring == phba->sli4_hba.els_wq->pring) {
10735 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10736 /* Set the lpfc data pending flag */
10737 set_bit(LPFC_DATA_READY, &phba->data_flags);
10738 }
10739 prev_pring_flag = pring->flag;
10740 spin_lock_irq(&pring->ring_lock);
10741 list_for_each_entry_safe(iocb, next_iocb,
10742 &pring->txq, list) {
10743 if (iocb->vport != vport)
10744 continue;
10745 list_move_tail(&iocb->list, &completions);
10746 }
10747 spin_unlock_irq(&pring->ring_lock);
10748 list_for_each_entry_safe(iocb, next_iocb,
10749 &pring->txcmplq, list) {
10750 if (iocb->vport != vport)
10751 continue;
10752 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10753 }
10754 pring->flag = prev_pring_flag;
92d7f7b0 10755 }
92d7f7b0 10756 }
92d7f7b0
JS
10757 spin_unlock_irqrestore(&phba->hbalock, flags);
10758
a257bf90
JS
10759 /* Cancel all the IOCBs from the completions list */
10760 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10761 IOERR_SLI_DOWN);
92d7f7b0
JS
10762 return 1;
10763}
10764
e59058c4 10765/**
3621a710 10766 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
10767 * @phba: Pointer to HBA context object.
10768 *
10769 * This function cleans up all iocbs, buffers, and mailbox commands
10770 * while shutting down the HBA. This function is called with no
10771 * lock held and always returns 1.
10772 * This function does the following to cleanup driver resources:
10773 * - Free discovery resources for each virtual port
10774 * - Cleanup any pending fabric iocbs
10775 * - Iterate through the iocb txq and free each entry
10776 * in the list.
10777 * - Free up any buffer posted to the HBA
10778 * - Free mailbox commands in the mailbox queue.
10779 **/
dea3101e 10780int
2e0fef85 10781lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 10782{
2534ba75 10783 LIST_HEAD(completions);
2e0fef85 10784 struct lpfc_sli *psli = &phba->sli;
895427bd 10785 struct lpfc_queue *qp = NULL;
dea3101e 10786 struct lpfc_sli_ring *pring;
0ff10d46 10787 struct lpfc_dmabuf *buf_ptr;
dea3101e 10788 unsigned long flags = 0;
04c68496
JS
10789 int i;
10790
10791 /* Shutdown the mailbox command sub-system */
618a5230 10792 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 10793
dea3101e 10794 lpfc_hba_down_prep(phba);
10795
523128e5
JS
10796 /* Disable softirqs, including timers from obtaining phba->hbalock */
10797 local_bh_disable();
10798
92d7f7b0
JS
10799 lpfc_fabric_abort_hba(phba);
10800
2e0fef85 10801 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 10802
895427bd
JS
10803 /*
10804 * Error everything on the txq since these iocbs
10805 * have not been given to the FW yet.
10806 */
10807 if (phba->sli_rev != LPFC_SLI_REV4) {
10808 for (i = 0; i < psli->num_rings; i++) {
10809 pring = &psli->sli3_ring[i];
10810 /* Only slow rings */
10811 if (pring->ringno == LPFC_ELS_RING) {
10812 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10813 /* Set the lpfc data pending flag */
10814 set_bit(LPFC_DATA_READY, &phba->data_flags);
10815 }
10816 list_splice_init(&pring->txq, &completions);
10817 }
10818 } else {
10819 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10820 pring = qp->pring;
10821 if (!pring)
10822 continue;
10823 spin_lock_irq(&pring->ring_lock);
10824 list_splice_init(&pring->txq, &completions);
10825 spin_unlock_irq(&pring->ring_lock);
10826 if (pring == phba->sli4_hba.els_wq->pring) {
10827 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10828 /* Set the lpfc data pending flag */
10829 set_bit(LPFC_DATA_READY, &phba->data_flags);
10830 }
10831 }
2534ba75 10832 }
2e0fef85 10833 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 10834
a257bf90
JS
10835 /* Cancel all the IOCBs from the completions list */
10836 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10837 IOERR_SLI_DOWN);
dea3101e 10838
0ff10d46
JS
10839 spin_lock_irqsave(&phba->hbalock, flags);
10840 list_splice_init(&phba->elsbuf, &completions);
10841 phba->elsbuf_cnt = 0;
10842 phba->elsbuf_prev_cnt = 0;
10843 spin_unlock_irqrestore(&phba->hbalock, flags);
10844
10845 while (!list_empty(&completions)) {
10846 list_remove_head(&completions, buf_ptr,
10847 struct lpfc_dmabuf, list);
10848 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10849 kfree(buf_ptr);
10850 }
10851
523128e5
JS
10852 /* Enable softirqs again, done with phba->hbalock */
10853 local_bh_enable();
10854
dea3101e 10855 /* Return any active mbox cmds */
10856 del_timer_sync(&psli->mbox_tmo);
2e0fef85 10857
da0436e9 10858 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 10859 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 10860 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 10861
da0436e9
JS
10862 return 1;
10863}
10864
e59058c4 10865/**
3621a710 10866 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
10867 * @srcp: Source memory pointer.
10868 * @destp: Destination memory pointer.
10869 * @cnt: Number of bytes to be copied (stepped one 32-bit word at a time).
10870 *
10871 * This function is used for copying data between driver memory
10872 * and the SLI memory. This function also changes the endianness
10873 * of each word if native endianness is different from SLI
10874 * endianness. This function can be called with or without
10875 * lock.
10876 **/
dea3101e 10877void
10878lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10879{
10880 uint32_t *src = srcp;
10881 uint32_t *dest = destp;
10882 uint32_t ldata;
10883 int i;
10884
10885 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10886 ldata = *src;
10887 ldata = le32_to_cpu(ldata);
10888 *dest = ldata;
10889 src++;
10890 dest++;
10891 }
10892}
10893
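/*
 * Editorial sketch (not part of the driver source): the count is a byte
 * count walked in 32-bit steps, and each word is converted from SLI
 * (little-endian) order to host order, e.g.
 *
 *	uint32_t slim[2];	// two words as read from SLI memory
 *	uint32_t host[2];
 *
 *	lpfc_sli_pcimem_bcopy(slim, host, 2 * sizeof(uint32_t));
 *	// host[0] == le32_to_cpu(slim[0]); host[1] == le32_to_cpu(slim[1])
 */
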
e59058c4 10894
a0c87cbd
JS
10895/**
10896 * lpfc_sli_bemem_bcopy - SLI memory copy function
10897 * @srcp: Source memory pointer.
10898 * @destp: Destination memory pointer.
10899 * @cnt: Number of bytes to be copied (stepped one 32-bit word at a time).
10900 *
10901 * This function is used for copying data from a data structure
10902 * with big endian representation to local endianness.
10903 * This function can be called with or without lock.
10904 **/
10905void
10906lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10907{
10908 uint32_t *src = srcp;
10909 uint32_t *dest = destp;
10910 uint32_t ldata;
10911 int i;
10912
10913 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10914 ldata = *src;
10915 ldata = be32_to_cpu(ldata);
10916 *dest = ldata;
10917 src++;
10918 dest++;
10919 }
10920}
10921
e59058c4 10922/**
3621a710 10923 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
10924 * @phba: Pointer to HBA context object.
10925 * @pring: Pointer to driver SLI ring object.
10926 * @mp: Pointer to driver buffer object.
10927 *
10928 * This function is called with no lock held.
10929 * It always returns zero after adding the buffer to the postbufq
10930 * buffer list.
10931 **/
dea3101e 10932int
2e0fef85
JS
10933lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10934 struct lpfc_dmabuf *mp)
dea3101e 10935{
10936 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10937 later */
2e0fef85 10938 spin_lock_irq(&phba->hbalock);
dea3101e 10939 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 10940 pring->postbufq_cnt++;
2e0fef85 10941 spin_unlock_irq(&phba->hbalock);
dea3101e 10942 return 0;
10943}
10944
e59058c4 10945/**
3621a710 10946 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
10947 * @phba: Pointer to HBA context object.
10948 *
10949 * When HBQ is enabled, buffers are searched based on tags. This function
10950 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
10951 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10952 * does not conflict with tags of buffers posted for unsolicited events.
10953 * The function returns the allocated tag. The function is called with
10954 * no locks held.
10955 **/
76bb24ef
JS
10956uint32_t
10957lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10958{
10959 spin_lock_irq(&phba->hbalock);
10960 phba->buffer_tag_count++;
10961 /*
10962	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
10963	 * a tag assigned by HBQ.
10964 */
10965 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10966 spin_unlock_irq(&phba->hbalock);
10967 return phba->buffer_tag_count;
10968}
10969
e59058c4 10970/**
3621a710 10971 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
10972 * @phba: Pointer to HBA context object.
10973 * @pring: Pointer to driver SLI ring object.
10974 * @tag: Buffer tag.
10975 *
10976 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10977 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10978 * iocb is posted to the response ring with the tag of the buffer.
10979 * This function searches the pring->postbufq list using the tag
10980 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10981 * iocb. If the buffer is found, the lpfc_dmabuf object of the
10982 * buffer is returned to the caller; otherwise NULL is returned.
10983 * This function is called with no lock held.
10984 **/
76bb24ef
JS
10985struct lpfc_dmabuf *
10986lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10987 uint32_t tag)
10988{
10989 struct lpfc_dmabuf *mp, *next_mp;
10990 struct list_head *slp = &pring->postbufq;
10991
25985edc 10992 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
10993 spin_lock_irq(&phba->hbalock);
10994 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10995 if (mp->buffer_tag == tag) {
10996 list_del_init(&mp->list);
10997 pring->postbufq_cnt--;
10998 spin_unlock_irq(&phba->hbalock);
10999 return mp;
11000 }
11001 }
11002
11003 spin_unlock_irq(&phba->hbalock);
11004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 11005 "0402 Cannot find virtual addr for buffer tag on "
76bb24ef
JS
11006 "ring %d Data x%lx x%p x%p x%x\n",
11007 pring->ringno, (unsigned long) tag,
11008 slp->next, slp->prev, pring->postbufq_cnt);
11009
11010 return NULL;
11011}
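/*
 * Illustrative sketch (hypothetical, not driver code) pairing the two
 * routines above: a buffer posted with a CMD_QUE_XRI64_CX iocb carries the
 * tag returned by lpfc_sli_get_buffer_tag(), and the response-ring handler
 * recovers that buffer with lpfc_sli_ring_taggedbuf_get() using the tag
 * echoed back in the CMD_IOCB_RET_XRI64_CX iocb:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... post the CMD_QUE_XRI64_CX iocb referencing mp ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 *	if (!mp)
 *		return;		(no buffer found for this tag)
 */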
dea3101e 11012
e59058c4 11013/**
3621a710 11014 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
11015 * @phba: Pointer to HBA context object.
11016 * @pring: Pointer to driver SLI ring object.
11017 * @phys: DMA address of the buffer.
11018 *
11019 * This function searches the buffer list using the dma_address
11020 * of the unsolicited event to find the driver's lpfc_dmabuf object
11021 * corresponding to the dma_address. The function returns the
11022 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11023 * This function is called by the CT and ELS unsolicited event
11024 * handlers to get the buffer associated with the unsolicited
11025 * event.
11026 *
11027 * This function is called with no lock held.
11028 **/
dea3101e 11029struct lpfc_dmabuf *
11030lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11031 dma_addr_t phys)
11032{
11033 struct lpfc_dmabuf *mp, *next_mp;
11034 struct list_head *slp = &pring->postbufq;
11035
25985edc 11036 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 11037 spin_lock_irq(&phba->hbalock);
dea3101e 11038 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11039 if (mp->phys == phys) {
11040 list_del_init(&mp->list);
11041 pring->postbufq_cnt--;
2e0fef85 11042 spin_unlock_irq(&phba->hbalock);
dea3101e 11043 return mp;
11044 }
11045 }
11046
2e0fef85 11047 spin_unlock_irq(&phba->hbalock);
dea3101e 11048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 11049 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 11050 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 11051 pring->ringno, (unsigned long long)phys,
dea3101e 11052 slp->next, slp->prev, pring->postbufq_cnt);
11053 return NULL;
11054}
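/*
 * Illustrative sketch (hypothetical): unlike the tag-based lookup above,
 * the CT/ELS unsolicited paths look a posted buffer up by its DMA address.
 * The address below is assumed to come from the unsolicited iocb; the
 * variable names are placeholders:
 *
 *	struct lpfc_dmabuf *mp;
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, unsol_buf_phys);
 *	if (!mp)
 *		return;		(buffer already reclaimed, drop the event)
 *	... process mp->virt, then repost or free the buffer ...
 */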
11055
e59058c4 11056/**
3621a710 11057 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
11058 * @phba: Pointer to HBA context object.
11059 * @cmdiocb: Pointer to driver command iocb object.
11060 * @rspiocb: Pointer to driver response iocb object.
11061 *
11062 * This function is the completion handler for the abort iocbs for
11063 * ELS commands. This function is called from the ELS ring event
11064 * handler with no lock held. This function frees memory resources
11065 * associated with the abort iocb.
11066 **/
dea3101e 11067static void
2e0fef85
JS
11068lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11069 struct lpfc_iocbq *rspiocb)
dea3101e 11070{
2e0fef85 11071 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 11072 uint16_t abort_iotag, abort_context;
ff78d8f9 11073 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
11074
11075 if (irsp->ulpStatus) {
ff78d8f9
JS
11076
11077 /*
11078 * Assume that the port already completed and returned, or
11079		 * will return the iocb. Just log the message.
11080 */
2680eeaa
JS
11081 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11082 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11083
2e0fef85 11084 spin_lock_irq(&phba->hbalock);
45ed1190 11085 if (phba->sli_rev < LPFC_SLI_REV4) {
faa832e9
JS
11086 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11087 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11088 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11089 spin_unlock_irq(&phba->hbalock);
11090 goto release_iocb;
11091 }
45ed1190
JS
11092 if (abort_iotag != 0 &&
11093 abort_iotag <= phba->sli.last_iotag)
11094 abort_iocb =
11095 phba->sli.iocbq_lookup[abort_iotag];
11096 } else
11097 /* For sli4 the abort_tag is the XRI,
11098 * so the abort routine puts the iotag of the iocb
11099 * being aborted in the context field of the abort
11100 * IOCB.
11101 */
11102 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 11103
2a9bf3d0
JS
11104 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11105 "0327 Cannot abort els iocb %p "
11106 "with tag %x context %x, abort status %x, "
11107 "abort code %x\n",
11108 abort_iocb, abort_iotag, abort_context,
11109 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 11110
ff78d8f9 11111 spin_unlock_irq(&phba->hbalock);
2680eeaa 11112 }
faa832e9 11113release_iocb:
604a3e30 11114 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 11115 return;
11116}
11117
e59058c4 11118/**
3621a710 11119 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
11120 * @phba: Pointer to HBA context object.
11121 * @cmdiocb: Pointer to driver command iocb object.
11122 * @rspiocb: Pointer to driver response iocb object.
11123 *
11124 * The function is called from SLI ring event handler with no
11125 * lock held. This function is the completion handler for ELS commands
11126 * which are aborted. The function frees memory resources used for
11127 * the aborted ELS commands.
11128 **/
92d7f7b0
JS
11129static void
11130lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11131 struct lpfc_iocbq *rspiocb)
11132{
11133 IOCB_t *irsp = &rspiocb->iocb;
11134
11135 /* ELS cmd tag <ulpIoTag> completes */
11136 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 11137 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 11138 "x%x x%x x%x\n",
e8b62011 11139 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 11140 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
11141 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11142 lpfc_ct_free_iocb(phba, cmdiocb);
11143 else
11144 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
11145 return;
11146}
11147
e59058c4 11148/**
5af5eee7 11149 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
11150 * @phba: Pointer to HBA context object.
11151 * @pring: Pointer to driver SLI ring object.
11152 * @cmdiocb: Pointer to driver command iocb object.
11153 *
5af5eee7
JS
11154 * This function issues an abort iocb for the provided command iocb down to
11155 * the port. Unless the outstanding command iocb is itself an abort
11156 * request, this function issues the abort unconditionally. This function is
11157 * called with hbalock held. The function returns 0 when it fails due to a
11158 * memory allocation failure or when the command iocb is an abort request.
e59058c4 11159 **/
5af5eee7
JS
11160static int
11161lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 11162 struct lpfc_iocbq *cmdiocb)
dea3101e 11163{
2e0fef85 11164 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 11165 struct lpfc_iocbq *abtsiocbp;
dea3101e 11166 IOCB_t *icmd = NULL;
11167 IOCB_t *iabt = NULL;
5af5eee7 11168 int retval;
7e56aa25 11169 unsigned long iflags;
faa832e9 11170 struct lpfc_nodelist *ndlp;
07951076 11171
1c2ba475
JT
11172 lockdep_assert_held(&phba->hbalock);
11173
92d7f7b0
JS
11174 /*
11175 * There are certain command types we don't want to abort. And we
11176 * don't want to abort commands that are already in the process of
11177 * being aborted.
07951076
JS
11178 */
11179 icmd = &cmdiocb->iocb;
2e0fef85 11180 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
11181 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11182 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
11183 return 0;
11184
dea3101e 11185 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 11186 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 11187 if (abtsiocbp == NULL)
11188 return 0;
dea3101e 11189
07951076 11190 /* This signals the response to set the correct status
341af102 11191 * before calling the completion handler
07951076
JS
11192 */
11193 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11194
dea3101e 11195 iabt = &abtsiocbp->iocb;
07951076
JS
11196 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11197 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 11198 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 11199 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190 11200 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
faa832e9 11201 } else {
da0436e9 11202 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
faa832e9
JS
11203 if (pring->ringno == LPFC_ELS_RING) {
11204 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11205 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11206 }
11207 }
07951076
JS
11208 iabt->ulpLe = 1;
11209 iabt->ulpClass = icmd->ulpClass;
dea3101e 11210
5ffc266e 11211 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11212 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
341af102
JS
11213 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11214 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11215 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11216 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11217
2e0fef85 11218 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
11219 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11220 else
11221 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 11222
07951076 11223 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
e6c6acc0 11224 abtsiocbp->vport = vport;
5b8bd0c9 11225
e8b62011
JS
11226 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11227 "0339 Abort xri x%x, original iotag x%x, "
11228 "abort cmd iotag x%x\n",
2a9bf3d0 11229 iabt->un.acxri.abortIoTag,
e8b62011 11230 iabt->un.acxri.abortContextTag,
2a9bf3d0 11231 abtsiocbp->iotag);
7e56aa25
JS
11232
11233 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
11234 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11235 if (unlikely(pring == NULL))
9bd2bff5 11236 return 0;
7e56aa25
JS
11237 /* Note: both hbalock and ring_lock need to be set here */
11238 spin_lock_irqsave(&pring->ring_lock, iflags);
11239 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11240 abtsiocbp, 0);
11241 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11242 } else {
11243 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11244 abtsiocbp, 0);
11245 }
dea3101e 11246
d7c255b2
JS
11247 if (retval)
11248 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
11249
11250 /*
11251 * Caller to this routine should check for IOCB_ERROR
11252 * and handle it properly. This routine no longer removes
11253 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11254 */
11255 return retval;
11256}
11257
11258/**
11259 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11260 * @phba: Pointer to HBA context object.
11261 * @pring: Pointer to driver SLI ring object.
11262 * @cmdiocb: Pointer to driver command iocb object.
11263 *
11264 * This function issues an abort iocb for the provided command iocb. In case
11265 * of unloading, the abort iocb will not be issued to commands on the ELS
11266 * ring. Instead, the completion callback of those commands is changed
11267 * so that nothing happens when they finish. This function is called with
11268 * hbalock held. The function returns 0 when the command iocb is an abort
11269 * request.
11270 **/
11271int
11272lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11273 struct lpfc_iocbq *cmdiocb)
11274{
11275 struct lpfc_vport *vport = cmdiocb->vport;
11276 int retval = IOCB_ERROR;
11277 IOCB_t *icmd = NULL;
11278
1c2ba475
JT
11279 lockdep_assert_held(&phba->hbalock);
11280
5af5eee7
JS
11281 /*
11282 * There are certain command types we don't want to abort. And we
11283 * don't want to abort commands that are already in the process of
11284 * being aborted.
11285 */
11286 icmd = &cmdiocb->iocb;
11287 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11288 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11289 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11290 return 0;
11291
1234a6d5
DK
11292 if (!pring) {
11293 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11294 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11295 else
11296 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11297 goto abort_iotag_exit;
11298 }
11299
5af5eee7
JS
11300 /*
11301 * If we're unloading, don't abort iocb on the ELS ring, but change
11302 * the callback so that nothing happens when it finishes.
11303 */
11304 if ((vport->load_flag & FC_UNLOADING) &&
11305 (pring->ringno == LPFC_ELS_RING)) {
11306 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11307 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11308 else
11309 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11310 goto abort_iotag_exit;
11311 }
11312
11313 /* Now, we try to issue the abort to the cmdiocb out */
11314 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11315
07951076 11316abort_iotag_exit:
2e0fef85
JS
11317 /*
11318 * Caller to this routine should check for IOCB_ERROR
11319 * and handle it properly. This routine no longer removes
11320 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 11321 */
2e0fef85 11322 return retval;
dea3101e 11323}
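/*
 * Illustrative sketch (hypothetical caller): lpfc_sli_issue_abort_iotag()
 * must be called with the hbalock held and only reports whether the abort
 * could be issued; it neither removes the original iocb from the txcmplq
 * nor completes it on failure, so the caller retains ownership:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (ret == IOCB_ERROR)
 *		... abort not issued; cmdiocb is still outstanding ...
 */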
11324
895427bd
JS
11325/**
11326 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
11327 * @phba: Pointer to HBA context object.
11328 * @pring: Pointer to driver SLI ring object.
11329 * @cmdiocb: Pointer to driver command iocb object.
11330 *
11331 * This function issues an abort iocb for the provided command iocb down to
11332 * the port. Unless the outstanding command iocb is itself an abort
11333 * request, this function issues the abort unconditionally. This function is
11334 * called with hbalock held. The function returns 0 when it fails due to a
11335 * memory allocation failure or when the command iocb is an abort request.
11336 **/
11337static int
11338lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11339 struct lpfc_iocbq *cmdiocb)
11340{
11341 struct lpfc_vport *vport = cmdiocb->vport;
11342 struct lpfc_iocbq *abtsiocbp;
205e8240 11343 union lpfc_wqe128 *abts_wqe;
895427bd 11344 int retval;
1fbf9742 11345 int idx = cmdiocb->hba_wqidx;
895427bd
JS
11346
11347 /*
11348 * There are certain command types we don't want to abort. And we
11349 * don't want to abort commands that are already in the process of
11350 * being aborted.
11351 */
11352 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
11353 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
11354 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11355 return 0;
11356
11357 /* issue ABTS for this io based on iotag */
11358 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11359 if (abtsiocbp == NULL)
11360 return 0;
11361
11362 /* This signals the response to set the correct status
11363 * before calling the completion handler
11364 */
11365 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11366
11367 /* Complete prepping the abort wqe and issue to the FW. */
11368 abts_wqe = &abtsiocbp->wqe;
895427bd 11369
1c36833d
JS
11370 /* Clear any stale WQE contents */
11371 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
11372 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
895427bd
JS
11373
11374 /* word 7 */
895427bd
JS
11375 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11376 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11377 cmdiocb->iocb.ulpClass);
11378
11379 /* word 8 - tell the FW to abort the IO associated with this
11380 * outstanding exchange ID.
11381 */
11382 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11383
11384 /* word 9 - this is the iotag for the abts_wqe completion. */
11385 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11386 abtsiocbp->iotag);
11387
11388 /* word 10 */
895427bd
JS
11389 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11390 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11391
11392 /* word 11 */
11393 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11394 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11395 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11396
11397 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11398 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11399 abtsiocbp->vport = vport;
01649561 11400 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1fbf9742
JS
11401 retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
11402 abtsiocbp);
cd22d605 11403 if (retval) {
895427bd
JS
11404 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11405 "6147 Failed abts issue_wqe with status x%x "
11406 "for oxid x%x\n",
11407 retval, cmdiocb->sli4_xritag);
11408 lpfc_sli_release_iocbq(phba, abtsiocbp);
11409 return retval;
11410 }
11411
11412 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11413 "6148 Drv Abort NVME Request Issued for "
11414 "ox_id x%x on reqtag x%x\n",
11415 cmdiocb->sli4_xritag,
11416 abtsiocbp->iotag);
11417
11418 return retval;
11419}
11420
5af5eee7
JS
11421/**
11422 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11423 * @phba: pointer to lpfc HBA data structure.
11424 *
11425 * This routine will abort all pending and outstanding iocbs to an HBA.
11426 **/
11427void
11428lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11429{
11430 struct lpfc_sli *psli = &phba->sli;
11431 struct lpfc_sli_ring *pring;
895427bd 11432 struct lpfc_queue *qp = NULL;
5af5eee7
JS
11433 int i;
11434
895427bd
JS
11435 if (phba->sli_rev != LPFC_SLI_REV4) {
11436 for (i = 0; i < psli->num_rings; i++) {
11437 pring = &psli->sli3_ring[i];
11438 lpfc_sli_abort_iocb_ring(phba, pring);
11439 }
11440 return;
11441 }
11442 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11443 pring = qp->pring;
11444 if (!pring)
11445 continue;
db55fba8 11446 lpfc_sli_abort_iocb_ring(phba, pring);
5af5eee7
JS
11447 }
11448}
11449
e59058c4 11450/**
3621a710 11451 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
e59058c4
JS
11452 * @iocbq: Pointer to driver iocb object.
11453 * @vport: Pointer to driver virtual port object.
11454 * @tgt_id: SCSI ID of the target.
11455 * @lun_id: LUN ID of the scsi device.
11456 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11457 *
3621a710 11458 * This function acts as an iocb filter for functions which abort or count
e59058c4
JS
11459 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11460 * 0 if the filtering criteria are met for the given iocb and will return
11461 * 1 if the filtering criteria are not met.
11462 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11463 * given iocb is for the SCSI device specified by vport, tgt_id and
11464 * lun_id parameter.
11465 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11466 * given iocb is for the SCSI target specified by vport and tgt_id
11467 * parameters.
11468 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11469 * given iocb is for the SCSI host associated with the given vport.
11470 * This function is called with no locks held.
11471 **/
dea3101e 11472static int
51ef4c26
JS
11473lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11474 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 11475 lpfc_ctx_cmd ctx_cmd)
dea3101e 11476{
c490850a 11477 struct lpfc_io_buf *lpfc_cmd;
dea3101e 11478 int rc = 1;
11479
b0e83012 11480 if (iocbq->vport != vport)
0bd4ca25
JSEC
11481 return rc;
11482
b0e83012
JS
11483 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11484 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
51ef4c26
JS
11485 return rc;
11486
c490850a 11487 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
0bd4ca25 11488
495a714c 11489 if (lpfc_cmd->pCmd == NULL)
dea3101e 11490 return rc;
11491
11492 switch (ctx_cmd) {
11493 case LPFC_CTX_LUN:
b0e83012 11494 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c
JS
11495 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11496 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea3101e 11497 rc = 0;
11498 break;
11499 case LPFC_CTX_TGT:
b0e83012 11500 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c 11501 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea3101e 11502 rc = 0;
11503 break;
dea3101e 11504 case LPFC_CTX_HOST:
11505 rc = 0;
11506 break;
11507 default:
11508 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 11509 __func__, ctx_cmd);
dea3101e 11510 break;
11511 }
11512
11513 return rc;
11514}
11515
e59058c4 11516/**
3621a710 11517 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
e59058c4
JS
11518 * @vport: Pointer to virtual port.
11519 * @tgt_id: SCSI ID of the target.
11520 * @lun_id: LUN ID of the scsi device.
11521 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11522 *
11523 * This function returns the number of FCP commands pending for the vport.
11524 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11525 * commands pending on the vport associated with SCSI device specified
11526 * by tgt_id and lun_id parameters.
11527 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11528 * commands pending on the vport associated with SCSI target specified
11529 * by tgt_id parameter.
11530 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11531 * commands pending on the vport.
11532 * This function returns the number of iocbs which satisfy the filter.
11533 * This function is called without any lock held.
11534 **/
dea3101e 11535int
51ef4c26
JS
11536lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11537 lpfc_ctx_cmd ctx_cmd)
dea3101e 11538{
51ef4c26 11539 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11540 struct lpfc_iocbq *iocbq;
11541 int sum, i;
dea3101e 11542
31979008 11543 spin_lock_irq(&phba->hbalock);
0bd4ca25
JSEC
11544 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11545 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11546
51ef4c26
JS
11547 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11548 ctx_cmd) == 0)
0bd4ca25 11549 sum++;
dea3101e 11550 }
31979008 11551 spin_unlock_irq(&phba->hbalock);
0bd4ca25 11552
dea3101e 11553 return sum;
11554}
11555
e59058c4 11556/**
3621a710 11557 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
e59058c4
JS
11558 * @phba: Pointer to HBA context object
11559 * @cmdiocb: Pointer to command iocb object.
11560 * @rspiocb: Pointer to response iocb object.
11561 *
11562 * This function is called when an aborted FCP iocb completes. This
11563 * function is called by the ring event handler with no lock held.
11564 * This function frees the iocb.
11565 **/
5eb95af0 11566void
2e0fef85
JS
11567lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11568 struct lpfc_iocbq *rspiocb)
5eb95af0 11569{
cb69f7de 11570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8e668af5 11571 "3096 ABORT_XRI_CN completing on rpi x%x "
cb69f7de
JS
11572 "original iotag x%x, abort cmd iotag x%x "
11573 "status 0x%x, reason 0x%x\n",
11574 cmdiocb->iocb.un.acxri.abortContextTag,
11575 cmdiocb->iocb.un.acxri.abortIoTag,
11576 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11577 rspiocb->iocb.un.ulpWord[4]);
604a3e30 11578 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
11579 return;
11580}
11581
e59058c4 11582/**
3621a710 11583 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
e59058c4
JS
11584 * @vport: Pointer to virtual port.
11585 * @pring: Pointer to driver SLI ring object.
11586 * @tgt_id: SCSI ID of the target.
11587 * @lun_id: LUN ID of the scsi device.
11588 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11589 *
11590 * This function sends an abort command for every SCSI command
11591 * associated with the given virtual port pending on the ring
11592 * filtered by lpfc_sli_validate_fcp_iocb function.
11593 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11594 * FCP iocbs associated with lun specified by tgt_id and lun_id
11595 * parameters
11596 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11597 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11598 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11599 * FCP iocbs associated with the virtual port.
11600 * This function returns the number of iocbs it failed to abort.
11601 * This function is called with no locks held.
11602 **/
dea3101e 11603int
51ef4c26
JS
11604lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11605 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 11606{
51ef4c26 11607 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11608 struct lpfc_iocbq *iocbq;
11609 struct lpfc_iocbq *abtsiocb;
ecbb227e 11610 struct lpfc_sli_ring *pring_s4;
dea3101e 11611 IOCB_t *cmd = NULL;
dea3101e 11612 int errcnt = 0, ret_val = 0;
0bd4ca25 11613 int i;
dea3101e 11614
b0e83012
JS
11615 /* all I/Os are in process of being flushed */
11616 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11617 return errcnt;
11618
0bd4ca25
JSEC
11619 for (i = 1; i <= phba->sli.last_iotag; i++) {
11620 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11621
51ef4c26 11622 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 11623 abort_cmd) != 0)
dea3101e 11624 continue;
11625
afbd8d88
JS
11626 /*
11627 * If the iocbq is already being aborted, don't take a second
11628 * action, but do count it.
11629 */
11630 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11631 continue;
11632
dea3101e 11633 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 11634 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 11635 if (abtsiocb == NULL) {
11636 errcnt++;
11637 continue;
11638 }
dea3101e 11639
afbd8d88
JS
11640 /* indicate the IO is being aborted by the driver. */
11641 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11642
0bd4ca25 11643 cmd = &iocbq->iocb;
dea3101e 11644 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11645 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
11646 if (phba->sli_rev == LPFC_SLI_REV4)
11647 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11648 else
11649 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 11650 abtsiocb->iocb.ulpLe = 1;
11651 abtsiocb->iocb.ulpClass = cmd->ulpClass;
afbd8d88 11652 abtsiocb->vport = vport;
dea3101e 11653
5ffc266e 11654 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11655 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
341af102
JS
11656 if (iocbq->iocb_flag & LPFC_IO_FCP)
11657 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11658 if (iocbq->iocb_flag & LPFC_IO_FOF)
11659 abtsiocb->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11660
2e0fef85 11661 if (lpfc_is_link_up(phba))
dea3101e 11662 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11663 else
11664 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11665
5eb95af0
JSEC
11666 /* Setup callback routine and issue the command. */
11667 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
ecbb227e
JS
11668 if (phba->sli_rev == LPFC_SLI_REV4) {
11669 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11670 if (!pring_s4)
11671 continue;
11672 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11673 abtsiocb, 0);
11674 } else
11675 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11676 abtsiocb, 0);
dea3101e 11677 if (ret_val == IOCB_ERROR) {
604a3e30 11678 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 11679 errcnt++;
11680 continue;
11681 }
11682 }
11683
11684 return errcnt;
11685}
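/*
 * Illustrative sketch (hypothetical reset path, details elided): abort
 * everything queued to one SCSI target and then poll the filter-based
 * counter above until the aborted FCP iocbs have drained:
 *
 *	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT))
 *		msleep(10);	(a real caller would bound this wait)
 */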
11686
98912dda
JS
11687/**
11688 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11689 * @vport: Pointer to virtual port.
11690 * @pring: Pointer to driver SLI ring object.
11691 * @tgt_id: SCSI ID of the target.
11692 * @lun_id: LUN ID of the scsi device.
11693 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11694 *
11695 * This function sends an abort command for every SCSI command
11696 * associated with the given virtual port pending on the ring
11697 * filtered by lpfc_sli_validate_fcp_iocb function.
11698 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11699 * FCP iocbs associated with lun specified by tgt_id and lun_id
11700 * parameters
11701 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11702 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11703 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11704 * FCP iocbs associated with the virtual port.
11705 * This function returns the number of iocbs it aborted.
11706 * This function is called with no locks held right after a taskmgmt
11707 * command is sent.
11708 **/
11709int
11710lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11711 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11712{
11713 struct lpfc_hba *phba = vport->phba;
c490850a 11714 struct lpfc_io_buf *lpfc_cmd;
98912dda 11715 struct lpfc_iocbq *abtsiocbq;
8c50d25c 11716 struct lpfc_nodelist *ndlp;
98912dda
JS
11717 struct lpfc_iocbq *iocbq;
11718 IOCB_t *icmd;
11719 int sum, i, ret_val;
11720 unsigned long iflags;
c2017260 11721 struct lpfc_sli_ring *pring_s4 = NULL;
98912dda 11722
59c68eaa 11723 spin_lock_irqsave(&phba->hbalock, iflags);
98912dda
JS
11724
11725 /* all I/Os are in process of being flushed */
11726 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
59c68eaa 11727 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11728 return 0;
11729 }
11730 sum = 0;
11731
11732 for (i = 1; i <= phba->sli.last_iotag; i++) {
11733 iocbq = phba->sli.iocbq_lookup[i];
11734
11735 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11736 cmd) != 0)
11737 continue;
11738
c2017260
JS
11739 /* Guard against IO completion being called at same time */
11740 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11741 spin_lock(&lpfc_cmd->buf_lock);
11742
11743 if (!lpfc_cmd->pCmd) {
11744 spin_unlock(&lpfc_cmd->buf_lock);
11745 continue;
11746 }
11747
11748 if (phba->sli_rev == LPFC_SLI_REV4) {
11749 pring_s4 =
11750 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
11751 if (!pring_s4) {
11752 spin_unlock(&lpfc_cmd->buf_lock);
11753 continue;
11754 }
11755 /* Note: both hbalock and ring_lock must be set here */
11756 spin_lock(&pring_s4->ring_lock);
11757 }
11758
98912dda
JS
11759 /*
11760 * If the iocbq is already being aborted, don't take a second
11761 * action, but do count it.
11762 */
c2017260
JS
11763 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11764 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11765 if (phba->sli_rev == LPFC_SLI_REV4)
11766 spin_unlock(&pring_s4->ring_lock);
11767 spin_unlock(&lpfc_cmd->buf_lock);
98912dda 11768 continue;
c2017260 11769 }
98912dda
JS
11770
11771 /* issue ABTS for this IOCB based on iotag */
11772 abtsiocbq = __lpfc_sli_get_iocbq(phba);
c2017260
JS
11773 if (!abtsiocbq) {
11774 if (phba->sli_rev == LPFC_SLI_REV4)
11775 spin_unlock(&pring_s4->ring_lock);
11776 spin_unlock(&lpfc_cmd->buf_lock);
98912dda 11777 continue;
c2017260 11778 }
98912dda
JS
11779
11780 icmd = &iocbq->iocb;
11781 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11782 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11783 if (phba->sli_rev == LPFC_SLI_REV4)
11784 abtsiocbq->iocb.un.acxri.abortIoTag =
11785 iocbq->sli4_xritag;
11786 else
11787 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11788 abtsiocbq->iocb.ulpLe = 1;
11789 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11790 abtsiocbq->vport = vport;
11791
11792 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11793 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
98912dda
JS
11794 if (iocbq->iocb_flag & LPFC_IO_FCP)
11795 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11796 if (iocbq->iocb_flag & LPFC_IO_FOF)
11797 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
98912dda 11798
8c50d25c
JS
11799 ndlp = lpfc_cmd->rdata->pnode;
11800
11801 if (lpfc_is_link_up(phba) &&
11802 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
98912dda
JS
11803 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11804 else
11805 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11806
11807 /* Setup callback routine and issue the command. */
11808 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11809
11810 /*
11811 * Indicate the IO is being aborted by the driver and set
11812 * the caller's flag into the aborted IO.
11813 */
11814 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11815
11816 if (phba->sli_rev == LPFC_SLI_REV4) {
98912dda
JS
11817 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11818 abtsiocbq, 0);
59c68eaa 11819 spin_unlock(&pring_s4->ring_lock);
98912dda
JS
11820 } else {
11821 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11822 abtsiocbq, 0);
11823 }
11824
c2017260 11825 spin_unlock(&lpfc_cmd->buf_lock);
98912dda
JS
11826
11827 if (ret_val == IOCB_ERROR)
11828 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11829 else
11830 sum++;
11831 }
59c68eaa 11832 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11833 return sum;
11834}
11835
e59058c4 11836/**
3621a710 11837 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
e59058c4
JS
11838 * @phba: Pointer to HBA context object.
11839 * @cmdiocbq: Pointer to command iocb.
11840 * @rspiocbq: Pointer to response iocb.
11841 *
11842 * This function is the completion handler for iocbs issued using
11843 * lpfc_sli_issue_iocb_wait function. This function is called by the
11844 * ring event handler function without any lock held. This function
11845 * can be called from both worker thread context and interrupt
11846 * context. This function can also be called from another thread which
11847 * cleans up the SLI layer objects.
11848 * This function copies the contents of the response iocb to the
11849 * response iocb memory object provided by the caller of
11850 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11851 * sleeps waiting for the iocb completion.
11852 **/
68876920
JSEC
11853static void
11854lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11855 struct lpfc_iocbq *cmdiocbq,
11856 struct lpfc_iocbq *rspiocbq)
dea3101e 11857{
68876920
JSEC
11858 wait_queue_head_t *pdone_q;
11859 unsigned long iflags;
c490850a 11860 struct lpfc_io_buf *lpfc_cmd;
dea3101e 11861
2e0fef85 11862 spin_lock_irqsave(&phba->hbalock, iflags);
5a0916b4
JS
11863 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11864
11865 /*
11866 * A time out has occurred for the iocb. If a time out
11867 * completion handler has been supplied, call it. Otherwise,
11868 * just free the iocbq.
11869 */
11870
11871 spin_unlock_irqrestore(&phba->hbalock, iflags);
11872 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11873 cmdiocbq->wait_iocb_cmpl = NULL;
11874 if (cmdiocbq->iocb_cmpl)
11875 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11876 else
11877 lpfc_sli_release_iocbq(phba, cmdiocbq);
11878 return;
11879 }
11880
68876920
JSEC
11881 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11882 if (cmdiocbq->context2 && rspiocbq)
11883 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11884 &rspiocbq->iocb, sizeof(IOCB_t));
11885
0f65ff68
JS
11886 /* Set the exchange busy flag for task management commands */
11887 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11888 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
c490850a 11889 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
0f65ff68
JS
11890 cur_iocbq);
11891 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11892 }
11893
68876920 11894 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
11895 if (pdone_q)
11896 wake_up(pdone_q);
858c9f6c 11897 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 11898 return;
11899}
11900
d11e31dd
JS
11901/**
11902 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11903 * @phba: Pointer to HBA context object.
11904 * @piocbq: Pointer to command iocb.
11905 * @flag: Flag to test.
11906 *
11907 * This routine grabs the hbalock and then tests the iocb_flag to
11908 * see if the passed-in flag is set.
11909 * Returns:
11910 * 1 if flag is set.
11911 * 0 if flag is not set.
11912 **/
11913static int
11914lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11915 struct lpfc_iocbq *piocbq, uint32_t flag)
11916{
11917 unsigned long iflags;
11918 int ret;
11919
11920 spin_lock_irqsave(&phba->hbalock, iflags);
11921 ret = piocbq->iocb_flag & flag;
11922 spin_unlock_irqrestore(&phba->hbalock, iflags);
11923 return ret;
11924
11925}
11926
e59058c4 11927/**
3621a710 11928 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
e59058c4
JS
11929 * @phba: Pointer to HBA context object.
11930 * @pring: Pointer to sli ring.
11931 * @piocb: Pointer to command iocb.
11932 * @prspiocbq: Pointer to response iocb.
11933 * @timeout: Timeout in number of seconds.
11934 *
11935 * This function issues the iocb to firmware and waits for the
5a0916b4
JS
11936 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11937 * to handle iocbs which time out. If the field is NULL, the
11938 * function shall free the iocbq structure. If more clean up is
11939 * needed, the caller is expected to provide a completion function
11940 * that will provide the needed clean up. If the iocb command is
11941 * not completed within timeout seconds, the function will either
11942 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11943 * completion function set in the iocb_cmpl field and then return
11944 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11945 * resources if this function returns IOCB_TIMEDOUT.
e59058c4
JS
11946 * The function waits for the iocb completion using a
11947 * non-interruptible wait.
11948 * This function will sleep while waiting for iocb completion.
11949 * So, this function should not be called from any context which
11950 * does not allow sleeping. Due to the same reason, this function
11951 * cannot be called with interrupts disabled.
11952 * This function assumes that the iocb completions occur while
11953 * this function sleeps. So, this function cannot be called from
11954 * the thread which processes iocb completions for this ring.
11955 * This function clears the iocb_flag of the iocb object before
11956 * issuing the iocb and the iocb completion handler sets this
11957 * flag and wakes this thread when the iocb completes.
11958 * The contents of the response iocb will be copied to prspiocbq
11959 * by the completion handler when the command completes.
11960 * This function returns IOCB_SUCCESS when success.
11961 * This function is called with no lock held.
11962 **/
dea3101e 11963int
2e0fef85 11964lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 11965 uint32_t ring_number,
2e0fef85
JS
11966 struct lpfc_iocbq *piocb,
11967 struct lpfc_iocbq *prspiocbq,
68876920 11968 uint32_t timeout)
dea3101e 11969{
7259f0d0 11970 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
11971 long timeleft, timeout_req = 0;
11972 int retval = IOCB_SUCCESS;
875fbdfe 11973 uint32_t creg_val;
0e9bb8d7
JS
11974 struct lpfc_iocbq *iocb;
11975 int txq_cnt = 0;
11976 int txcmplq_cnt = 0;
895427bd 11977 struct lpfc_sli_ring *pring;
5a0916b4
JS
11978 unsigned long iflags;
11979 bool iocb_completed = true;
11980
895427bd
JS
11981 if (phba->sli_rev >= LPFC_SLI_REV4)
11982 pring = lpfc_sli4_calc_ring(phba, piocb);
11983 else
11984 pring = &phba->sli.sli3_ring[ring_number];
dea3101e 11985 /*
68876920
JSEC
11986	 * If the caller has provided a response iocbq buffer, then context2
11987	 * must be NULL; if it is not, that is an error.
dea3101e 11988 */
68876920
JSEC
11989 if (prspiocbq) {
11990 if (piocb->context2)
11991 return IOCB_ERROR;
11992 piocb->context2 = prspiocbq;
dea3101e 11993 }
11994
5a0916b4 11995 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
68876920
JSEC
11996 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11997 piocb->context_un.wait_queue = &done_q;
5a0916b4 11998 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea3101e 11999
875fbdfe 12000 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
12001 if (lpfc_readl(phba->HCregaddr, &creg_val))
12002 return IOCB_ERROR;
875fbdfe
JSEC
12003 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12004 writel(creg_val, phba->HCregaddr);
12005 readl(phba->HCregaddr); /* flush */
12006 }
12007
2a9bf3d0
JS
12008 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12009 SLI_IOCB_RET_IOCB);
68876920 12010 if (retval == IOCB_SUCCESS) {
256ec0d0 12011 timeout_req = msecs_to_jiffies(timeout * 1000);
68876920 12012 timeleft = wait_event_timeout(done_q,
d11e31dd 12013 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 12014 timeout_req);
5a0916b4
JS
12015 spin_lock_irqsave(&phba->hbalock, iflags);
12016 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12017
12018 /*
12019 * IOCB timed out. Inform the wake iocb wait
12020 * completion function and set local status
12021 */
dea3101e 12022
5a0916b4
JS
12023 iocb_completed = false;
12024 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12025 }
12026 spin_unlock_irqrestore(&phba->hbalock, iflags);
12027 if (iocb_completed) {
7054a606 12028 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 12029 "0331 IOCB wake signaled\n");
53151bbb
JS
12030 /* Note: we are not indicating if the IOCB has a success
12031 * status or not - that's for the caller to check.
12032 * IOCB_SUCCESS means just that the command was sent and
12033 * completed. Not that it completed successfully.
12034 * */
7054a606 12035 } else if (timeleft == 0) {
68876920 12036 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
12037 "0338 IOCB wait timeout error - no "
12038 "wake response Data x%x\n", timeout);
68876920 12039 retval = IOCB_TIMEDOUT;
7054a606 12040 } else {
68876920 12041 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
12042 "0330 IOCB wake NOT set, "
12043 "Data x%x x%lx\n",
68876920
JSEC
12044 timeout, (timeleft / jiffies));
12045 retval = IOCB_TIMEDOUT;
dea3101e 12046 }
2a9bf3d0 12047 } else if (retval == IOCB_BUSY) {
0e9bb8d7
JS
12048 if (phba->cfg_log_verbose & LOG_SLI) {
12049 list_for_each_entry(iocb, &pring->txq, list) {
12050 txq_cnt++;
12051 }
12052 list_for_each_entry(iocb, &pring->txcmplq, list) {
12053 txcmplq_cnt++;
12054 }
12055 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12056 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12057 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12058 }
2a9bf3d0 12059 return retval;
68876920
JSEC
12060 } else {
12061 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 12062 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 12063 retval);
68876920 12064 retval = IOCB_ERROR;
dea3101e 12065 }
12066
875fbdfe 12067 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
12068 if (lpfc_readl(phba->HCregaddr, &creg_val))
12069 return IOCB_ERROR;
875fbdfe
JSEC
12070 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12071 writel(creg_val, phba->HCregaddr);
12072 readl(phba->HCregaddr); /* flush */
12073 }
12074
68876920
JSEC
12075 if (prspiocbq)
12076 piocb->context2 = NULL;
12077
12078 piocb->context_un.wait_queue = NULL;
12079 piocb->iocb_cmpl = NULL;
dea3101e 12080 return retval;
12081}
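/*
 * Illustrative sketch (hypothetical caller) of the ownership rules in the
 * comment above: on IOCB_SUCCESS or IOCB_ERROR the caller still owns the
 * command iocb and releases it; on IOCB_TIMEDOUT the wake/timeout handler
 * owns it and the caller must not free it:
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
 *				      rspiocb, timeout);
 *	if (rc == IOCB_SUCCESS)
 *		... inspect rspiocb->iocb.ulpStatus for the final status ...
 *	else if (rc == IOCB_TIMEDOUT)
 *		return;		(do not release cmdiocb here)
 *	lpfc_sli_release_iocbq(phba, cmdiocb);
 */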
68876920 12082
e59058c4 12083/**
3621a710 12084 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
e59058c4
JS
12085 * @phba: Pointer to HBA context object.
12086 * @pmboxq: Pointer to driver mailbox object.
12087 * @timeout: Timeout in number of seconds.
12088 *
12089 * This function issues the mailbox to firmware and waits for the
12090 * mailbox command to complete. If the mailbox command is not
12091 * completed within timeout seconds, it returns MBX_TIMEOUT.
12092 * The function waits for the mailbox completion using a
12093 * non-interruptible wait, so the wait cannot be cut short
12094 * by a signal. The caller
12095 * should not free the mailbox resources if this function returns
12096 * MBX_TIMEOUT.
12097 * This function will sleep while waiting for mailbox completion.
12098 * So, this function should not be called from any context which
12099 * does not allow sleeping. Due to the same reason, this function
12100 * cannot be called with interrupts disabled.
12101 * This function assumes that the mailbox completion occurs while
12102 * this function sleeps. So, this function cannot be called from
12103 * the worker thread which processes mailbox completion.
12104 * This function is called in the context of HBA management
12105 * applications.
12106 * This function returns MBX_SUCCESS when successful.
12107 * This function is called with no lock held.
12108 **/
dea3101e 12109int
2e0fef85 12110lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e 12111 uint32_t timeout)
12112{
e29d74f8 12113 struct completion mbox_done;
dea3101e 12114 int retval;
858c9f6c 12115 unsigned long flag;
dea3101e 12116
495a714c 12117 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea3101e 12118	/* setup wake call as mailbox completion callback */
12119 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
dea3101e 12120
e29d74f8
JS
12121	/* setup context3 field to pass completion pointer to wake function */
12122 init_completion(&mbox_done);
12123 pmboxq->context3 = &mbox_done;
dea3101e 12124 /* now issue the command */
12125 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 12126 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
e29d74f8
JS
12127 wait_for_completion_timeout(&mbox_done,
12128 msecs_to_jiffies(timeout * 1000));
7054a606 12129
858c9f6c 12130 spin_lock_irqsave(&phba->hbalock, flag);
e29d74f8 12131 pmboxq->context3 = NULL;
7054a606
JS
12132 /*
12133 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12134 * else do not free the resources.
12135 */
d7c47992 12136 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 12137 retval = MBX_SUCCESS;
d7c47992 12138 } else {
7054a606 12139 retval = MBX_TIMEOUT;
858c9f6c
JS
12140 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12141 }
12142 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 12143 }
dea3101e 12144 return retval;
12145}
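/*
 * Illustrative sketch (hypothetical management-path caller), following the
 * rule in the comment above that mailbox resources must not be freed on
 * MBX_TIMEOUT because the deferred completion still references them:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_TIMEOUT)
 *		return;		(pmboxq is freed later by the default
 *				 mailbox completion handler)
 *	... otherwise check the mailbox status in pmboxq and free it ...
 */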
12146
e59058c4 12147/**
3772a991 12148 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
e59058c4
JS
12149 * @phba: Pointer to HBA context.
12150 *
3772a991
JS
12151 * This function is called to shut down the driver's mailbox sub-system.
12152 * It first marks the mailbox sub-system as being in a blocked state to prevent
12153 * asynchronous mailbox commands from being issued off the pending mailbox
12154 * command queue. If the mailbox command sub-system shutdown is due to
12155 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12156 * the mailbox sub-system flush routine to forcefully bring down the
12157 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12158 * as offline or HBA function reset), this routine will wait for the
12159 * outstanding mailbox command to complete before invoking the mailbox
12160 * sub-system flush routine to gracefully bring down the mailbox sub-system.
e59058c4 12161 **/
3772a991 12162void
618a5230 12163lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
b4c02652 12164{
3772a991 12165 struct lpfc_sli *psli = &phba->sli;
3772a991 12166 unsigned long timeout;
b4c02652 12167
618a5230
JS
12168 if (mbx_action == LPFC_MBX_NO_WAIT) {
12169 /* delay 100ms for port state */
12170 msleep(100);
12171 lpfc_sli_mbox_sys_flush(phba);
12172 return;
12173 }
a183a15f 12174 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 12175
523128e5
JS
12176 /* Disable softirqs, including timers from obtaining phba->hbalock */
12177 local_bh_disable();
12178
3772a991
JS
12179 spin_lock_irq(&phba->hbalock);
12180 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 12181
3772a991 12182 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3772a991
JS
12183 /* Determine how long we might wait for the active mailbox
12184 * command to be gracefully completed by firmware.
12185 */
a183a15f
JS
12186 if (phba->sli.mbox_active)
12187 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12188 phba->sli.mbox_active) *
12189 1000) + jiffies;
12190 spin_unlock_irq(&phba->hbalock);
12191
523128e5
JS
12192 /* Enable softirqs again, done with phba->hbalock */
12193 local_bh_enable();
12194
3772a991
JS
12195 while (phba->sli.mbox_active) {
12196 /* Check active mailbox complete status every 2ms */
12197 msleep(2);
12198 if (time_after(jiffies, timeout))
12199 /* Timeout, let the mailbox flush routine to
12200 * forcefully release active mailbox command
12201 */
12202 break;
12203 }
523128e5 12204 } else {
d7069f09
JS
12205 spin_unlock_irq(&phba->hbalock);
12206
523128e5
JS
12207 /* Enable softirqs again, done with phba->hbalock */
12208 local_bh_enable();
12209 }
12210
3772a991
JS
12211 lpfc_sli_mbox_sys_flush(phba);
12212}
ed957684 12213
3772a991
JS
12214/**
12215 * lpfc_sli_eratt_read - read sli-3 error attention events
12216 * @phba: Pointer to HBA context.
12217 *
12218 * This function is called to read the SLI3 device error attention registers
12219 * for possible error attention events. The caller must hold the hostlock
12220 * with spin_lock_irq().
12221 *
25985edc 12222 * This function returns 1 when there is Error Attention in the Host Attention
3772a991
JS
12223 * Register and returns 0 otherwise.
12224 **/
12225static int
12226lpfc_sli_eratt_read(struct lpfc_hba *phba)
12227{
12228 uint32_t ha_copy;
b4c02652 12229
3772a991 12230 /* Read chip Host Attention (HA) register */
9940b97b
JS
12231 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12232 goto unplug_err;
12233
3772a991
JS
12234 if (ha_copy & HA_ERATT) {
12235 /* Read host status register to retrieve error event */
9940b97b
JS
12236 if (lpfc_sli_read_hs(phba))
12237 goto unplug_err;
b4c02652 12238
3772a991
JS
12239		/* Check if a deferred error condition is active */
12240 if ((HS_FFER1 & phba->work_hs) &&
12241 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 12242 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 12243 phba->hba_flag |= DEFER_ERATT;
3772a991
JS
12244 /* Clear all interrupt enable conditions */
12245 writel(0, phba->HCregaddr);
12246 readl(phba->HCregaddr);
12247 }
12248
12249 /* Set the driver HA work bitmap */
3772a991
JS
12250 phba->work_ha |= HA_ERATT;
12251 /* Indicate polling handles this ERATT */
12252 phba->hba_flag |= HBA_ERATT_HANDLED;
3772a991
JS
12253 return 1;
12254 }
12255 return 0;
9940b97b
JS
12256
12257unplug_err:
12258 /* Set the driver HS work bitmap */
12259 phba->work_hs |= UNPLUG_ERR;
12260 /* Set the driver HA work bitmap */
12261 phba->work_ha |= HA_ERATT;
12262 /* Indicate polling handles this ERATT */
12263 phba->hba_flag |= HBA_ERATT_HANDLED;
12264 return 1;
b4c02652
JS
12265}
12266
da0436e9
JS
12267/**
12268 * lpfc_sli4_eratt_read - read sli-4 error attention events
12269 * @phba: Pointer to HBA context.
12270 *
12271 * This function is called to read the SLI4 device error attention registers
12272 * for possible error attention events. The caller must hold the hostlock
12273 * with spin_lock_irq().
12274 *
25985edc 12275 * This function returns 1 when there is Error Attention in the Host Attention
da0436e9
JS
12276 * Register and returns 0 otherwise.
12277 **/
12278static int
12279lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12280{
12281 uint32_t uerr_sta_hi, uerr_sta_lo;
2fcee4bf
JS
12282 uint32_t if_type, portsmphr;
12283 struct lpfc_register portstat_reg;
da0436e9 12284
2fcee4bf
JS
12285 /*
12286 * For now, use the SLI4 device internal unrecoverable error
da0436e9
JS
12287 * registers for error attention. This can be changed later.
12288 */
2fcee4bf
JS
12289 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12290 switch (if_type) {
12291 case LPFC_SLI_INTF_IF_TYPE_0:
9940b97b
JS
12292 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12293 &uerr_sta_lo) ||
12294 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12295 &uerr_sta_hi)) {
12296 phba->work_hs |= UNPLUG_ERR;
12297 phba->work_ha |= HA_ERATT;
12298 phba->hba_flag |= HBA_ERATT_HANDLED;
12299 return 1;
12300 }
2fcee4bf
JS
12301 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12302 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12304 "1423 HBA Unrecoverable error: "
12305 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12306 "ue_mask_lo_reg=0x%x, "
12307 "ue_mask_hi_reg=0x%x\n",
12308 uerr_sta_lo, uerr_sta_hi,
12309 phba->sli4_hba.ue_mask_lo,
12310 phba->sli4_hba.ue_mask_hi);
12311 phba->work_status[0] = uerr_sta_lo;
12312 phba->work_status[1] = uerr_sta_hi;
12313 phba->work_ha |= HA_ERATT;
12314 phba->hba_flag |= HBA_ERATT_HANDLED;
12315 return 1;
12316 }
12317 break;
12318 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 12319 case LPFC_SLI_INTF_IF_TYPE_6:
9940b97b
JS
12320 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12321 &portstat_reg.word0) ||
12322 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12323 &portsmphr)){
12324 phba->work_hs |= UNPLUG_ERR;
12325 phba->work_ha |= HA_ERATT;
12326 phba->hba_flag |= HBA_ERATT_HANDLED;
12327 return 1;
12328 }
2fcee4bf
JS
12329 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12330 phba->work_status[0] =
12331 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12332 phba->work_status[1] =
12333 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 12335 "2885 Port Status Event: "
2fcee4bf
JS
12336 "port status reg 0x%x, "
12337 "port smphr reg 0x%x, "
12338 "error 1=0x%x, error 2=0x%x\n",
12339 portstat_reg.word0,
12340 portsmphr,
12341 phba->work_status[0],
12342 phba->work_status[1]);
12343 phba->work_ha |= HA_ERATT;
12344 phba->hba_flag |= HBA_ERATT_HANDLED;
12345 return 1;
12346 }
12347 break;
12348 case LPFC_SLI_INTF_IF_TYPE_1:
12349 default:
a747c9ce 12350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
12351 "2886 HBA Error Attention on unsupported "
12352 "if type %d.", if_type);
a747c9ce 12353 return 1;
da0436e9 12354 }
2fcee4bf 12355
da0436e9
JS
12356 return 0;
12357}
12358
e59058c4 12359/**
3621a710 12360 * lpfc_sli_check_eratt - check error attention events
9399627f
JS
12361 * @phba: Pointer to HBA context.
12362 *
3772a991 12363 * This function is called from timer soft interrupt context to check HBA's
9399627f
JS
12364 * error attention register bit for error attention events.
12365 *
25985edc 12366 * This function returns 1 when there is Error Attention in the Host Attention
9399627f
JS
12367 * Register and returns 0 otherwise.
12368 **/
12369int
12370lpfc_sli_check_eratt(struct lpfc_hba *phba)
12371{
12372 uint32_t ha_copy;
12373
12374 /* If somebody is waiting to handle an eratt, don't process it
12375 * here. The brdkill function will do this.
12376 */
12377 if (phba->link_flag & LS_IGNORE_ERATT)
12378 return 0;
12379
12380 /* Check if interrupt handler handles this ERATT */
12381 spin_lock_irq(&phba->hbalock);
12382 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12383 /* Interrupt handler has handled ERATT */
12384 spin_unlock_irq(&phba->hbalock);
12385 return 0;
12386 }
12387
a257bf90
JS
12388 /*
12389 * If there is deferred error attention, do not check for error
12390 * attention
12391 */
12392 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12393 spin_unlock_irq(&phba->hbalock);
12394 return 0;
12395 }
12396
3772a991
JS
12397 /* If PCI channel is offline, don't process it */
12398 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 12399 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12400 return 0;
12401 }
12402
12403 switch (phba->sli_rev) {
12404 case LPFC_SLI_REV2:
12405 case LPFC_SLI_REV3:
12406 /* Read chip Host Attention (HA) register */
12407 ha_copy = lpfc_sli_eratt_read(phba);
12408 break;
da0436e9 12409 case LPFC_SLI_REV4:
2fcee4bf 12410		/* Read device Unrecoverable Error (UERR) registers */
da0436e9
JS
12411 ha_copy = lpfc_sli4_eratt_read(phba);
12412 break;
3772a991
JS
12413 default:
12414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12415 "0299 Invalid SLI revision (%d)\n",
12416 phba->sli_rev);
12417 ha_copy = 0;
12418 break;
9399627f
JS
12419 }
12420 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12421
12422 return ha_copy;
12423}
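/*
 * Illustrative sketch (not part of the driver): lpfc_sli_check_eratt() is
 * meant to be called from the driver's error-attention poll timer.  A
 * hypothetical poll callback could look roughly like this; the field and
 * callback names below are assumptions for illustration, and the real
 * polling logic lives elsewhere in lpfc.
 *
 *	static void example_eratt_poll(struct timer_list *t)
 *	{
 *		struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);
 *
 *		if (lpfc_sli_check_eratt(phba))
 *			lpfc_worker_wake_up(phba);	// hand off to worker
 *		else
 *			mod_timer(&phba->eratt_poll,
 *				  jiffies + msecs_to_jiffies(1000));
 *	}
 */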
12424
12425/**
12426 * lpfc_intr_state_check - Check device state for interrupt handling
12427 * @phba: Pointer to HBA context.
12428 *
 12429 * This inline routine checks whether a device or its PCI slot is in a state
 12430 * in which the interrupt should be handled.
 12431 *
 12432 * This function returns 0 if the device or the PCI slot is in a state in which
 12433 * the interrupt should be handled, otherwise -EIO.
12434 */
12435static inline int
12436lpfc_intr_state_check(struct lpfc_hba *phba)
12437{
12438 /* If the pci channel is offline, ignore all the interrupts */
12439 if (unlikely(pci_channel_offline(phba->pcidev)))
12440 return -EIO;
12441
12442 /* Update device level interrupt statistics */
12443 phba->sli.slistat.sli_intr++;
12444
12445 /* Ignore all interrupts during initialization. */
12446 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12447 return -EIO;
12448
9399627f
JS
12449 return 0;
12450}
12451
12452/**
3772a991 12453 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
e59058c4
JS
12454 * @irq: Interrupt number.
12455 * @dev_id: The device context pointer.
12456 *
9399627f 12457 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
 12458 * service routine when a device with the SLI-3 interface spec is enabled with
12459 * MSI-X multi-message interrupt mode and there are slow-path events in
12460 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12461 * interrupt mode, this function is called as part of the device-level
12462 * interrupt handler. When the PCI slot is in error recovery or the HBA
12463 * is undergoing initialization, the interrupt handler will not process
12464 * the interrupt. The link attention and ELS ring attention events are
12465 * handled by the worker thread. The interrupt handler signals the worker
12466 * thread and returns for these events. This function is called without
12467 * any lock held. It gets the hbalock to access and update SLI data
9399627f
JS
12468 * structures.
12469 *
12470 * This function returns IRQ_HANDLED when interrupt is handled else it
12471 * returns IRQ_NONE.
e59058c4 12472 **/
dea3101e 12473irqreturn_t
3772a991 12474lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 12475{
2e0fef85 12476 struct lpfc_hba *phba;
a747c9ce 12477 uint32_t ha_copy, hc_copy;
dea3101e 12478 uint32_t work_ha_copy;
12479 unsigned long status;
5b75da2f 12480 unsigned long iflag;
dea3101e 12481 uint32_t control;
12482
92d7f7b0 12483 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
12484 struct lpfc_vport *vport;
12485 struct lpfc_nodelist *ndlp;
12486 struct lpfc_dmabuf *mp;
92d7f7b0
JS
12487 LPFC_MBOXQ_t *pmb;
12488 int rc;
12489
dea3101e 12490 /*
12491 * Get the driver's phba structure from the dev_id and
12492 * assume the HBA is not interrupting.
12493 */
9399627f 12494 phba = (struct lpfc_hba *)dev_id;
dea3101e 12495
12496 if (unlikely(!phba))
12497 return IRQ_NONE;
12498
dea3101e 12499 /*
9399627f
JS
 12500	 * Stuff needs to be attended to when this function is invoked as an
12501 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 12502 */
9399627f 12503 if (phba->intr_type == MSIX) {
3772a991
JS
12504 /* Check device state for handling interrupt */
12505 if (lpfc_intr_state_check(phba))
9399627f
JS
12506 return IRQ_NONE;
12507 /* Need to read HA REG for slow-path events */
5b75da2f 12508 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12509 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12510 goto unplug_error;
9399627f
JS
12511 /* If somebody is waiting to handle an eratt don't process it
12512 * here. The brdkill function will do this.
12513 */
12514 if (phba->link_flag & LS_IGNORE_ERATT)
12515 ha_copy &= ~HA_ERATT;
12516 /* Check the need for handling ERATT in interrupt handler */
12517 if (ha_copy & HA_ERATT) {
12518 if (phba->hba_flag & HBA_ERATT_HANDLED)
12519 /* ERATT polling has handled ERATT */
12520 ha_copy &= ~HA_ERATT;
12521 else
12522 /* Indicate interrupt handler handles ERATT */
12523 phba->hba_flag |= HBA_ERATT_HANDLED;
12524 }
a257bf90
JS
12525
12526 /*
12527 * If there is deferred error attention, do not check for any
12528 * interrupt.
12529 */
12530 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12531 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12532 return IRQ_NONE;
12533 }
12534
9399627f 12535 /* Clear up only attention source related to slow-path */
9940b97b
JS
12536 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12537 goto unplug_error;
12538
a747c9ce
JS
12539 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12540 HC_LAINT_ENA | HC_ERINT_ENA),
12541 phba->HCregaddr);
9399627f
JS
12542 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12543 phba->HAregaddr);
a747c9ce 12544 writel(hc_copy, phba->HCregaddr);
9399627f 12545 readl(phba->HAregaddr); /* flush */
5b75da2f 12546 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12547 } else
12548 ha_copy = phba->ha_copy;
dea3101e 12549
dea3101e 12550 work_ha_copy = ha_copy & phba->work_ha_mask;
12551
9399627f 12552 if (work_ha_copy) {
dea3101e 12553 if (work_ha_copy & HA_LATT) {
12554 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12555 /*
12556 * Turn off Link Attention interrupts
12557 * until CLEAR_LA done
12558 */
5b75da2f 12559 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12560 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
9940b97b
JS
12561 if (lpfc_readl(phba->HCregaddr, &control))
12562 goto unplug_error;
dea3101e 12563 control &= ~HC_LAINT_ENA;
12564 writel(control, phba->HCregaddr);
12565 readl(phba->HCregaddr); /* flush */
5b75da2f 12566 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 12567 }
12568 else
12569 work_ha_copy &= ~HA_LATT;
12570 }
12571
9399627f 12572 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
858c9f6c
JS
12573 /*
12574 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12575 * the only slow ring.
12576 */
12577 status = (work_ha_copy &
12578 (HA_RXMASK << (4*LPFC_ELS_RING)));
12579 status >>= (4*LPFC_ELS_RING);
12580 if (status & HA_RXMASK) {
5b75da2f 12581 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12582 if (lpfc_readl(phba->HCregaddr, &control))
12583 goto unplug_error;
a58cbd52
JS
12584
12585 lpfc_debugfs_slow_ring_trc(phba,
12586 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12587 control, status,
12588 (uint32_t)phba->sli.slistat.sli_intr);
12589
858c9f6c 12590 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
12591 lpfc_debugfs_slow_ring_trc(phba,
12592 "ISR Disable ring:"
12593 "pwork:x%x hawork:x%x wait:x%x",
12594 phba->work_ha, work_ha_copy,
12595 (uint32_t)((unsigned long)
5e9d9b82 12596 &phba->work_waitq));
a58cbd52 12597
858c9f6c
JS
12598 control &=
12599 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e 12600 writel(control, phba->HCregaddr);
12601 readl(phba->HCregaddr); /* flush */
dea3101e 12602 }
a58cbd52
JS
12603 else {
12604 lpfc_debugfs_slow_ring_trc(phba,
12605 "ISR slow ring: pwork:"
12606 "x%x hawork:x%x wait:x%x",
12607 phba->work_ha, work_ha_copy,
12608 (uint32_t)((unsigned long)
5e9d9b82 12609 &phba->work_waitq));
a58cbd52 12610 }
5b75da2f 12611 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 12612 }
12613 }
5b75da2f 12614 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 12615 if (work_ha_copy & HA_ERATT) {
9940b97b
JS
12616 if (lpfc_sli_read_hs(phba))
12617 goto unplug_error;
a257bf90
JS
12618 /*
12619 * Check if there is a deferred error condition
12620 * is active
12621 */
12622 if ((HS_FFER1 & phba->work_hs) &&
12623 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0
JS
12624 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12625 phba->work_hs)) {
a257bf90
JS
12626 phba->hba_flag |= DEFER_ERATT;
12627 /* Clear all interrupt enable conditions */
12628 writel(0, phba->HCregaddr);
12629 readl(phba->HCregaddr);
12630 }
12631 }
12632
9399627f 12633 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 12634 pmb = phba->sli.mbox_active;
04c68496 12635 pmbox = &pmb->u.mb;
34b02dcd 12636 mbox = phba->mbox;
858c9f6c 12637 vport = pmb->vport;
92d7f7b0
JS
12638
12639 /* First check out the status word */
12640 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12641 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 12642 spin_unlock_irqrestore(&phba->hbalock, iflag);
92d7f7b0
JS
12643 /*
12644 * Stray Mailbox Interrupt, mbxCommand <cmd>
12645 * mbxStatus <status>
12646 */
09372820 12647 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 12648 LOG_SLI,
e8b62011 12649 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
12650 "Interrupt mbxCommand x%x "
12651 "mbxStatus x%x\n",
e8b62011 12652 (vport ? vport->vpi : 0),
92d7f7b0
JS
12653 pmbox->mbxCommand,
12654 pmbox->mbxStatus);
09372820
JS
12655 /* clear mailbox attention bit */
12656 work_ha_copy &= ~HA_MBATT;
12657 } else {
97eab634 12658 phba->sli.mbox_active = NULL;
5b75da2f 12659 spin_unlock_irqrestore(&phba->hbalock, iflag);
09372820
JS
12660 phba->last_completion_time = jiffies;
12661 del_timer(&phba->sli.mbox_tmo);
09372820
JS
12662 if (pmb->mbox_cmpl) {
12663 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12664 MAILBOX_CMD_SIZE);
7a470277 12665 if (pmb->out_ext_byte_len &&
3e1f0718 12666 pmb->ctx_buf)
7a470277
JS
12667 lpfc_sli_pcimem_bcopy(
12668 phba->mbox_ext,
3e1f0718 12669 pmb->ctx_buf,
7a470277 12670 pmb->out_ext_byte_len);
09372820
JS
12671 }
12672 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12673 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12674
12675 lpfc_debugfs_disc_trc(vport,
12676 LPFC_DISC_TRC_MBOX_VPORT,
12677 "MBOX dflt rpi: : "
12678 "status:x%x rpi:x%x",
12679 (uint32_t)pmbox->mbxStatus,
12680 pmbox->un.varWords[0], 0);
12681
12682 if (!pmbox->mbxStatus) {
12683 mp = (struct lpfc_dmabuf *)
3e1f0718 12684 (pmb->ctx_buf);
09372820 12685 ndlp = (struct lpfc_nodelist *)
3e1f0718 12686 pmb->ctx_ndlp;
09372820
JS
12687
12688 /* Reg_LOGIN of dflt RPI was
 12689					 * successful. Now let's get
12690 * rid of the RPI using the
12691 * same mbox buffer.
12692 */
12693 lpfc_unreg_login(phba,
12694 vport->vpi,
12695 pmbox->un.varWords[0],
12696 pmb);
12697 pmb->mbox_cmpl =
12698 lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
12699 pmb->ctx_buf = mp;
12700 pmb->ctx_ndlp = ndlp;
09372820 12701 pmb->vport = vport;
58da1ffb
JS
12702 rc = lpfc_sli_issue_mbox(phba,
12703 pmb,
12704 MBX_NOWAIT);
12705 if (rc != MBX_BUSY)
12706 lpfc_printf_log(phba,
12707 KERN_ERR,
12708 LOG_MBOX | LOG_SLI,
d7c255b2 12709						"0350 rc should have "
6a9c52cf 12710						"been MBX_BUSY\n");
3772a991
JS
12711 if (rc != MBX_NOT_FINISHED)
12712 goto send_current_mbox;
09372820 12713 }
858c9f6c 12714 }
5b75da2f
JS
12715 spin_lock_irqsave(
12716 &phba->pport->work_port_lock,
12717 iflag);
09372820
JS
12718 phba->pport->work_port_events &=
12719 ~WORKER_MBOX_TMO;
5b75da2f
JS
12720 spin_unlock_irqrestore(
12721 &phba->pport->work_port_lock,
12722 iflag);
09372820 12723 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 12724 }
97eab634 12725 } else
5b75da2f 12726 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 12727
92d7f7b0
JS
12728 if ((work_ha_copy & HA_MBATT) &&
12729 (phba->sli.mbox_active == NULL)) {
858c9f6c 12730send_current_mbox:
92d7f7b0 12731 /* Process next mailbox command if there is one */
58da1ffb
JS
12732 do {
12733 rc = lpfc_sli_issue_mbox(phba, NULL,
12734 MBX_NOWAIT);
12735 } while (rc == MBX_NOT_FINISHED);
12736 if (rc != MBX_SUCCESS)
12737 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12738 LOG_SLI, "0349 rc should be "
6a9c52cf 12739 "MBX_SUCCESS\n");
92d7f7b0
JS
12740 }
12741
5b75da2f 12742 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12743 phba->work_ha |= work_ha_copy;
5b75da2f 12744 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 12745 lpfc_worker_wake_up(phba);
dea3101e 12746 }
9399627f 12747 return IRQ_HANDLED;
9940b97b
JS
12748unplug_error:
12749 spin_unlock_irqrestore(&phba->hbalock, iflag);
12750 return IRQ_HANDLED;
dea3101e 12751
3772a991 12752} /* lpfc_sli_sp_intr_handler */
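/*
 * Illustrative sketch (not part of the driver): in MSI-X mode the slow-path
 * and fast-path handlers are registered on separate vectors.  The actual
 * registration is done in the driver's setup code (lpfc_init.c); the vector
 * indices and name strings below are assumptions for illustration only.
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);
 *	if (!rc)
 *		rc = request_irq(pci_irq_vector(phba->pcidev, 1),
 *			 lpfc_sli_fp_intr_handler, 0, "lpfc:fp", phba);
 */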
9399627f
JS
12753
12754/**
3772a991 12755 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
9399627f
JS
12756 * @irq: Interrupt number.
12757 * @dev_id: The device context pointer.
12758 *
12759 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
 12760 * service routine when a device with the SLI-3 interface spec is enabled with
12761 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12762 * ring event in the HBA. However, when the device is enabled with either
12763 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12764 * device-level interrupt handler. When the PCI slot is in error recovery
12765 * or the HBA is undergoing initialization, the interrupt handler will not
 12766 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 12767 * the interrupt context. This function is called without any lock held.
12768 * It gets the hbalock to access and update SLI data structures.
9399627f
JS
12769 *
12770 * This function returns IRQ_HANDLED when interrupt is handled else it
12771 * returns IRQ_NONE.
12772 **/
12773irqreturn_t
3772a991 12774lpfc_sli_fp_intr_handler(int irq, void *dev_id)
9399627f
JS
12775{
12776 struct lpfc_hba *phba;
12777 uint32_t ha_copy;
12778 unsigned long status;
5b75da2f 12779 unsigned long iflag;
895427bd 12780 struct lpfc_sli_ring *pring;
9399627f
JS
12781
12782 /* Get the driver's phba structure from the dev_id and
12783 * assume the HBA is not interrupting.
12784 */
12785 phba = (struct lpfc_hba *) dev_id;
12786
12787 if (unlikely(!phba))
12788 return IRQ_NONE;
12789
12790 /*
 12791	 * Stuff needs to be attended to when this function is invoked as an
12792 * individual interrupt handler in MSI-X multi-message interrupt mode
12793 */
12794 if (phba->intr_type == MSIX) {
3772a991
JS
12795 /* Check device state for handling interrupt */
12796 if (lpfc_intr_state_check(phba))
9399627f
JS
12797 return IRQ_NONE;
12798 /* Need to read HA REG for FCP ring and other ring events */
9940b97b
JS
12799 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12800 return IRQ_HANDLED;
9399627f 12801 /* Clear up only attention source related to fast-path */
5b75da2f 12802 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90
JS
12803 /*
12804 * If there is deferred error attention, do not check for
12805 * any interrupt.
12806 */
12807 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12808 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12809 return IRQ_NONE;
12810 }
9399627f
JS
12811 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12812 phba->HAregaddr);
12813 readl(phba->HAregaddr); /* flush */
5b75da2f 12814 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12815 } else
12816 ha_copy = phba->ha_copy;
dea3101e 12817
12818 /*
9399627f 12819 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 12820 */
9399627f
JS
12821 ha_copy &= ~(phba->work_ha_mask);
12822
12823 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 12824 status >>= (4*LPFC_FCP_RING);
895427bd 12825 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
858c9f6c 12826 if (status & HA_RXMASK)
895427bd 12827 lpfc_sli_handle_fast_ring_event(phba, pring, status);
a4bc3379
JS
12828
12829 if (phba->cfg_multi_ring_support == 2) {
12830 /*
9399627f
JS
12831 * Process all events on extra ring. Take the optimized path
12832 * for extra ring IO.
a4bc3379 12833 */
9399627f 12834 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 12835 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 12836 if (status & HA_RXMASK) {
a4bc3379 12837 lpfc_sli_handle_fast_ring_event(phba,
895427bd 12838 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
a4bc3379
JS
12839 status);
12840 }
12841 }
dea3101e 12842 return IRQ_HANDLED;
3772a991 12843} /* lpfc_sli_fp_intr_handler */
9399627f
JS
12844
12845/**
3772a991 12846 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
9399627f
JS
12847 * @irq: Interrupt number.
12848 * @dev_id: The device context pointer.
12849 *
3772a991
JS
 12850 * This function is the HBA device-level interrupt handler for a device with
 12851 * the SLI-3 interface spec, called from the PCI layer when either MSI or
12852 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12853 * requires driver attention. This function invokes the slow-path interrupt
12854 * attention handling function and fast-path interrupt attention handling
12855 * function in turn to process the relevant HBA attention events. This
12856 * function is called without any lock held. It gets the hbalock to access
12857 * and update SLI data structures.
9399627f
JS
12858 *
12859 * This function returns IRQ_HANDLED when interrupt is handled, else it
12860 * returns IRQ_NONE.
12861 **/
12862irqreturn_t
3772a991 12863lpfc_sli_intr_handler(int irq, void *dev_id)
9399627f
JS
12864{
12865 struct lpfc_hba *phba;
12866 irqreturn_t sp_irq_rc, fp_irq_rc;
12867 unsigned long status1, status2;
a747c9ce 12868 uint32_t hc_copy;
9399627f
JS
12869
12870 /*
12871 * Get the driver's phba structure from the dev_id and
12872 * assume the HBA is not interrupting.
12873 */
12874 phba = (struct lpfc_hba *) dev_id;
12875
12876 if (unlikely(!phba))
12877 return IRQ_NONE;
12878
3772a991
JS
12879 /* Check device state for handling interrupt */
12880 if (lpfc_intr_state_check(phba))
9399627f
JS
12881 return IRQ_NONE;
12882
12883 spin_lock(&phba->hbalock);
9940b97b
JS
12884 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12885 spin_unlock(&phba->hbalock);
12886 return IRQ_HANDLED;
12887 }
12888
9399627f
JS
12889 if (unlikely(!phba->ha_copy)) {
12890 spin_unlock(&phba->hbalock);
12891 return IRQ_NONE;
12892 } else if (phba->ha_copy & HA_ERATT) {
12893 if (phba->hba_flag & HBA_ERATT_HANDLED)
12894 /* ERATT polling has handled ERATT */
12895 phba->ha_copy &= ~HA_ERATT;
12896 else
12897 /* Indicate interrupt handler handles ERATT */
12898 phba->hba_flag |= HBA_ERATT_HANDLED;
12899 }
12900
a257bf90
JS
12901 /*
12902 * If there is deferred error attention, do not check for any interrupt.
12903 */
12904 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
ec21b3b0 12905 spin_unlock(&phba->hbalock);
a257bf90
JS
12906 return IRQ_NONE;
12907 }
12908
9399627f 12909 /* Clear attention sources except link and error attentions */
9940b97b
JS
12910 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12911 spin_unlock(&phba->hbalock);
12912 return IRQ_HANDLED;
12913 }
a747c9ce
JS
12914 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12915 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12916 phba->HCregaddr);
9399627f 12917 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
a747c9ce 12918 writel(hc_copy, phba->HCregaddr);
9399627f
JS
12919 readl(phba->HAregaddr); /* flush */
12920 spin_unlock(&phba->hbalock);
12921
12922 /*
12923 * Invokes slow-path host attention interrupt handling as appropriate.
12924 */
12925
12926 /* status of events with mailbox and link attention */
12927 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12928
12929 /* status of events with ELS ring */
12930 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12931 status2 >>= (4*LPFC_ELS_RING);
12932
12933 if (status1 || (status2 & HA_RXMASK))
3772a991 12934 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
9399627f
JS
12935 else
12936 sp_irq_rc = IRQ_NONE;
12937
12938 /*
12939 * Invoke fast-path host attention interrupt handling as appropriate.
12940 */
12941
12942 /* status of events with FCP ring */
12943 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12944 status1 >>= (4*LPFC_FCP_RING);
12945
12946 /* status of events with extra ring */
12947 if (phba->cfg_multi_ring_support == 2) {
12948 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12949 status2 >>= (4*LPFC_EXTRA_RING);
12950 } else
12951 status2 = 0;
12952
12953 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
3772a991 12954 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
9399627f
JS
12955 else
12956 fp_irq_rc = IRQ_NONE;
dea3101e 12957
9399627f
JS
12958 /* Return device-level interrupt handling status */
12959 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
3772a991 12960} /* lpfc_sli_intr_handler */
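/*
 * Illustrative sketch (not part of the driver): in MSI or Pin-IRQ (INTx)
 * mode a single vector is used, so the device-level handler above is the one
 * registered with the kernel; the flags and name string below are
 * assumptions for illustration, the real registration lives in lpfc_init.c.
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, "lpfc", phba);
 */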
4f774513
JS
12961
12962/**
4f774513 12963 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
4f774513
JS
12964 * @phba: pointer to lpfc hba data structure.
12965 *
12966 * This routine is invoked by the worker thread to process all the pending
4f774513 12967 * SLI4 els abort xri events.
4f774513 12968 **/
4f774513 12969void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
4f774513
JS
12970{
12971 struct lpfc_cq_event *cq_event;
12972
4f774513 12973 /* First, declare the els xri abort event has been handled */
4f774513 12974 spin_lock_irq(&phba->hbalock);
4f774513 12975 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
4f774513 12976 spin_unlock_irq(&phba->hbalock);
4f774513
JS
12977 /* Now, handle all the els xri abort events */
12978 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12979 /* Get the first event from the head of the event queue */
12980 spin_lock_irq(&phba->hbalock);
12981 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12982 cq_event, struct lpfc_cq_event, list);
12983 spin_unlock_irq(&phba->hbalock);
12984 /* Notify aborted XRI for ELS work queue */
12985 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12986 /* Free the event processed back to the free pool */
12987 lpfc_sli4_cq_event_release(phba, cq_event);
12988 }
12989}
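/*
 * Illustrative sketch (not part of the driver): the routine above runs in
 * the worker thread after the interrupt path has queued aborted-XRI events
 * and set ELS_XRI_ABORT_EVENT.  A hypothetical worker dispatch could look
 * roughly like this; the real dispatch is in the lpfc worker code.
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);
 */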
12990
341af102
JS
12991/**
12992 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12993 * @phba: pointer to lpfc hba data structure
12994 * @pIocbIn: pointer to the rspiocbq
12995 * @pIocbOut: pointer to the cmdiocbq
12996 * @wcqe: pointer to the complete wcqe
12997 *
12998 * This routine transfers the fields of a command iocbq to a response iocbq
12999 * by copying all the IOCB fields from command iocbq and transferring the
13000 * completion status information from the complete wcqe.
13001 **/
4f774513 13002static void
341af102
JS
13003lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13004 struct lpfc_iocbq *pIocbIn,
4f774513
JS
13005 struct lpfc_iocbq *pIocbOut,
13006 struct lpfc_wcqe_complete *wcqe)
13007{
af22741c 13008 int numBdes, i;
341af102 13009 unsigned long iflags;
af22741c
JS
13010 uint32_t status, max_response;
13011 struct lpfc_dmabuf *dmabuf;
13012 struct ulp_bde64 *bpl, bde;
4f774513
JS
13013 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13014
13015 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13016 sizeof(struct lpfc_iocbq) - offset);
4f774513 13017 /* Map WCQE parameters into irspiocb parameters */
acd6859b
JS
13018 status = bf_get(lpfc_wcqe_c_status, wcqe);
13019 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
4f774513
JS
13020 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13021 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13022 pIocbIn->iocb.un.fcpi.fcpi_parm =
13023 pIocbOut->iocb.un.fcpi.fcpi_parm -
13024 wcqe->total_data_placed;
13025 else
13026 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e 13027 else {
4f774513 13028 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
af22741c
JS
13029 switch (pIocbOut->iocb.ulpCommand) {
13030 case CMD_ELS_REQUEST64_CR:
13031 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13032 bpl = (struct ulp_bde64 *)dmabuf->virt;
13033 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13034 max_response = bde.tus.f.bdeSize;
13035 break;
13036 case CMD_GEN_REQUEST64_CR:
13037 max_response = 0;
13038 if (!pIocbOut->context3)
13039 break;
13040 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13041 sizeof(struct ulp_bde64);
13042 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13043 bpl = (struct ulp_bde64 *)dmabuf->virt;
13044 for (i = 0; i < numBdes; i++) {
13045 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13046 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13047 max_response += bde.tus.f.bdeSize;
13048 }
13049 break;
13050 default:
13051 max_response = wcqe->total_data_placed;
13052 break;
13053 }
13054 if (max_response < wcqe->total_data_placed)
13055 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13056 else
13057 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13058 wcqe->total_data_placed;
695a814e 13059 }
341af102 13060
acd6859b
JS
13061 /* Convert BG errors for completion status */
13062 if (status == CQE_STATUS_DI_ERROR) {
13063 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13064
13065 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13066 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13067 else
13068 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13069
13070 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13071 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13072 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13073 BGS_GUARD_ERR_MASK;
13074 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13075 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13076 BGS_APPTAG_ERR_MASK;
13077 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13078 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13079 BGS_REFTAG_ERR_MASK;
13080
13081 /* Check to see if there was any good data before the error */
13082 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13083 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13084 BGS_HI_WATER_MARK_PRESENT_MASK;
13085 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13086 wcqe->total_data_placed;
13087 }
13088
13089 /*
13090 * Set ALL the error bits to indicate we don't know what
13091 * type of error it is.
13092 */
13093 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13094 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13095 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13096 BGS_GUARD_ERR_MASK);
13097 }
13098
341af102
JS
13099 /* Pick up HBA exchange busy condition */
13100 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13101 spin_lock_irqsave(&phba->hbalock, iflags);
13102 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13103 spin_unlock_irqrestore(&phba->hbalock, iflags);
13104 }
4f774513
JS
13105}
13106
45ed1190
JS
13107/**
13108 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13109 * @phba: Pointer to HBA context object.
13110 * @wcqe: Pointer to work-queue completion queue entry.
13111 *
 13112 * This routine handles an ELS work-queue completion event and constructs
 13113 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13114 * discovery engine to handle.
13115 *
13116 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13117 **/
13118static struct lpfc_iocbq *
13119lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13120 struct lpfc_iocbq *irspiocbq)
13121{
895427bd 13122 struct lpfc_sli_ring *pring;
45ed1190
JS
13123 struct lpfc_iocbq *cmdiocbq;
13124 struct lpfc_wcqe_complete *wcqe;
13125 unsigned long iflags;
13126
895427bd 13127 pring = lpfc_phba_elsring(phba);
1234a6d5
DK
13128 if (unlikely(!pring))
13129 return NULL;
895427bd 13130
45ed1190 13131 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
7e56aa25 13132 spin_lock_irqsave(&pring->ring_lock, iflags);
45ed1190
JS
13133 pring->stats.iocb_event++;
13134 /* Look up the ELS command IOCB and create pseudo response IOCB */
13135 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13136 bf_get(lpfc_wcqe_c_request_tag, wcqe));
45ed1190 13137 if (unlikely(!cmdiocbq)) {
401bb416 13138 spin_unlock_irqrestore(&pring->ring_lock, iflags);
45ed1190
JS
13139 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13140 "0386 ELS complete with no corresponding "
401bb416
DK
13141 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13142 wcqe->word0, wcqe->total_data_placed,
13143 wcqe->parameter, wcqe->word3);
45ed1190
JS
13144 lpfc_sli_release_iocbq(phba, irspiocbq);
13145 return NULL;
13146 }
13147
401bb416
DK
13148 /* Put the iocb back on the txcmplq */
13149 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13150 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13151
45ed1190 13152 /* Fake the irspiocbq and copy necessary response information */
341af102 13153 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
45ed1190
JS
13154
13155 return irspiocbq;
13156}
13157
8a5ca109
JS
13158inline struct lpfc_cq_event *
13159lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13160{
13161 struct lpfc_cq_event *cq_event;
13162
13163 /* Allocate a new internal CQ_EVENT entry */
13164 cq_event = lpfc_sli4_cq_event_alloc(phba);
13165 if (!cq_event) {
13166 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13167 "0602 Failed to alloc CQ_EVENT entry\n");
13168 return NULL;
13169 }
13170
13171 /* Move the CQE into the event */
13172 memcpy(&cq_event->cqe, entry, size);
13173 return cq_event;
13174}
13175
04c68496
JS
13176/**
 13177 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13178 * @phba: Pointer to HBA context object.
13179 * @cqe: Pointer to mailbox completion queue entry.
13180 *
 13181 * This routine processes a mailbox completion queue entry with an
 13182 * asynchronous event.
13183 *
13184 * Return: true if work posted to worker thread, otherwise false.
13185 **/
13186static bool
13187lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13188{
13189 struct lpfc_cq_event *cq_event;
13190 unsigned long iflags;
13191
13192 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13193 "0392 Async Event: word0:x%x, word1:x%x, "
13194 "word2:x%x, word3:x%x\n", mcqe->word0,
13195 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13196
8a5ca109
JS
13197 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13198 if (!cq_event)
04c68496 13199 return false;
04c68496
JS
13200 spin_lock_irqsave(&phba->hbalock, iflags);
13201 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13202 /* Set the async event flag */
13203 phba->hba_flag |= ASYNC_EVENT;
13204 spin_unlock_irqrestore(&phba->hbalock, iflags);
13205
13206 return true;
13207}
13208
13209/**
13210 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13211 * @phba: Pointer to HBA context object.
13212 * @cqe: Pointer to mailbox completion queue entry.
13213 *
 13214 * This routine processes a mailbox completion queue entry with a mailbox
 13215 * completion event.
13216 *
13217 * Return: true if work posted to worker thread, otherwise false.
13218 **/
13219static bool
13220lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13221{
13222 uint32_t mcqe_status;
13223 MAILBOX_t *mbox, *pmbox;
13224 struct lpfc_mqe *mqe;
13225 struct lpfc_vport *vport;
13226 struct lpfc_nodelist *ndlp;
13227 struct lpfc_dmabuf *mp;
13228 unsigned long iflags;
13229 LPFC_MBOXQ_t *pmb;
13230 bool workposted = false;
13231 int rc;
13232
13233 /* If not a mailbox complete MCQE, out by checking mailbox consume */
13234 if (!bf_get(lpfc_trailer_completed, mcqe))
13235 goto out_no_mqe_complete;
13236
13237 /* Get the reference to the active mbox command */
13238 spin_lock_irqsave(&phba->hbalock, iflags);
13239 pmb = phba->sli.mbox_active;
13240 if (unlikely(!pmb)) {
13241 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13242 "1832 No pending MBOX command to handle\n");
13243 spin_unlock_irqrestore(&phba->hbalock, iflags);
13244 goto out_no_mqe_complete;
13245 }
13246 spin_unlock_irqrestore(&phba->hbalock, iflags);
13247 mqe = &pmb->u.mqe;
13248 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13249 mbox = phba->mbox;
13250 vport = pmb->vport;
13251
13252 /* Reset heartbeat timer */
13253 phba->last_completion_time = jiffies;
13254 del_timer(&phba->sli.mbox_tmo);
13255
13256 /* Move mbox data to caller's mailbox region, do endian swapping */
13257 if (pmb->mbox_cmpl && mbox)
48f8fdb4 13258 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
04c68496 13259
73d91e50
JS
13260 /*
13261 * For mcqe errors, conditionally move a modified error code to
13262 * the mbox so that the error will not be missed.
13263 */
13264 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13265 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13266 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13267 bf_set(lpfc_mqe_status, mqe,
13268 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13269 }
04c68496
JS
13270 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13271 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13272 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13273 "MBOX dflt rpi: status:x%x rpi:x%x",
13274 mcqe_status,
13275 pmbox->un.varWords[0], 0);
13276 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
3e1f0718
JS
13277 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13278 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
04c68496
JS
 13279			/* Reg_LOGIN of dflt RPI was successful. Now let's get
 13280			 * rid of the RPI using the same mbox buffer.
13281 */
13282 lpfc_unreg_login(phba, vport->vpi,
13283 pmbox->un.varWords[0], pmb);
13284 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
13285 pmb->ctx_buf = mp;
13286 pmb->ctx_ndlp = ndlp;
04c68496
JS
13287 pmb->vport = vport;
13288 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13289 if (rc != MBX_BUSY)
13290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13291 LOG_SLI, "0385 rc should "
13292 "have been MBX_BUSY\n");
13293 if (rc != MBX_NOT_FINISHED)
13294 goto send_current_mbox;
13295 }
13296 }
13297 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13298 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13299 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13300
13301 /* There is mailbox completion work to do */
13302 spin_lock_irqsave(&phba->hbalock, iflags);
13303 __lpfc_mbox_cmpl_put(phba, pmb);
13304 phba->work_ha |= HA_MBATT;
13305 spin_unlock_irqrestore(&phba->hbalock, iflags);
13306 workposted = true;
13307
13308send_current_mbox:
13309 spin_lock_irqsave(&phba->hbalock, iflags);
13310 /* Release the mailbox command posting token */
13311 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13312 /* Setting active mailbox pointer need to be in sync to flag clear */
13313 phba->sli.mbox_active = NULL;
13314 spin_unlock_irqrestore(&phba->hbalock, iflags);
13315 /* Wake up worker thread to post the next pending mailbox command */
13316 lpfc_worker_wake_up(phba);
13317out_no_mqe_complete:
13318 if (bf_get(lpfc_trailer_consumed, mcqe))
13319 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13320 return workposted;
13321}
13322
13323/**
13324 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13325 * @phba: Pointer to HBA context object.
13326 * @cqe: Pointer to mailbox completion queue entry.
13327 *
 13328 * This routine processes a mailbox completion queue entry; it invokes the
 13329 * proper mailbox completion handling or asynchronous event handling routine
13330 * according to the MCQE's async bit.
13331 *
13332 * Return: true if work posted to worker thread, otherwise false.
13333 **/
13334static bool
32517fc0
JS
13335lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13336 struct lpfc_cqe *cqe)
04c68496
JS
13337{
13338 struct lpfc_mcqe mcqe;
13339 bool workposted;
13340
32517fc0
JS
13341 cq->CQ_mbox++;
13342
04c68496 13343 /* Copy the mailbox MCQE and convert endian order as needed */
48f8fdb4 13344 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
04c68496
JS
13345
13346 /* Invoke the proper event handling routine */
13347 if (!bf_get(lpfc_trailer_async, &mcqe))
13348 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13349 else
13350 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13351 return workposted;
13352}
13353
4f774513
JS
13354/**
13355 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13356 * @phba: Pointer to HBA context object.
2a76a283 13357 * @cq: Pointer to associated CQ
4f774513
JS
13358 * @wcqe: Pointer to work-queue completion queue entry.
13359 *
13360 * This routine handles an ELS work-queue completion event.
13361 *
13362 * Return: true if work posted to worker thread, otherwise false.
13363 **/
13364static bool
2a76a283 13365lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13366 struct lpfc_wcqe_complete *wcqe)
13367{
4f774513
JS
13368 struct lpfc_iocbq *irspiocbq;
13369 unsigned long iflags;
2a76a283 13370 struct lpfc_sli_ring *pring = cq->pring;
0e9bb8d7
JS
13371 int txq_cnt = 0;
13372 int txcmplq_cnt = 0;
13373 int fcp_txcmplq_cnt = 0;
4f774513 13374
11f0e34f
JS
13375 /* Check for response status */
13376 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13377 /* Log the error status */
13378 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13379 "0357 ELS CQE error: status=x%x: "
13380 "CQE: %08x %08x %08x %08x\n",
13381 bf_get(lpfc_wcqe_c_status, wcqe),
13382 wcqe->word0, wcqe->total_data_placed,
13383 wcqe->parameter, wcqe->word3);
13384 }
13385
45ed1190 13386 /* Get an irspiocbq for later ELS response processing use */
4f774513
JS
13387 irspiocbq = lpfc_sli_get_iocbq(phba);
13388 if (!irspiocbq) {
0e9bb8d7
JS
13389 if (!list_empty(&pring->txq))
13390 txq_cnt++;
13391 if (!list_empty(&pring->txcmplq))
13392 txcmplq_cnt++;
4f774513 13393 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2a9bf3d0
JS
13394 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13395 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
0e9bb8d7
JS
13396 txq_cnt, phba->iocb_cnt,
13397 fcp_txcmplq_cnt,
13398 txcmplq_cnt);
45ed1190 13399 return false;
4f774513 13400 }
4f774513 13401
45ed1190
JS
13402 /* Save off the slow-path queue event for work thread to process */
13403 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
4f774513 13404 spin_lock_irqsave(&phba->hbalock, iflags);
4d9ab994 13405 list_add_tail(&irspiocbq->cq_event.list,
45ed1190
JS
13406 &phba->sli4_hba.sp_queue_event);
13407 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513 13408 spin_unlock_irqrestore(&phba->hbalock, iflags);
4f774513 13409
45ed1190 13410 return true;
4f774513
JS
13411}
13412
13413/**
13414 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13415 * @phba: Pointer to HBA context object.
13416 * @wcqe: Pointer to work-queue completion queue entry.
13417 *
3f8b6fb7 13418 * This routine handles a slow-path WQ entry consumed event by invoking the
4f774513
JS
13419 * proper WQ release routine to the slow-path WQ.
13420 **/
13421static void
13422lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13423 struct lpfc_wcqe_release *wcqe)
13424{
2e90f4b5
JS
13425 /* sanity check on queue memory */
13426 if (unlikely(!phba->sli4_hba.els_wq))
13427 return;
4f774513
JS
13428 /* Check for the slow-path ELS work queue */
13429 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13430 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13431 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13432 else
13433 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13434 "2579 Slow-path wqe consume event carries "
13435 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13436 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13437 phba->sli4_hba.els_wq->queue_id);
13438}
13439
13440/**
13441 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13442 * @phba: Pointer to HBA context object.
13443 * @cq: Pointer to a WQ completion queue.
13444 * @wcqe: Pointer to work-queue completion queue entry.
13445 *
13446 * This routine handles an XRI abort event.
13447 *
13448 * Return: true if work posted to worker thread, otherwise false.
13449 **/
13450static bool
13451lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13452 struct lpfc_queue *cq,
13453 struct sli4_wcqe_xri_aborted *wcqe)
13454{
13455 bool workposted = false;
13456 struct lpfc_cq_event *cq_event;
13457 unsigned long iflags;
13458
4f774513
JS
13459 switch (cq->subtype) {
13460 case LPFC_FCP:
5e5b511d
JS
13461 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
13462 workposted = false;
4f774513 13463 break;
422c4cb7 13464 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
4f774513 13465 case LPFC_ELS:
8a5ca109
JS
13466 cq_event = lpfc_cq_event_setup(
13467 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13468 if (!cq_event)
13469 return false;
5e5b511d 13470 cq_event->hdwq = cq->hdwq;
4f774513
JS
13471 spin_lock_irqsave(&phba->hbalock, iflags);
13472 list_add_tail(&cq_event->list,
13473 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13474 /* Set the els xri abort event flag */
13475 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13476 spin_unlock_irqrestore(&phba->hbalock, iflags);
13477 workposted = true;
13478 break;
318083ad 13479 case LPFC_NVME:
8a5ca109
JS
13480 /* Notify aborted XRI for NVME work queue */
13481 if (phba->nvmet_support)
13482 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13483 else
5e5b511d 13484 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
8a5ca109
JS
13485
13486 workposted = false;
318083ad 13487 break;
4f774513
JS
13488 default:
13489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
318083ad
JS
13490 "0603 Invalid CQ subtype %d: "
13491 "%08x %08x %08x %08x\n",
13492 cq->subtype, wcqe->word0, wcqe->parameter,
13493 wcqe->word2, wcqe->word3);
4f774513
JS
13494 workposted = false;
13495 break;
13496 }
13497 return workposted;
13498}
13499
e817e5d7
JS
13500#define FC_RCTL_MDS_DIAGS 0xF4
13501
4f774513
JS
13502/**
13503 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13504 * @phba: Pointer to HBA context object.
13505 * @rcqe: Pointer to receive-queue completion queue entry.
13506 *
 13507 * This routine processes a receive-queue completion queue entry.
13508 *
13509 * Return: true if work posted to worker thread, otherwise false.
13510 **/
13511static bool
4d9ab994 13512lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 13513{
4f774513 13514 bool workposted = false;
e817e5d7 13515 struct fc_frame_header *fc_hdr;
4f774513
JS
13516 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13517 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
547077a4 13518 struct lpfc_nvmet_tgtport *tgtp;
4f774513 13519 struct hbq_dmabuf *dma_buf;
7851fe2c 13520 uint32_t status, rq_id;
4f774513
JS
13521 unsigned long iflags;
13522
2e90f4b5
JS
13523 /* sanity check on queue memory */
13524 if (unlikely(!hrq) || unlikely(!drq))
13525 return workposted;
13526
7851fe2c
JS
13527 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13528 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13529 else
13530 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13531 if (rq_id != hrq->queue_id)
4f774513
JS
13532 goto out;
13533
4d9ab994 13534 status = bf_get(lpfc_rcqe_status, rcqe);
4f774513
JS
13535 switch (status) {
13536 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13537 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13538 "2537 Receive Frame Truncated!!\n");
5bd5f66c 13539 /* fall through */
4f774513
JS
13540 case FC_STATUS_RQ_SUCCESS:
13541 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 13542 lpfc_sli4_rq_release(hrq, drq);
4f774513
JS
13543 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13544 if (!dma_buf) {
b84daac9 13545 hrq->RQ_no_buf_found++;
4f774513
JS
13546 spin_unlock_irqrestore(&phba->hbalock, iflags);
13547 goto out;
13548 }
b84daac9 13549 hrq->RQ_rcv_buf++;
547077a4 13550 hrq->RQ_buf_posted--;
4d9ab994 13551 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
895427bd 13552
e817e5d7
JS
13553 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13554
13555 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13556 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13557 spin_unlock_irqrestore(&phba->hbalock, iflags);
13558 /* Handle MDS Loopback frames */
13559 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13560 break;
13561 }
13562
13563 /* save off the frame for the work thread to process */
4d9ab994 13564 list_add_tail(&dma_buf->cq_event.list,
45ed1190 13565 &phba->sli4_hba.sp_queue_event);
4f774513 13566 /* Frame received */
45ed1190 13567 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513
JS
13568 spin_unlock_irqrestore(&phba->hbalock, iflags);
13569 workposted = true;
13570 break;
4f774513 13571 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
13572 if (phba->nvmet_support) {
13573 tgtp = phba->targetport->private;
13574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13575 "6402 RQE Error x%x, posted %d err_cnt "
13576 "%d: %x %x %x\n",
13577 status, hrq->RQ_buf_posted,
13578 hrq->RQ_no_posted_buf,
13579 atomic_read(&tgtp->rcv_fcp_cmd_in),
13580 atomic_read(&tgtp->rcv_fcp_cmd_out),
13581 atomic_read(&tgtp->xmt_fcp_release));
13582 }
13583 /* fallthrough */
13584
13585 case FC_STATUS_INSUFF_BUF_NEED_BUF:
b84daac9 13586 hrq->RQ_no_posted_buf++;
4f774513
JS
13587 /* Post more buffers if possible */
13588 spin_lock_irqsave(&phba->hbalock, iflags);
13589 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13590 spin_unlock_irqrestore(&phba->hbalock, iflags);
13591 workposted = true;
13592 break;
13593 }
13594out:
13595 return workposted;
4f774513
JS
13596}
13597
4d9ab994
JS
13598/**
13599 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13600 * @phba: Pointer to HBA context object.
13601 * @cq: Pointer to the completion queue.
32517fc0 13602 * @cqe: Pointer to a completion queue entry.
4d9ab994 13603 *
25985edc 13604 * This routine processes a slow-path work-queue or receive-queue completion queue
4d9ab994
JS
13605 * entry.
13606 *
13607 * Return: true if work posted to worker thread, otherwise false.
13608 **/
13609static bool
13610lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13611 struct lpfc_cqe *cqe)
13612{
45ed1190 13613 struct lpfc_cqe cqevt;
4d9ab994
JS
13614 bool workposted = false;
13615
13616 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 13617 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
4d9ab994
JS
13618
13619 /* Check and process for different type of WCQE and dispatch */
45ed1190 13620 switch (bf_get(lpfc_cqe_code, &cqevt)) {
4d9ab994 13621 case CQE_CODE_COMPL_WQE:
45ed1190 13622 /* Process the WQ/RQ complete event */
bc73905a 13623 phba->last_completion_time = jiffies;
2a76a283 13624 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
45ed1190 13625 (struct lpfc_wcqe_complete *)&cqevt);
4d9ab994
JS
13626 break;
13627 case CQE_CODE_RELEASE_WQE:
13628 /* Process the WQ release event */
13629 lpfc_sli4_sp_handle_rel_wcqe(phba,
45ed1190 13630 (struct lpfc_wcqe_release *)&cqevt);
4d9ab994
JS
13631 break;
13632 case CQE_CODE_XRI_ABORTED:
13633 /* Process the WQ XRI abort event */
bc73905a 13634 phba->last_completion_time = jiffies;
4d9ab994 13635 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
45ed1190 13636 (struct sli4_wcqe_xri_aborted *)&cqevt);
4d9ab994
JS
13637 break;
13638 case CQE_CODE_RECEIVE:
7851fe2c 13639 case CQE_CODE_RECEIVE_V1:
4d9ab994 13640 /* Process the RQ event */
bc73905a 13641 phba->last_completion_time = jiffies;
4d9ab994 13642 workposted = lpfc_sli4_sp_handle_rcqe(phba,
45ed1190 13643 (struct lpfc_rcqe *)&cqevt);
4d9ab994
JS
13644 break;
13645 default:
13646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13647 "0388 Not a valid WCQE code: x%x\n",
45ed1190 13648 bf_get(lpfc_cqe_code, &cqevt));
4d9ab994
JS
13649 break;
13650 }
13651 return workposted;
13652}
13653
4f774513
JS
13654/**
13655 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13656 * @phba: Pointer to HBA context object.
13657 * @eqe: Pointer to fast-path event queue entry.
13658 *
 13659 * This routine processes an event queue entry from the slow-path event queue.
 13660 * It will check the MajorCode and MinorCode to determine whether this is for a
 13661 * completion event on a completion queue; if not, an error shall be logged
 13662 * and it will just return. Otherwise, it will get to the corresponding
 13663 * completion queue and process all the entries on that completion queue,
 13664 * rearm the completion queue, and then return.
13665 *
13666 **/
f485c18d 13667static void
67d12733
JS
13668lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13669 struct lpfc_queue *speq)
4f774513 13670{
67d12733 13671 struct lpfc_queue *cq = NULL, *childq;
4f774513
JS
13672 uint16_t cqid;
13673
4f774513 13674 /* Get the reference to the corresponding CQ */
cb5172ea 13675 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
4f774513 13676
4f774513
JS
13677 list_for_each_entry(childq, &speq->child_list, list) {
13678 if (childq->queue_id == cqid) {
13679 cq = childq;
13680 break;
13681 }
13682 }
13683 if (unlikely(!cq)) {
75baf696
JS
13684 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13686 "0365 Slow-path CQ identifier "
13687 "(%d) does not exist\n", cqid);
f485c18d 13688 return;
4f774513
JS
13689 }
13690
895427bd
JS
13691 /* Save EQ associated with this CQ */
13692 cq->assoc_qp = speq;
13693
6a828b0f 13694 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
f485c18d
DK
13695 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13696 "0390 Cannot schedule soft IRQ "
13697 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13698 cqid, cq->queue_id, smp_processor_id());
13699}
13700
13701/**
32517fc0 13702 * __lpfc_sli4_process_cq - Process elements of a CQ
f485c18d 13703 * @phba: Pointer to HBA context object.
32517fc0
JS
13704 * @cq: Pointer to CQ to be processed
13705 * @handler: Routine to process each cqe
13706 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
f485c18d 13707 *
32517fc0
JS
13708 * This routine processes completion queue entries in a CQ. While a valid
13709 * queue element is found, the handler is called. During processing checks
13710 * are made for periodic doorbell writes to let the hardware know of
13711 * element consumption.
13712 *
13713 * If the max limit on cqes to process is hit, or there are no more valid
13714 * entries, the loop stops. If we processed a sufficient number of elements,
13715 * meaning there is sufficient load, rather than rearming and generating
13716 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13717 * indicates no rescheduling.
f485c18d 13718 *
32517fc0 13719 * Returns True if work scheduled, False otherwise.
f485c18d 13720 **/
32517fc0
JS
13721static bool
13722__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13723 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13724 struct lpfc_cqe *), unsigned long *delay)
f485c18d 13725{
f485c18d
DK
13726 struct lpfc_cqe *cqe;
13727 bool workposted = false;
32517fc0
JS
13728 int count = 0, consumed = 0;
13729 bool arm = true;
13730
13731 /* default - no reschedule */
13732 *delay = 0;
13733
13734 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13735 goto rearm_and_exit;
f485c18d 13736
4f774513 13737 /* Process all the entries to the CQ */
32517fc0
JS
13738 cqe = lpfc_sli4_cq_get(cq);
13739 while (cqe) {
13740#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME)
13741 if (phba->ktime_on)
13742 cq->isr_timestamp = ktime_get_ns();
13743 else
13744 cq->isr_timestamp = 0;
13745#endif
13746 workposted |= handler(phba, cq, cqe);
13747 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13748
13749 consumed++;
13750 if (!(++count % cq->max_proc_limit))
13751 break;
13752
13753 if (!(count % cq->notify_interval)) {
13754 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13755 LPFC_QUEUE_NOARM);
13756 consumed = 0;
13757 }
13758
13759 cqe = lpfc_sli4_cq_get(cq);
13760 }
13761 if (count >= phba->cfg_cq_poll_threshold) {
13762 *delay = 1;
13763 arm = false;
13764 }
13765
13766 /* Track the max number of CQEs processed in 1 EQ */
13767 if (count > cq->CQ_max_cqe)
13768 cq->CQ_max_cqe = count;
13769
13770 cq->assoc_qp->EQ_cqe_cnt += count;
13771
13772 /* Catch the no cq entry condition */
13773 if (unlikely(count == 0))
13774 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13775 "0369 No entry from completion queue "
13776 "qid=%d\n", cq->queue_id);
13777
13778 cq->queue_claimed = 0;
13779
13780rearm_and_exit:
13781 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13782 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13783
13784 return workposted;
13785}
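/*
 * Illustrative sketch (not part of the driver): a caller passes a per-CQE
 * handler with the signature
 *	bool handler(struct lpfc_hba *phba, struct lpfc_queue *cq,
 *		     struct lpfc_cqe *cqe);
 * and, if a non-zero delay is returned, requeues the CQ work instead of
 * relying on another interrupt.  The snippet below mirrors the real callers
 * further down in this file.
 *
 *	unsigned long delay;
 *	bool work = __lpfc_sli4_process_cq(phba, cq,
 *					   lpfc_sli4_sp_handle_cqe, &delay);
 *	if (delay)
 *		queue_delayed_work_on(cq->chann, phba->wq,
 *				      &cq->sched_spwork, delay);
 *	if (work)
 *		lpfc_worker_wake_up(phba);
 */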
13786
13787/**
 13788 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13789 * @cq: pointer to CQ to process
13790 *
13791 * This routine calls the cq processing routine with a handler specific
13792 * to the type of queue bound to it.
13793 *
13794 * The CQ routine returns two values: the first is the calling status,
13795 * which indicates whether work was queued to the background discovery
13796 * thread. If true, the routine should wakeup the discovery thread;
13797 * the second is the delay parameter. If non-zero, rather than rearming
13798 * the CQ and yet another interrupt, the CQ handler should be queued so
13799 * that it is processed in a subsequent polling action. The value of
13800 * the delay indicates when to reschedule it.
13801 **/
13802static void
13803__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13804{
13805 struct lpfc_hba *phba = cq->phba;
13806 unsigned long delay;
13807 bool workposted = false;
13808
13809 /* Process and rearm the CQ */
4f774513
JS
13810 switch (cq->type) {
13811 case LPFC_MCQ:
32517fc0
JS
13812 workposted |= __lpfc_sli4_process_cq(phba, cq,
13813 lpfc_sli4_sp_handle_mcqe,
13814 &delay);
4f774513
JS
13815 break;
13816 case LPFC_WCQ:
32517fc0
JS
13817 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
13818 workposted |= __lpfc_sli4_process_cq(phba, cq,
13819 lpfc_sli4_fp_handle_cqe,
13820 &delay);
13821 else
13822 workposted |= __lpfc_sli4_process_cq(phba, cq,
13823 lpfc_sli4_sp_handle_cqe,
13824 &delay);
4f774513
JS
13825 break;
13826 default:
13827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13828 "0370 Invalid completion queue type (%d)\n",
13829 cq->type);
f485c18d 13830 return;
4f774513
JS
13831 }
13832
32517fc0
JS
13833 if (delay) {
13834 if (!queue_delayed_work_on(cq->chann, phba->wq,
13835 &cq->sched_spwork, delay))
13836 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13837 "0394 Cannot schedule soft IRQ "
13838 "for cqid=%d on CPU %d\n",
13839 cq->queue_id, cq->chann);
13840 }
4f774513
JS
13841
13842 /* wake up worker thread if there are works to be done */
13843 if (workposted)
13844 lpfc_worker_wake_up(phba);
13845}
13846
32517fc0
JS
13847/**
13848 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13849 * interrupt
13850 * @work: pointer to work element
13851 *
13852 * translates from the work handler and calls the slow-path handler.
13853 **/
13854static void
13855lpfc_sli4_sp_process_cq(struct work_struct *work)
13856{
13857 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13858
13859 __lpfc_sli4_sp_process_cq(cq);
13860}
13861
13862/**
13863 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13864 * @work: pointer to work element
13865 *
13866 * translates from the work handler and calls the slow-path handler.
13867 **/
13868static void
13869lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13870{
13871 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13872 struct lpfc_queue, sched_spwork);
13873
13874 __lpfc_sli4_sp_process_cq(cq);
13875}
13876
4f774513
JS
13877/**
13878 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
2a76a283
JS
13879 * @phba: Pointer to HBA context object.
13880 * @cq: Pointer to associated CQ
13881 * @wcqe: Pointer to work-queue completion queue entry.
4f774513
JS
13882 *
 13883 * This routine processes a fast-path work queue completion entry from the
 13884 * fast-path event queue for FCP command response completion.
13885 **/
13886static void
2a76a283 13887lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13888 struct lpfc_wcqe_complete *wcqe)
13889{
2a76a283 13890 struct lpfc_sli_ring *pring = cq->pring;
4f774513
JS
13891 struct lpfc_iocbq *cmdiocbq;
13892 struct lpfc_iocbq irspiocbq;
13893 unsigned long iflags;
13894
4f774513
JS
13895 /* Check for response status */
13896 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13897 /* If resource errors reported from HBA, reduce queue
13898 * depth of the SCSI device.
13899 */
e3d2b802
JS
13900 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13901 IOSTAT_LOCAL_REJECT)) &&
13902 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13903 IOERR_NO_RESOURCES))
4f774513 13904 phba->lpfc_rampdown_queue_depth(phba);
e3d2b802 13905
4f774513 13906 /* Log the error status */
11f0e34f
JS
13907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13908 "0373 FCP CQE error: status=x%x: "
13909 "CQE: %08x %08x %08x %08x\n",
4f774513 13910 bf_get(lpfc_wcqe_c_status, wcqe),
11f0e34f
JS
13911 wcqe->word0, wcqe->total_data_placed,
13912 wcqe->parameter, wcqe->word3);
4f774513
JS
13913 }
13914
13915 /* Look up the FCP command IOCB and create pseudo response IOCB */
7e56aa25
JS
13916 spin_lock_irqsave(&pring->ring_lock, iflags);
13917 pring->stats.iocb_event++;
4f774513
JS
13918 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13919 bf_get(lpfc_wcqe_c_request_tag, wcqe));
7e56aa25 13920 spin_unlock_irqrestore(&pring->ring_lock, iflags);
4f774513
JS
13921 if (unlikely(!cmdiocbq)) {
13922 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13923 "0374 FCP complete with no corresponding "
13924 "cmdiocb: iotag (%d)\n",
13925 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13926 return;
13927 }
c8a4ce0b
DK
13928#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13929 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13930#endif
895427bd
JS
13931 if (cmdiocbq->iocb_cmpl == NULL) {
13932 if (cmdiocbq->wqe_cmpl) {
13933 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13934 spin_lock_irqsave(&phba->hbalock, iflags);
13935 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13936 spin_unlock_irqrestore(&phba->hbalock, iflags);
13937 }
13938
13939 /* Pass the cmd_iocb and the wcqe to the upper layer */
13940 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13941 return;
13942 }
4f774513
JS
13943 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13944 "0375 FCP cmdiocb not callback function "
13945 "iotag: (%d)\n",
13946 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13947 return;
13948 }
13949
13950 /* Fake the irspiocb and copy necessary response information */
341af102 13951 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
4f774513 13952
0f65ff68
JS
13953 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13954 spin_lock_irqsave(&phba->hbalock, iflags);
13955 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13956 spin_unlock_irqrestore(&phba->hbalock, iflags);
13957 }
13958
4f774513
JS
13959 /* Pass the cmd_iocb and the rsp state to the upper layer */
13960 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13961}
13962
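/*
 * Summary note (added for clarity, not in the original source): the routine
 * above completes an I/O by one of two paths.  If the command carries a
 * wqe_cmpl handler, the raw WCQE is passed straight up; otherwise a
 * temporary response IOCB is faked on the stack via
 * lpfc_sli4_iocb_param_transfer() and handed to iocb_cmpl.  In both cases a
 * pending LPFC_DRIVER_ABORTED flag is cleared first under the hbalock.
 */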
13963/**
13964 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13965 * @phba: Pointer to HBA context object.
13966 * @cq: Pointer to completion queue.
13967 * @wcqe: Pointer to work-queue completion queue entry.
13968 *
3f8b6fb7 13969 * This routine handles a fast-path WQ entry consumed event by invoking the
4f774513
JS
13970 * proper WQ release routine to the slow-path WQ.
13971 **/
13972static void
13973lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13974 struct lpfc_wcqe_release *wcqe)
13975{
13976 struct lpfc_queue *childwq;
13977 bool wqid_matched = false;
895427bd 13978 uint16_t hba_wqid;
4f774513
JS
13979
13980 /* Check for fast-path FCP work queue release */
895427bd 13981 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
4f774513 13982 list_for_each_entry(childwq, &cq->child_list, list) {
895427bd 13983 if (childwq->queue_id == hba_wqid) {
4f774513
JS
13984 lpfc_sli4_wq_release(childwq,
13985 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
6e8e1c14
JS
13986 if (childwq->q_flag & HBA_NVMET_WQFULL)
13987 lpfc_nvmet_wqfull_process(phba, childwq);
4f774513
JS
13988 wqid_matched = true;
13989 break;
13990 }
13991 }
13992 /* Report warning log message if no match found */
13993 if (wqid_matched != true)
13994 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13995 "2580 Fast-path wqe consume event carries "
895427bd 13996 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
4f774513
JS
13997}
13998
13999/**
2d7dbc4c
JS
14000 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14001 * @phba: Pointer to HBA context object.
14002 * @rcqe: Pointer to receive-queue completion queue entry.
4f774513 14003 *
2d7dbc4c
JS
14004 * This routine processes a receive-queue completion queue entry.
14005 *
14006 * Return: true if work posted to worker thread, otherwise false.
14007 **/
14008static bool
14009lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14010 struct lpfc_rcqe *rcqe)
14011{
14012 bool workposted = false;
14013 struct lpfc_queue *hrq;
14014 struct lpfc_queue *drq;
14015 struct rqb_dmabuf *dma_buf;
14016 struct fc_frame_header *fc_hdr;
547077a4 14017 struct lpfc_nvmet_tgtport *tgtp;
2d7dbc4c
JS
14018 uint32_t status, rq_id;
14019 unsigned long iflags;
14020 uint32_t fctl, idx;
14021
14022 if ((phba->nvmet_support == 0) ||
14023 (phba->sli4_hba.nvmet_cqset == NULL))
14024 return workposted;
14025
14026 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14027 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14028 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14029
14030 /* sanity check on queue memory */
14031 if (unlikely(!hrq) || unlikely(!drq))
14032 return workposted;
14033
14034 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14035 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14036 else
14037 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14038
14039 if ((phba->nvmet_support == 0) ||
14040 (rq_id != hrq->queue_id))
14041 return workposted;
14042
14043 status = bf_get(lpfc_rcqe_status, rcqe);
14044 switch (status) {
14045 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14047 "6126 Receive Frame Truncated!!\n");
5bd5f66c 14048 /* fall through */
2d7dbc4c 14049 case FC_STATUS_RQ_SUCCESS:
2d7dbc4c 14050 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 14051 lpfc_sli4_rq_release(hrq, drq);
2d7dbc4c
JS
14052 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14053 if (!dma_buf) {
14054 hrq->RQ_no_buf_found++;
14055 spin_unlock_irqrestore(&phba->hbalock, iflags);
14056 goto out;
14057 }
14058 spin_unlock_irqrestore(&phba->hbalock, iflags);
14059 hrq->RQ_rcv_buf++;
547077a4 14060 hrq->RQ_buf_posted--;
2d7dbc4c
JS
14061 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14062
14063 /* Just some basic sanity checks on FCP Command frame */
14064 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14065 fc_hdr->fh_f_ctl[1] << 8 |
14066 fc_hdr->fh_f_ctl[2]);
14067 if (((fctl &
14068 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14069 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14070 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14071 goto drop;
14072
14073 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14074 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
d613b6a7 14075 lpfc_nvmet_unsol_fcp_event(
66d7ce93 14076 phba, idx, dma_buf,
c8a4ce0b 14077 cq->isr_timestamp);
2d7dbc4c
JS
14078 return false;
14079 }
14080drop:
22b738ac 14081 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
2d7dbc4c 14082 break;
2d7dbc4c 14083 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
14084 if (phba->nvmet_support) {
14085 tgtp = phba->targetport->private;
14086 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
14087 "6401 RQE Error x%x, posted %d err_cnt "
14088 "%d: %x %x %x\n",
14089 status, hrq->RQ_buf_posted,
14090 hrq->RQ_no_posted_buf,
14091 atomic_read(&tgtp->rcv_fcp_cmd_in),
14092 atomic_read(&tgtp->rcv_fcp_cmd_out),
14093 atomic_read(&tgtp->xmt_fcp_release));
14094 }
14095 /* fallthrough */
14096
14097 case FC_STATUS_INSUFF_BUF_NEED_BUF:
2d7dbc4c
JS
14098 hrq->RQ_no_posted_buf++;
14099 /* Post more buffers if possible */
2d7dbc4c
JS
14100 break;
14101 }
14102out:
14103 return workposted;
14104}
14105
4f774513 14106/**
895427bd 14107 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
32517fc0 14108 * @phba: adapter with cq
4f774513
JS
14109 * @cq: Pointer to the completion queue.
14110 * @cqe: Pointer to fast-path completion queue entry.
14111 *
14112 * This routine processes a fast-path work queue completion entry from the
14113 * fast-path event queue for FCP command response completion.
32517fc0
JS
14114 *
14115 * Return: true if work posted to worker thread, otherwise false.
4f774513 14116 **/
32517fc0 14117static bool
895427bd 14118lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
14119 struct lpfc_cqe *cqe)
14120{
14121 struct lpfc_wcqe_release wcqe;
14122 bool workposted = false;
14123
14124 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 14125 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
4f774513
JS
14126
14127 	/* Check the different types of WCQE and dispatch accordingly */
14128 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14129 case CQE_CODE_COMPL_WQE:
895427bd 14130 case CQE_CODE_NVME_ERSP:
b84daac9 14131 cq->CQ_wq++;
4f774513 14132 /* Process the WQ complete event */
98fc5dd9 14133 phba->last_completion_time = jiffies;
895427bd
JS
14134 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
14135 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14136 (struct lpfc_wcqe_complete *)&wcqe);
14137 if (cq->subtype == LPFC_NVME_LS)
14138 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
4f774513
JS
14139 (struct lpfc_wcqe_complete *)&wcqe);
14140 break;
14141 case CQE_CODE_RELEASE_WQE:
b84daac9 14142 cq->CQ_release_wqe++;
4f774513
JS
14143 /* Process the WQ release event */
14144 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14145 (struct lpfc_wcqe_release *)&wcqe);
14146 break;
14147 case CQE_CODE_XRI_ABORTED:
b84daac9 14148 cq->CQ_xri_aborted++;
4f774513 14149 /* Process the WQ XRI abort event */
bc73905a 14150 phba->last_completion_time = jiffies;
4f774513
JS
14151 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14152 (struct sli4_wcqe_xri_aborted *)&wcqe);
14153 break;
895427bd
JS
14154 case CQE_CODE_RECEIVE_V1:
14155 case CQE_CODE_RECEIVE:
14156 phba->last_completion_time = jiffies;
2d7dbc4c
JS
14157 if (cq->subtype == LPFC_NVMET) {
14158 workposted = lpfc_sli4_nvmet_handle_rcqe(
14159 phba, cq, (struct lpfc_rcqe *)&wcqe);
14160 }
895427bd 14161 break;
4f774513
JS
14162 default:
14163 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 14164 "0144 Not a valid CQE code: x%x\n",
4f774513
JS
14165 bf_get(lpfc_wcqe_c_code, &wcqe));
14166 break;
14167 }
14168 return workposted;
14169}
14170
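/*
 * Dispatch summary (added for clarity): the switch above routes each
 * fast-path CQE by its code field: CQE_CODE_COMPL_WQE / CQE_CODE_NVME_ERSP
 * go to lpfc_sli4_fp_handle_fcp_wcqe(), CQE_CODE_RELEASE_WQE to
 * lpfc_sli4_fp_handle_rel_wcqe(), CQE_CODE_XRI_ABORTED to
 * lpfc_sli4_sp_handle_abort_xri_wcqe(), and CQE_CODE_RECEIVE[_V1] to
 * lpfc_sli4_nvmet_handle_rcqe() when the CQ subtype is LPFC_NVMET.
 */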
14171/**
67d12733 14172 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
4f774513
JS
14173 * @phba: Pointer to HBA context object.
14174 * @eqe: Pointer to fast-path event queue entry.
14175 *
14176 * This routine processes an event queue entry from the fast-path event queue.
14177 * It checks the MajorCode and MinorCode to determine whether this is a
14178 * completion event on a completion queue; if not, an error is logged and the
14179 * routine returns. Otherwise, it finds the corresponding completion queue,
14180 * processes all the entries on that completion queue, rearms the completion
14181 * queue, and then returns.
14182 **/
f485c18d 14183static void
32517fc0
JS
14184lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14185 struct lpfc_eqe *eqe)
4f774513 14186{
895427bd 14187 struct lpfc_queue *cq = NULL;
32517fc0 14188 uint32_t qidx = eq->hdwq;
2d7dbc4c 14189 uint16_t cqid, id;
4f774513 14190
cb5172ea 14191 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
4f774513 14192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
67d12733 14193 "0366 Not a valid completion "
4f774513 14194 "event: majorcode=x%x, minorcode=x%x\n",
cb5172ea
JS
14195 bf_get_le32(lpfc_eqe_major_code, eqe),
14196 bf_get_le32(lpfc_eqe_minor_code, eqe));
f485c18d 14197 return;
4f774513
JS
14198 }
14199
67d12733
JS
14200 /* Get the reference to the corresponding CQ */
14201 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14202
6a828b0f
JS
14203 /* Use the fast lookup method first */
14204 if (cqid <= phba->sli4_hba.cq_max) {
14205 cq = phba->sli4_hba.cq_lookup[cqid];
14206 if (cq)
14207 goto work_cq;
cdb42bec
JS
14208 }
14209
14210 /* Next check for NVMET completion */
2d7dbc4c
JS
14211 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14212 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14213 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14214 /* Process NVMET unsol rcv */
14215 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14216 goto process_cq;
14217 }
67d12733
JS
14218 }
14219
895427bd
JS
14220 if (phba->sli4_hba.nvmels_cq &&
14221 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14222 /* Process NVME unsol rcv */
14223 cq = phba->sli4_hba.nvmels_cq;
14224 }
14225
14226 /* Otherwise this is a Slow path event */
14227 if (cq == NULL) {
cdb42bec
JS
14228 lpfc_sli4_sp_handle_eqe(phba, eqe,
14229 phba->sli4_hba.hdwq[qidx].hba_eq);
f485c18d 14230 return;
4f774513
JS
14231 }
14232
895427bd 14233process_cq:
4f774513
JS
14234 if (unlikely(cqid != cq->queue_id)) {
14235 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14236 "0368 Miss-matched fast-path completion "
14237 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14238 cqid, cq->queue_id);
f485c18d 14239 return;
4f774513
JS
14240 }
14241
6a828b0f 14242work_cq:
45aa312e 14243 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
f485c18d
DK
14244 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14245 "0363 Cannot schedule soft IRQ "
14246 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14247 cqid, cq->queue_id, smp_processor_id());
14248}
14249
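/*
 * Lookup-order note (added for clarity): the EQE handler above resolves the
 * CQ for a given cqid in this order: (1) the cq_lookup[] table, indexed
 * directly by cqid and bounded by sli4_hba.cq_max; (2) the NVMET cqset
 * range, when nvmet_mrq is configured; (3) the NVME LS CQ; otherwise the
 * event is treated as a slow-path event and handed to
 * lpfc_sli4_sp_handle_eqe().  A matched CQ is not processed in the ISR but
 * queued to phba->wq on the CQ's assigned CPU via cq->irqwork.
 */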
14250/**
32517fc0
JS
14251 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14252 * @cq: Pointer to CQ to be processed
f485c18d 14253 *
32517fc0
JS
14254 * This routine calls the cq processing routine with the handler for
14255 * fast path CQEs.
14256 *
14257 * The CQ routine returns two values: the first is the calling status,
14258 * which indicates whether work was queued to the background discovery
14259 * thread. If true, the routine should wakeup the discovery thread;
14260 * the second is the delay parameter. If non-zero, rather than rearming
14261 * the CQ and yet another interrupt, the CQ handler should be queued so
14262 * that it is processed in a subsequent polling action. The value of
14263 * the delay indicates when to reschedule it.
f485c18d
DK
14264 **/
14265static void
32517fc0 14266__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
f485c18d 14267{
f485c18d 14268 struct lpfc_hba *phba = cq->phba;
32517fc0 14269 unsigned long delay;
f485c18d 14270 bool workposted = false;
f485c18d 14271
32517fc0
JS
14272 /* process and rearm the CQ */
14273 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14274 &delay);
4f774513 14275
32517fc0
JS
14276 if (delay) {
14277 if (!queue_delayed_work_on(cq->chann, phba->wq,
14278 &cq->sched_irqwork, delay))
14279 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14280 "0367 Cannot schedule soft IRQ "
14281 "for cqid=%d on CPU %d\n",
14282 cq->queue_id, cq->chann);
14283 }
4f774513
JS
14284
14285 /* wake up worker thread if there are works to be done */
14286 if (workposted)
14287 lpfc_worker_wake_up(phba);
14288}
14289
1ba981fd 14290/**
32517fc0
JS
14291 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14292 * interrupt
14293 * @work: pointer to work element
1ba981fd 14294 *
32517fc0 14295 * Translates from the work element to the owning CQ and calls the fast-path handler.
1ba981fd
JS
14296 **/
14297static void
32517fc0 14298lpfc_sli4_hba_process_cq(struct work_struct *work)
1ba981fd 14299{
32517fc0 14300 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
1ba981fd 14301
32517fc0 14302 __lpfc_sli4_hba_process_cq(cq);
1ba981fd
JS
14303}
14304
14305/**
32517fc0
JS
14306 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14307 * @work: pointer to work element
1ba981fd 14308 *
32517fc0 14309 * Translates from the delayed-work element to the owning CQ and calls the fast-path handler.
1ba981fd 14310 **/
32517fc0
JS
14311static void
14312lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
1ba981fd 14313{
32517fc0
JS
14314 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14315 struct lpfc_queue, sched_irqwork);
1ba981fd 14316
32517fc0 14317 __lpfc_sli4_hba_process_cq(cq);
1ba981fd
JS
14318}
14319
4f774513 14320/**
67d12733 14321 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
4f774513
JS
14322 * @irq: Interrupt number.
14323 * @dev_id: The device context pointer.
14324 *
14325 * This function is directly called from the PCI layer as an interrupt
14326 * service routine when device with SLI-4 interface spec is enabled with
14327 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14328 * ring event in the HBA. However, when the device is enabled with either
14329 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14330 * device-level interrupt handler. When the PCI slot is in error recovery
14331 * or the HBA is undergoing initialization, the interrupt handler will not
14332 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14333 * interrupt context. This function is called without any lock held.
14334 * It gets the hbalock to access and update SLI data structures. Note that
14335 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
14336 * equal to that of the FCP CQ index.
14337 *
67d12733
JS
14338 * The link attention and ELS ring attention events are handled
14339 * by the worker thread. The interrupt handler signals the worker thread
14340 * and returns for these events. This function is called without any lock
14341 * held. It gets the hbalock to access and update SLI data structures.
14342 *
4f774513
JS
14343 * This function returns IRQ_HANDLED when interrupt is handled else it
14344 * returns IRQ_NONE.
14345 **/
14346irqreturn_t
67d12733 14347lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
4f774513
JS
14348{
14349 struct lpfc_hba *phba;
895427bd 14350 struct lpfc_hba_eq_hdl *hba_eq_hdl;
4f774513 14351 struct lpfc_queue *fpeq;
4f774513
JS
14352 unsigned long iflag;
14353 int ecount = 0;
895427bd 14354 int hba_eqidx;
32517fc0
JS
14355 struct lpfc_eq_intr_info *eqi;
14356 uint32_t icnt;
4f774513
JS
14357
14358 /* Get the driver's phba structure from the dev_id */
895427bd
JS
14359 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14360 phba = hba_eq_hdl->phba;
14361 hba_eqidx = hba_eq_hdl->idx;
4f774513
JS
14362
14363 if (unlikely(!phba))
14364 return IRQ_NONE;
cdb42bec 14365 if (unlikely(!phba->sli4_hba.hdwq))
5350d872 14366 return IRQ_NONE;
4f774513
JS
14367
14368 /* Get to the EQ struct associated with this vector */
cdb42bec 14369 fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
2e90f4b5
JS
14370 if (unlikely(!fpeq))
14371 return IRQ_NONE;
4f774513
JS
14372
14373 /* Check device state for handling interrupt */
14374 if (unlikely(lpfc_intr_state_check(phba))) {
14375 /* Check again for link_state with lock held */
14376 spin_lock_irqsave(&phba->hbalock, iflag);
14377 if (phba->link_state < LPFC_LINK_DOWN)
14378 /* Flush, clear interrupt, and rearm the EQ */
14379 lpfc_sli4_eq_flush(phba, fpeq);
14380 spin_unlock_irqrestore(&phba->hbalock, iflag);
14381 return IRQ_NONE;
14382 }
14383
32517fc0
JS
14384 eqi = phba->sli4_hba.eq_info;
14385 icnt = this_cpu_inc_return(eqi->icnt);
14386 fpeq->last_cpu = smp_processor_id();
4f774513 14387
32517fc0
JS
14388 if (icnt > LPFC_EQD_ISR_TRIGGER &&
14389 phba->cfg_irq_chann == 1 &&
14390 phba->cfg_auto_imax &&
14391 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14392 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14393 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
b84daac9 14394
32517fc0
JS
14395 /* process and rearm the EQ */
14396 ecount = lpfc_sli4_process_eq(phba, fpeq);
4f774513
JS
14397
14398 if (unlikely(ecount == 0)) {
b84daac9 14399 fpeq->EQ_no_entry++;
4f774513
JS
14400 if (phba->intr_type == MSIX)
14401 			/* MSI-X interrupt: EQ is not shared, just log the empty EQ */
14402 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14403 "0358 MSI-X interrupt with no EQE\n");
14404 else
14405 			/* Non MSI-X interrupt: EQ may be shared, report as not handled */
14406 return IRQ_NONE;
14407 }
14408
14409 return IRQ_HANDLED;
14410} /* lpfc_sli4_fp_intr_handler */
14411
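/*
 * Interrupt-coalescing note (added, illustrative): the handler above keeps a
 * per-CPU count of EQ interrupts (eq_info->icnt).  When that count exceeds
 * LPFC_EQD_ISR_TRIGGER on a single-vector configuration with cfg_auto_imax
 * set, and the port supports the EQ delay register (LPFC_SLI_USE_EQDR), the
 * EQ delay is raised to LPFC_MAX_AUTO_EQ_DELAY to throttle further
 * interrupts before the EQ is processed and rearmed.
 */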
14412/**
14413 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14414 * @irq: Interrupt number.
14415 * @dev_id: The device context pointer.
14416 *
14417 * This function is the device-level interrupt handler to device with SLI-4
14418 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14419 * interrupt mode is enabled and there is an event in the HBA which requires
14420 * driver attention. This function invokes the slow-path interrupt attention
14421 * handling function and fast-path interrupt attention handling function in
14422 * turn to process the relevant HBA attention events. This function is called
14423 * without any lock held. It gets the hbalock to access and update SLI data
14424 * structures.
14425 *
14426 * This function returns IRQ_HANDLED when interrupt is handled, else it
14427 * returns IRQ_NONE.
14428 **/
14429irqreturn_t
14430lpfc_sli4_intr_handler(int irq, void *dev_id)
14431{
14432 struct lpfc_hba *phba;
67d12733
JS
14433 irqreturn_t hba_irq_rc;
14434 bool hba_handled = false;
895427bd 14435 int qidx;
4f774513
JS
14436
14437 /* Get the driver's phba structure from the dev_id */
14438 phba = (struct lpfc_hba *)dev_id;
14439
14440 if (unlikely(!phba))
14441 return IRQ_NONE;
14442
4f774513
JS
14443 /*
14444 * Invoke fast-path host attention interrupt handling as appropriate.
14445 */
6a828b0f 14446 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
67d12733 14447 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
895427bd 14448 &phba->sli4_hba.hba_eq_hdl[qidx]);
67d12733
JS
14449 if (hba_irq_rc == IRQ_HANDLED)
14450 hba_handled |= true;
4f774513
JS
14451 }
14452
67d12733 14453 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
4f774513
JS
14454} /* lpfc_sli4_intr_handler */
14455
14456/**
14457 * lpfc_sli4_queue_free - free a queue structure and associated memory
14458 * @queue: The queue structure to free.
14459 *
b595076a 14460 * This function frees a queue structure and the DMAable memory used for
4f774513
JS
14461 * the host resident queue. This function must be called after destroying the
14462 * queue on the HBA.
14463 **/
14464void
14465lpfc_sli4_queue_free(struct lpfc_queue *queue)
14466{
14467 struct lpfc_dmabuf *dmabuf;
14468
14469 if (!queue)
14470 return;
14471
4645f7b5
JS
14472 if (!list_empty(&queue->wq_list))
14473 list_del(&queue->wq_list);
14474
4f774513
JS
14475 while (!list_empty(&queue->page_list)) {
14476 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14477 list);
81b96eda 14478 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
4f774513
JS
14479 dmabuf->virt, dmabuf->phys);
14480 kfree(dmabuf);
14481 }
895427bd
JS
14482 if (queue->rqbp) {
14483 lpfc_free_rq_buffer(queue->phba, queue);
14484 kfree(queue->rqbp);
14485 }
d1f525aa 14486
32517fc0
JS
14487 if (!list_empty(&queue->cpu_list))
14488 list_del(&queue->cpu_list);
14489
4f774513
JS
14490 kfree(queue);
14491 return;
14492}
14493
14494/**
14495 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14496 * @phba: The HBA that this queue is being created on.
81b96eda 14497 * @page_size: The size of a queue page
4f774513
JS
14498 * @entry_size: The size of each queue entry for this queue.
14499 * @entry_count: The number of entries that this queue will handle.
c1a21ebc 14500 * @cpu: The cpu that will primarily utilize this queue.
4f774513
JS
14501 *
14502 * This function allocates a queue structure and the DMAable memory used for
14503 * the host resident queue. This function must be called before creating the
14504 * queue on the HBA.
14505 **/
14506struct lpfc_queue *
81b96eda 14507lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
c1a21ebc 14508 uint32_t entry_size, uint32_t entry_count, int cpu)
4f774513
JS
14509{
14510 struct lpfc_queue *queue;
14511 struct lpfc_dmabuf *dmabuf;
cb5172ea 14512 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
9afbee3d 14513 uint16_t x, pgcnt;
4f774513 14514
cb5172ea 14515 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 14516 hw_page_size = page_size;
cb5172ea 14517
9afbee3d
JS
14518 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14519
14520 	/* If needed, adjust page count to match the max the adapter supports */
14521 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14522 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14523
c1a21ebc
JS
14524 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14525 GFP_KERNEL, cpu_to_node(cpu));
4f774513
JS
14526 if (!queue)
14527 return NULL;
895427bd 14528
4f774513 14529 INIT_LIST_HEAD(&queue->list);
895427bd 14530 INIT_LIST_HEAD(&queue->wq_list);
6e8e1c14 14531 INIT_LIST_HEAD(&queue->wqfull_list);
4f774513
JS
14532 INIT_LIST_HEAD(&queue->page_list);
14533 INIT_LIST_HEAD(&queue->child_list);
32517fc0 14534 INIT_LIST_HEAD(&queue->cpu_list);
81b96eda
JS
14535
14536 /* Set queue parameters now. If the system cannot provide memory
14537 * resources, the free routine needs to know what was allocated.
14538 */
9afbee3d
JS
14539 queue->page_count = pgcnt;
14540 queue->q_pgs = (void **)&queue[1];
14541 queue->entry_cnt_per_pg = hw_page_size / entry_size;
81b96eda
JS
14542 queue->entry_size = entry_size;
14543 queue->entry_count = entry_count;
14544 queue->page_size = hw_page_size;
14545 queue->phba = phba;
14546
9afbee3d 14547 for (x = 0; x < queue->page_count; x++) {
c1a21ebc
JS
14548 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14549 dev_to_node(&phba->pcidev->dev));
4f774513
JS
14550 if (!dmabuf)
14551 goto out_fail;
750afb08
LC
14552 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14553 hw_page_size, &dmabuf->phys,
14554 GFP_KERNEL);
4f774513
JS
14555 if (!dmabuf->virt) {
14556 kfree(dmabuf);
14557 goto out_fail;
14558 }
14559 dmabuf->buffer_tag = x;
14560 list_add_tail(&dmabuf->list, &queue->page_list);
9afbee3d
JS
14561 		/* use lpfc_sli4_qe to index a particular entry in this page */
14562 queue->q_pgs[x] = dmabuf->virt;
4f774513 14563 }
f485c18d
DK
14564 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14565 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
32517fc0
JS
14566 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14567 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
4f774513 14568
32517fc0 14569 /* notify_interval will be set during q creation */
64eb4dcb 14570
4f774513
JS
14571 return queue;
14572out_fail:
14573 lpfc_sli4_queue_free(queue);
14574 return NULL;
14575}
14576
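/*
 * Worked example (added, illustrative, assuming a 4 KB SLI4 page size): for
 * a queue with 64-byte entries and 1024 entries,
 *
 *	pgcnt = ALIGN(64 * 1024, 4096) / 4096 = 16 pages
 *	entry_cnt_per_pg = 4096 / 64 = 64 entries per page
 *
 * pgcnt is then capped at pc_sli4_params.wqpcnt, and one DMA-coherent page
 * is allocated per slot of q_pgs[], so lpfc_sli4_qe() can locate entry N by
 * selecting page N / entry_cnt_per_pg and offsetting within that page.
 */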
962bc51b
JS
14577/**
14578 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14579 * @phba: HBA structure that indicates port to create a queue on.
14580 * @pci_barset: PCI BAR set flag.
14581 *
14582 * This function performs an iomap of the specified PCI BAR address to a host
14583 * memory address, if not already done, and returns it. The returned host
14584 * memory address can be NULL.
14585 */
14586static void __iomem *
14587lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14588{
962bc51b
JS
14589 if (!phba->pcidev)
14590 return NULL;
962bc51b
JS
14591
14592 switch (pci_barset) {
14593 case WQ_PCI_BAR_0_AND_1:
962bc51b
JS
14594 return phba->pci_bar0_memmap_p;
14595 case WQ_PCI_BAR_2_AND_3:
962bc51b
JS
14596 return phba->pci_bar2_memmap_p;
14597 case WQ_PCI_BAR_4_AND_5:
962bc51b
JS
14598 return phba->pci_bar4_memmap_p;
14599 default:
14600 break;
14601 }
14602 return NULL;
14603}
14604
173edbb2 14605/**
cb733e35
JS
14606 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14607 * @phba: HBA structure that EQs are on.
14608 * @startq: The starting EQ index to modify
14609 * @numq: The number of EQs (consecutive indexes) to modify
14610 * @usdelay: amount of delay
173edbb2 14611 *
cb733e35
JS
14612 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14613 * is set either by writing to a register (if supported by the SLI Port)
14614 * or by mailbox command. The mailbox command allows several EQs to be
14615 * updated at once.
173edbb2 14616 *
cb733e35
JS
14617 * The @phba struct is used to send a mailbox command to HBA. The @startq
14618 * is used to get the starting EQ index to change. The @numq value is
14619 * used to specify how many consecutive EQ indexes, starting at EQ index,
14620 * are to be changed. This function waits for any
14621 * mailbox commands to finish before returning.
173edbb2 14622 *
cb733e35
JS
14623 * This function does not return a value. If the mailbox buffer cannot be
14624 * allocated or the MODIFY_EQ_DELAY command fails, the error is logged; in
14625 * that case some EQs may not have had their delay multiplier changed.
173edbb2 14627 **/
cb733e35 14628void
0cf07f84 14629lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
cb733e35 14630 uint32_t numq, uint32_t usdelay)
173edbb2
JS
14631{
14632 struct lpfc_mbx_modify_eq_delay *eq_delay;
14633 LPFC_MBOXQ_t *mbox;
14634 struct lpfc_queue *eq;
cb733e35 14635 int cnt = 0, rc, length;
173edbb2 14636 uint32_t shdr_status, shdr_add_status;
cb733e35 14637 uint32_t dmult;
895427bd 14638 int qidx;
173edbb2 14639 union lpfc_sli4_cfg_shdr *shdr;
173edbb2 14640
6a828b0f 14641 if (startq >= phba->cfg_irq_chann)
cb733e35
JS
14642 return;
14643
14644 if (usdelay > 0xFFFF) {
14645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14646 "6429 usdelay %d too large. Scaled down to "
14647 "0xFFFF.\n", usdelay);
14648 usdelay = 0xFFFF;
14649 }
14650
14651 /* set values by EQ_DELAY register if supported */
14652 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14653 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14654 eq = phba->sli4_hba.hdwq[qidx].hba_eq;
14655 if (!eq)
14656 continue;
14657
32517fc0 14658 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
cb733e35
JS
14659
14660 if (++cnt >= numq)
14661 break;
14662 }
14663
14664 return;
14665 }
14666
14667 /* Otherwise, set values by mailbox cmd */
173edbb2
JS
14668
14669 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cb733e35
JS
14670 if (!mbox) {
14671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14672 "6428 Failed allocating mailbox cmd buffer."
14673 " EQ delay was not set.\n");
14674 return;
14675 }
173edbb2
JS
14676 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14677 sizeof(struct lpfc_sli4_cfg_mhdr));
14678 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14679 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14680 length, LPFC_SLI4_MBX_EMBED);
14681 eq_delay = &mbox->u.mqe.un.eq_delay;
14682
14683 	/* Calculate delay multiplier from the requested delay in microseconds */
cb733e35
JS
14684 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14685 if (dmult)
14686 dmult--;
0cf07f84
JS
14687 if (dmult > LPFC_DMULT_MAX)
14688 dmult = LPFC_DMULT_MAX;
173edbb2 14689
6a828b0f 14690 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
cdb42bec 14691 eq = phba->sli4_hba.hdwq[qidx].hba_eq;
173edbb2
JS
14692 if (!eq)
14693 continue;
cb733e35 14694 eq->q_mode = usdelay;
173edbb2
JS
14695 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14696 eq_delay->u.request.eq[cnt].phase = 0;
14697 eq_delay->u.request.eq[cnt].delay_multi = dmult;
0cf07f84 14698
cb733e35 14699 if (++cnt >= numq)
173edbb2
JS
14700 break;
14701 }
14702 eq_delay->u.request.num_eq = cnt;
14703
14704 mbox->vport = phba->pport;
14705 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14706 mbox->ctx_buf = NULL;
14707 mbox->ctx_ndlp = NULL;
173edbb2
JS
14708 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14709 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14710 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14711 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14712 if (shdr_status || shdr_add_status || rc) {
14713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14714 "2512 MODIFY_EQ_DELAY mailbox failed with "
14715 "status x%x add_status x%x, mbx status x%x\n",
14716 shdr_status, shdr_add_status, rc);
173edbb2
JS
14717 }
14718 mempool_free(mbox, phba->mbox_mem_pool);
cb733e35 14719 return;
173edbb2
JS
14720}
14721
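/*
 * Note on the two update paths (added for clarity): when the port supports
 * the EQ delay register (LPFC_SLI_USE_EQDR), each EQ's delay is written
 * directly via lpfc_sli4_mod_hba_eq_delay() using the microsecond value.
 * Otherwise a MODIFY_EQ_DELAY mailbox command is built that converts the
 * delay to a multiplier, dmult = (usdelay * LPFC_DMULT_CONST) /
 * LPFC_SEC_TO_USEC (then decremented and clamped to LPFC_DMULT_MAX), and
 * applies it to up to @numq consecutive EQs in a single command.
 */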
4f774513
JS
14722/**
14723 * lpfc_eq_create - Create an Event Queue on the HBA
14724 * @phba: HBA structure that indicates port to create a queue on.
14725 * @eq: The queue structure to use to create the event queue.
14726 * @imax: The maximum interrupt per second limit.
14727 *
14728 * This function creates an event queue, as detailed in @eq, on a port,
14729 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14730 *
14731 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14732 * is used to get the entry count and entry size that are necessary to
14733 * determine the number of pages to allocate and use for this queue. This
14734 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14735 * event queue. This function waits for the mailbox
14736 * command to finish before continuing.
14737 *
14738 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14739 * memory this function will return -ENOMEM. If the queue create mailbox command
14740 * fails this function will return -ENXIO.
4f774513 14741 **/
a2fc4aef 14742int
ee02006b 14743lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
4f774513
JS
14744{
14745 struct lpfc_mbx_eq_create *eq_create;
14746 LPFC_MBOXQ_t *mbox;
14747 int rc, length, status = 0;
14748 struct lpfc_dmabuf *dmabuf;
14749 uint32_t shdr_status, shdr_add_status;
14750 union lpfc_sli4_cfg_shdr *shdr;
14751 uint16_t dmult;
49198b37
JS
14752 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14753
2e90f4b5
JS
14754 /* sanity check on queue memory */
14755 if (!eq)
14756 return -ENODEV;
49198b37
JS
14757 if (!phba->sli4_hba.pc_sli4_params.supported)
14758 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
14759
14760 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14761 if (!mbox)
14762 return -ENOMEM;
14763 length = (sizeof(struct lpfc_mbx_eq_create) -
14764 sizeof(struct lpfc_sli4_cfg_mhdr));
14765 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14766 LPFC_MBOX_OPCODE_EQ_CREATE,
14767 length, LPFC_SLI4_MBX_EMBED);
14768 eq_create = &mbox->u.mqe.un.eq_create;
7365f6fd 14769 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
4f774513
JS
14770 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14771 eq->page_count);
14772 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14773 LPFC_EQE_SIZE);
14774 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
7365f6fd
JS
14775
14776 /* Use version 2 of CREATE_EQ if eqav is set */
14777 if (phba->sli4_hba.pc_sli4_params.eqav) {
14778 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14779 LPFC_Q_CREATE_VERSION_2);
14780 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14781 phba->sli4_hba.pc_sli4_params.eqav);
14782 }
14783
2c9c5a00
JS
14784 /* don't setup delay multiplier using EQ_CREATE */
14785 dmult = 0;
4f774513
JS
14786 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14787 dmult);
14788 switch (eq->entry_count) {
14789 default:
14790 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14791 "0360 Unsupported EQ count. (%d)\n",
14792 eq->entry_count);
14793 if (eq->entry_count < 256)
14794 return -EINVAL;
5bd5f66c 14795 /* fall through - otherwise default to smallest count */
4f774513
JS
14796 case 256:
14797 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14798 LPFC_EQ_CNT_256);
14799 break;
14800 case 512:
14801 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14802 LPFC_EQ_CNT_512);
14803 break;
14804 case 1024:
14805 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14806 LPFC_EQ_CNT_1024);
14807 break;
14808 case 2048:
14809 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14810 LPFC_EQ_CNT_2048);
14811 break;
14812 case 4096:
14813 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14814 LPFC_EQ_CNT_4096);
14815 break;
14816 }
14817 list_for_each_entry(dmabuf, &eq->page_list, list) {
49198b37 14818 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
14819 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14820 putPaddrLow(dmabuf->phys);
14821 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14822 putPaddrHigh(dmabuf->phys);
14823 }
14824 mbox->vport = phba->pport;
14825 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14826 mbox->ctx_buf = NULL;
14827 mbox->ctx_ndlp = NULL;
4f774513 14828 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4f774513
JS
14829 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14830 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14831 if (shdr_status || shdr_add_status || rc) {
14832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14833 "2500 EQ_CREATE mailbox failed with "
14834 "status x%x add_status x%x, mbx status x%x\n",
14835 shdr_status, shdr_add_status, rc);
14836 status = -ENXIO;
14837 }
14838 eq->type = LPFC_EQ;
14839 eq->subtype = LPFC_NONE;
14840 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14841 if (eq->queue_id == 0xFFFF)
14842 status = -ENXIO;
14843 eq->host_index = 0;
32517fc0
JS
14844 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14845 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
4f774513 14846
8fa38513 14847 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
14848 return status;
14849}
14850
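/*
 * Sizing note (added for clarity): EQ_CREATE only encodes entry counts of
 * 256, 512, 1024, 2048 or 4096 (LPFC_EQ_CNT_*).  Any other count is logged
 * as unsupported; counts below 256 fail with -EINVAL, while larger unlisted
 * counts fall through and are created with the smallest (256-entry)
 * encoding.
 */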
14851/**
14852 * lpfc_cq_create - Create a Completion Queue on the HBA
14853 * @phba: HBA structure that indicates port to create a queue on.
14854 * @cq: The queue structure to use to create the completion queue.
14855 * @eq: The event queue to bind this completion queue to.
14856 *
14857 * This function creates a completion queue, as detailed in @cq, on a port,
14858 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14859 *
14860 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14861 * is used to get the entry count and entry size that are necessary to
14862 * determine the number of pages to allocate and use for this queue. The @eq
14863 * is used to indicate which event queue to bind this completion queue to. This
14864 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14865 * completion queue. This function waits for the mailbox
14866 * command to finish before continuing.
14867 *
14868 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14869 * memory this function will return -ENOMEM. If the queue create mailbox command
14870 * fails this function will return -ENXIO.
4f774513 14871 **/
a2fc4aef 14872int
4f774513
JS
14873lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14874 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14875{
14876 struct lpfc_mbx_cq_create *cq_create;
14877 struct lpfc_dmabuf *dmabuf;
14878 LPFC_MBOXQ_t *mbox;
14879 int rc, length, status = 0;
14880 uint32_t shdr_status, shdr_add_status;
14881 union lpfc_sli4_cfg_shdr *shdr;
49198b37 14882
2e90f4b5
JS
14883 /* sanity check on queue memory */
14884 if (!cq || !eq)
14885 return -ENODEV;
49198b37 14886
4f774513
JS
14887 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14888 if (!mbox)
14889 return -ENOMEM;
14890 length = (sizeof(struct lpfc_mbx_cq_create) -
14891 sizeof(struct lpfc_sli4_cfg_mhdr));
14892 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14893 LPFC_MBOX_OPCODE_CQ_CREATE,
14894 length, LPFC_SLI4_MBX_EMBED);
14895 cq_create = &mbox->u.mqe.un.cq_create;
5a6f133e 14896 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
4f774513
JS
14897 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14898 cq->page_count);
14899 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14900 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
5a6f133e
JS
14901 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14902 phba->sli4_hba.pc_sli4_params.cqv);
14903 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
81b96eda
JS
14904 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14905 (cq->page_size / SLI4_PAGE_SIZE));
5a6f133e
JS
14906 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14907 eq->queue_id);
7365f6fd
JS
14908 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14909 phba->sli4_hba.pc_sli4_params.cqav);
5a6f133e
JS
14910 } else {
14911 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14912 eq->queue_id);
14913 }
4f774513 14914 switch (cq->entry_count) {
81b96eda
JS
14915 case 2048:
14916 case 4096:
14917 if (phba->sli4_hba.pc_sli4_params.cqv ==
14918 LPFC_Q_CREATE_VERSION_2) {
14919 cq_create->u.request.context.lpfc_cq_context_count =
14920 cq->entry_count;
14921 bf_set(lpfc_cq_context_count,
14922 &cq_create->u.request.context,
14923 LPFC_CQ_CNT_WORD7);
14924 break;
14925 }
5bd5f66c 14926 /* fall through */
4f774513
JS
14927 default:
14928 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2ea259ee 14929 "0361 Unsupported CQ count: "
64eb4dcb 14930 "entry cnt %d sz %d pg cnt %d\n",
2ea259ee 14931 cq->entry_count, cq->entry_size,
64eb4dcb 14932 cq->page_count);
4f4c1863
JS
14933 if (cq->entry_count < 256) {
14934 status = -EINVAL;
14935 goto out;
14936 }
5bd5f66c 14937 /* fall through - otherwise default to smallest count */
4f774513
JS
14938 case 256:
14939 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14940 LPFC_CQ_CNT_256);
14941 break;
14942 case 512:
14943 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14944 LPFC_CQ_CNT_512);
14945 break;
14946 case 1024:
14947 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14948 LPFC_CQ_CNT_1024);
14949 break;
14950 }
14951 list_for_each_entry(dmabuf, &cq->page_list, list) {
81b96eda 14952 memset(dmabuf->virt, 0, cq->page_size);
4f774513
JS
14953 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14954 putPaddrLow(dmabuf->phys);
14955 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14956 putPaddrHigh(dmabuf->phys);
14957 }
14958 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14959
14960 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
14961 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14962 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14963 if (shdr_status || shdr_add_status || rc) {
14964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14965 "2501 CQ_CREATE mailbox failed with "
14966 "status x%x add_status x%x, mbx status x%x\n",
14967 shdr_status, shdr_add_status, rc);
14968 status = -ENXIO;
14969 goto out;
14970 }
14971 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14972 if (cq->queue_id == 0xFFFF) {
14973 status = -ENXIO;
14974 goto out;
14975 }
14976 /* link the cq onto the parent eq child list */
14977 list_add_tail(&cq->list, &eq->child_list);
14978 /* Set up completion queue's type and subtype */
14979 cq->type = type;
14980 cq->subtype = subtype;
14981 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
2a622bfb 14982 cq->assoc_qid = eq->queue_id;
6a828b0f 14983 cq->assoc_qp = eq;
4f774513 14984 cq->host_index = 0;
32517fc0
JS
14985 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14986 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
4f774513 14987
6a828b0f
JS
14988 if (cq->queue_id > phba->sli4_hba.cq_max)
14989 phba->sli4_hba.cq_max = cq->queue_id;
8fa38513
JS
14990out:
14991 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
14992 return status;
14993}
14994
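/*
 * Version note (added for clarity): when the port reports CQ create version
 * 2 (pc_sli4_params.cqv), the request carries the page size in multiples of
 * SLI4_PAGE_SIZE, binds the EQ via lpfc_cq_eq_id_2, and may use the larger
 * 2048/4096 entry counts through the LPFC_CQ_CNT_WORD7 encoding; otherwise
 * only 256/512/1024 entry CQs are created and the older lpfc_cq_eq_id field
 * is used.
 */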
2d7dbc4c
JS
14995/**
14996 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14997 * @phba: HBA structure that indicates port to create a queue on.
14998 * @cqp: The queue structure array to use to create the completion queues.
cdb42bec 14999 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
2d7dbc4c
JS
15000 *
15001 * This function creates a set of completion queues to support MRQ,
15002 * as detailed in @cqp, on a port described by @phba, by sending a
15003 * CREATE_CQ_SET mailbox command to the HBA.
15004 *
15005 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15006 * is used to get the entry count and entry size that are necessary to
15007 * determine the number of pages to allocate and use for this queue. The @eq
15008 * is used to indicate which event queue to bind this completion queue to. This
15009 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15010 * completion queue. This function is asynchronous and will wait for the mailbox
15011 * command to finish before continuing.
15012 *
15013 * On success this function will return a zero. If unable to allocate enough
15014 * memory this function will return -ENOMEM. If the queue create mailbox command
15015 * fails this function will return -ENXIO.
15016 **/
15017int
15018lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cdb42bec
JS
15019 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15020 uint32_t subtype)
2d7dbc4c
JS
15021{
15022 struct lpfc_queue *cq;
15023 struct lpfc_queue *eq;
15024 struct lpfc_mbx_cq_create_set *cq_set;
15025 struct lpfc_dmabuf *dmabuf;
15026 LPFC_MBOXQ_t *mbox;
15027 int rc, length, alloclen, status = 0;
15028 int cnt, idx, numcq, page_idx = 0;
15029 uint32_t shdr_status, shdr_add_status;
15030 union lpfc_sli4_cfg_shdr *shdr;
15031 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15032
15033 /* sanity check on queue memory */
15034 numcq = phba->cfg_nvmet_mrq;
cdb42bec 15035 if (!cqp || !hdwq || !numcq)
2d7dbc4c 15036 return -ENODEV;
2d7dbc4c
JS
15037
15038 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15039 if (!mbox)
15040 return -ENOMEM;
15041
15042 length = sizeof(struct lpfc_mbx_cq_create_set);
15043 length += ((numcq * cqp[0]->page_count) *
15044 sizeof(struct dma_address));
15045 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15046 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15047 LPFC_SLI4_MBX_NEMBED);
15048 if (alloclen < length) {
15049 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15050 "3098 Allocated DMA memory size (%d) is "
15051 "less than the requested DMA memory size "
15052 "(%d)\n", alloclen, length);
15053 status = -ENOMEM;
15054 goto out;
15055 }
15056 cq_set = mbox->sge_array->addr[0];
15057 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15058 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15059
15060 for (idx = 0; idx < numcq; idx++) {
15061 cq = cqp[idx];
cdb42bec 15062 eq = hdwq[idx].hba_eq;
2d7dbc4c
JS
15063 if (!cq || !eq) {
15064 status = -ENOMEM;
15065 goto out;
15066 }
81b96eda
JS
15067 if (!phba->sli4_hba.pc_sli4_params.supported)
15068 hw_page_size = cq->page_size;
2d7dbc4c
JS
15069
15070 switch (idx) {
15071 case 0:
15072 bf_set(lpfc_mbx_cq_create_set_page_size,
15073 &cq_set->u.request,
15074 (hw_page_size / SLI4_PAGE_SIZE));
15075 bf_set(lpfc_mbx_cq_create_set_num_pages,
15076 &cq_set->u.request, cq->page_count);
15077 bf_set(lpfc_mbx_cq_create_set_evt,
15078 &cq_set->u.request, 1);
15079 bf_set(lpfc_mbx_cq_create_set_valid,
15080 &cq_set->u.request, 1);
15081 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15082 &cq_set->u.request, 0);
15083 bf_set(lpfc_mbx_cq_create_set_num_cq,
15084 &cq_set->u.request, numcq);
7365f6fd
JS
15085 bf_set(lpfc_mbx_cq_create_set_autovalid,
15086 &cq_set->u.request,
15087 phba->sli4_hba.pc_sli4_params.cqav);
2d7dbc4c 15088 switch (cq->entry_count) {
81b96eda
JS
15089 case 2048:
15090 case 4096:
15091 if (phba->sli4_hba.pc_sli4_params.cqv ==
15092 LPFC_Q_CREATE_VERSION_2) {
15093 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15094 &cq_set->u.request,
15095 cq->entry_count);
15096 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15097 &cq_set->u.request,
15098 LPFC_CQ_CNT_WORD7);
15099 break;
15100 }
5bd5f66c 15101 /* fall through */
2d7dbc4c
JS
15102 default:
15103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15104 "3118 Bad CQ count. (%d)\n",
15105 cq->entry_count);
15106 if (cq->entry_count < 256) {
15107 status = -EINVAL;
15108 goto out;
15109 }
5bd5f66c 15110 /* fall through - otherwise default to smallest */
2d7dbc4c
JS
15111 case 256:
15112 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15113 &cq_set->u.request, LPFC_CQ_CNT_256);
15114 break;
15115 case 512:
15116 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15117 &cq_set->u.request, LPFC_CQ_CNT_512);
15118 break;
15119 case 1024:
15120 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15121 &cq_set->u.request, LPFC_CQ_CNT_1024);
15122 break;
15123 }
15124 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15125 &cq_set->u.request, eq->queue_id);
15126 break;
15127 case 1:
15128 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15129 &cq_set->u.request, eq->queue_id);
15130 break;
15131 case 2:
15132 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15133 &cq_set->u.request, eq->queue_id);
15134 break;
15135 case 3:
15136 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15137 &cq_set->u.request, eq->queue_id);
15138 break;
15139 case 4:
15140 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15141 &cq_set->u.request, eq->queue_id);
15142 break;
15143 case 5:
15144 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15145 &cq_set->u.request, eq->queue_id);
15146 break;
15147 case 6:
15148 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15149 &cq_set->u.request, eq->queue_id);
15150 break;
15151 case 7:
15152 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15153 &cq_set->u.request, eq->queue_id);
15154 break;
15155 case 8:
15156 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15157 &cq_set->u.request, eq->queue_id);
15158 break;
15159 case 9:
15160 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15161 &cq_set->u.request, eq->queue_id);
15162 break;
15163 case 10:
15164 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15165 &cq_set->u.request, eq->queue_id);
15166 break;
15167 case 11:
15168 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15169 &cq_set->u.request, eq->queue_id);
15170 break;
15171 case 12:
15172 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15173 &cq_set->u.request, eq->queue_id);
15174 break;
15175 case 13:
15176 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15177 &cq_set->u.request, eq->queue_id);
15178 break;
15179 case 14:
15180 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15181 &cq_set->u.request, eq->queue_id);
15182 break;
15183 case 15:
15184 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15185 &cq_set->u.request, eq->queue_id);
15186 break;
15187 }
15188
15189 /* link the cq onto the parent eq child list */
15190 list_add_tail(&cq->list, &eq->child_list);
15191 /* Set up completion queue's type and subtype */
15192 cq->type = type;
15193 cq->subtype = subtype;
15194 cq->assoc_qid = eq->queue_id;
6a828b0f 15195 cq->assoc_qp = eq;
2d7dbc4c 15196 cq->host_index = 0;
32517fc0
JS
15197 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15198 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15199 cq->entry_count);
81b96eda 15200 cq->chann = idx;
2d7dbc4c
JS
15201
15202 rc = 0;
15203 list_for_each_entry(dmabuf, &cq->page_list, list) {
15204 memset(dmabuf->virt, 0, hw_page_size);
15205 cnt = page_idx + dmabuf->buffer_tag;
15206 cq_set->u.request.page[cnt].addr_lo =
15207 putPaddrLow(dmabuf->phys);
15208 cq_set->u.request.page[cnt].addr_hi =
15209 putPaddrHigh(dmabuf->phys);
15210 rc++;
15211 }
15212 page_idx += rc;
15213 }
15214
15215 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15216
15217 /* The IOCTL status is embedded in the mailbox subheader. */
15218 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15219 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15220 if (shdr_status || shdr_add_status || rc) {
15221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15222 "3119 CQ_CREATE_SET mailbox failed with "
15223 "status x%x add_status x%x, mbx status x%x\n",
15224 shdr_status, shdr_add_status, rc);
15225 status = -ENXIO;
15226 goto out;
15227 }
15228 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15229 if (rc == 0xFFFF) {
15230 status = -ENXIO;
15231 goto out;
15232 }
15233
15234 for (idx = 0; idx < numcq; idx++) {
15235 cq = cqp[idx];
15236 cq->queue_id = rc + idx;
6a828b0f
JS
15237 if (cq->queue_id > phba->sli4_hba.cq_max)
15238 phba->sli4_hba.cq_max = cq->queue_id;
2d7dbc4c
JS
15239 }
15240
15241out:
15242 lpfc_sli4_mbox_cmd_free(phba, mbox);
15243 return status;
15244}
15245
b19a061a
JS
15246/**
15247 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15248 * @phba: HBA structure that indicates port to create a queue on.
15249 * @mq: The queue structure to use to create the mailbox queue.
15250 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15251 * @cq: The completion queue to associate with this mq.
15252 *
15253 * This function provides failback (fb) functionality when the
15254 * mq_create_ext fails on older FW generations. Its purpose is identical
15255 * to mq_create_ext otherwise.
15256 *
15257 * This routine cannot fail as all attributes were previously accessed and
15258 * initialized in mq_create_ext.
15259 **/
15260static void
15261lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15262 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15263{
15264 struct lpfc_mbx_mq_create *mq_create;
15265 struct lpfc_dmabuf *dmabuf;
15266 int length;
15267
15268 length = (sizeof(struct lpfc_mbx_mq_create) -
15269 sizeof(struct lpfc_sli4_cfg_mhdr));
15270 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15271 LPFC_MBOX_OPCODE_MQ_CREATE,
15272 length, LPFC_SLI4_MBX_EMBED);
15273 mq_create = &mbox->u.mqe.un.mq_create;
15274 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15275 mq->page_count);
15276 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15277 cq->queue_id);
15278 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15279 switch (mq->entry_count) {
15280 case 16:
5a6f133e
JS
15281 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15282 LPFC_MQ_RING_SIZE_16);
b19a061a
JS
15283 break;
15284 case 32:
5a6f133e
JS
15285 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15286 LPFC_MQ_RING_SIZE_32);
b19a061a
JS
15287 break;
15288 case 64:
5a6f133e
JS
15289 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15290 LPFC_MQ_RING_SIZE_64);
b19a061a
JS
15291 break;
15292 case 128:
5a6f133e
JS
15293 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15294 LPFC_MQ_RING_SIZE_128);
b19a061a
JS
15295 break;
15296 }
15297 list_for_each_entry(dmabuf, &mq->page_list, list) {
15298 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15299 putPaddrLow(dmabuf->phys);
15300 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15301 putPaddrHigh(dmabuf->phys);
15302 }
15303}
15304
04c68496
JS
15305/**
15306 * lpfc_mq_create - Create a mailbox Queue on the HBA
15307 * @phba: HBA structure that indicates port to create a queue on.
15308 * @mq: The queue structure to use to create the mailbox queue.
b19a061a
JS
15309 * @cq: The completion queue to associate with this mq.
15310 * @subtype: The queue's subtype.
04c68496
JS
15311 *
15312 * This function creates a mailbox queue, as detailed in @mq, on a port,
15313 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15314 *
15315 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15316 * is used to get the entry count and entry size that are necessary to
15317 * determine the number of pages to allocate and use for this queue. This
15318 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15319 * mailbox queue. This function waits for the mailbox
15320 * command to finish before continuing.
15321 *
15322 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15323 * memory this function will return -ENOMEM. If the queue create mailbox command
15324 * fails this function will return -ENXIO.
04c68496 15325 **/
b19a061a 15326int32_t
04c68496
JS
15327lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15328 struct lpfc_queue *cq, uint32_t subtype)
15329{
15330 struct lpfc_mbx_mq_create *mq_create;
b19a061a 15331 struct lpfc_mbx_mq_create_ext *mq_create_ext;
04c68496
JS
15332 struct lpfc_dmabuf *dmabuf;
15333 LPFC_MBOXQ_t *mbox;
15334 int rc, length, status = 0;
15335 uint32_t shdr_status, shdr_add_status;
15336 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15337 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 15338
2e90f4b5
JS
15339 /* sanity check on queue memory */
15340 if (!mq || !cq)
15341 return -ENODEV;
49198b37
JS
15342 if (!phba->sli4_hba.pc_sli4_params.supported)
15343 hw_page_size = SLI4_PAGE_SIZE;
b19a061a 15344
04c68496
JS
15345 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15346 if (!mbox)
15347 return -ENOMEM;
b19a061a 15348 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
04c68496
JS
15349 sizeof(struct lpfc_sli4_cfg_mhdr));
15350 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
b19a061a 15351 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
04c68496 15352 length, LPFC_SLI4_MBX_EMBED);
b19a061a
JS
15353
15354 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
5a6f133e 15355 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
70f3c073
JS
15356 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15357 &mq_create_ext->u.request, mq->page_count);
15358 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15359 &mq_create_ext->u.request, 1);
15360 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
b19a061a
JS
15361 &mq_create_ext->u.request, 1);
15362 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15363 &mq_create_ext->u.request, 1);
70f3c073
JS
15364 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15365 &mq_create_ext->u.request, 1);
15366 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15367 &mq_create_ext->u.request, 1);
b19a061a 15368 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
5a6f133e
JS
15369 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15370 phba->sli4_hba.pc_sli4_params.mqv);
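	/*
	 * Note: with MQ create version 1 the CQ ID is carried in the request
	 * itself; older versions expect it in the MQ context instead.
	 */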
15371 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15372 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15373 cq->queue_id);
15374 else
15375 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15376 cq->queue_id);
04c68496
JS
15377 switch (mq->entry_count) {
15378 default:
15379 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15380 "0362 Unsupported MQ count. (%d)\n",
15381 mq->entry_count);
4f4c1863
JS
15382 if (mq->entry_count < 16) {
15383 status = -EINVAL;
15384 goto out;
15385 }
5bd5f66c 15386 /* fall through - otherwise default to smallest count */
04c68496 15387 case 16:
5a6f133e
JS
15388 bf_set(lpfc_mq_context_ring_size,
15389 &mq_create_ext->u.request.context,
15390 LPFC_MQ_RING_SIZE_16);
04c68496
JS
15391 break;
15392 case 32:
5a6f133e
JS
15393 bf_set(lpfc_mq_context_ring_size,
15394 &mq_create_ext->u.request.context,
15395 LPFC_MQ_RING_SIZE_32);
04c68496
JS
15396 break;
15397 case 64:
5a6f133e
JS
15398 bf_set(lpfc_mq_context_ring_size,
15399 &mq_create_ext->u.request.context,
15400 LPFC_MQ_RING_SIZE_64);
04c68496
JS
15401 break;
15402 case 128:
5a6f133e
JS
15403 bf_set(lpfc_mq_context_ring_size,
15404 &mq_create_ext->u.request.context,
15405 LPFC_MQ_RING_SIZE_128);
04c68496
JS
15406 break;
15407 }
15408 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 15409 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 15410 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 15411 putPaddrLow(dmabuf->phys);
b19a061a 15412 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
04c68496
JS
15413 putPaddrHigh(dmabuf->phys);
15414 }
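	/*
	 * Issue the extended create first; if the port does not accept it,
	 * fall back to the legacy MQ_CREATE command below.
	 */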
15415 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
b19a061a
JS
15416 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15417 &mq_create_ext->u.response);
15418 if (rc != MBX_SUCCESS) {
15419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15420 "2795 MQ_CREATE_EXT failed with "
15421 "status x%x. Failback to MQ_CREATE.\n",
15422 rc);
15423 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15424 mq_create = &mbox->u.mqe.un.mq_create;
15425 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15426 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15427 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15428 &mq_create->u.response);
15429 }
15430
04c68496 15431 /* The IOCTL status is embedded in the mailbox subheader. */
04c68496
JS
15432 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15433 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15434 if (shdr_status || shdr_add_status || rc) {
15435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15436 "2502 MQ_CREATE mailbox failed with "
15437 "status x%x add_status x%x, mbx status x%x\n",
15438 shdr_status, shdr_add_status, rc);
15439 status = -ENXIO;
15440 goto out;
15441 }
04c68496
JS
15442 if (mq->queue_id == 0xFFFF) {
15443 status = -ENXIO;
15444 goto out;
15445 }
15446 mq->type = LPFC_MQ;
2a622bfb 15447 mq->assoc_qid = cq->queue_id;
04c68496
JS
15448 mq->subtype = subtype;
15449 mq->host_index = 0;
15450 mq->hba_index = 0;
15451
15452 /* link the mq onto the parent cq child list */
15453 list_add_tail(&mq->list, &cq->child_list);
15454out:
8fa38513 15455 mempool_free(mbox, phba->mbox_mem_pool);
04c68496
JS
15456 return status;
15457}
15458
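/*
 * Usage sketch (hypothetical caller; the real call is made from the SLI4
 * queue setup path and exact field names may differ by kernel version):
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
 *	if (rc)
 *		goto out_destroy_queues;
 */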
4f774513
JS
15459/**
15460 * lpfc_wq_create - Create a Work Queue on the HBA
15461 * @phba: HBA structure that indicates port to create a queue on.
15462 * @wq: The queue structure to use to create the work queue.
15463 * @cq: The completion queue to bind this work queue to.
15464 * @subtype: The subtype of the work queue indicating its functionality.
15465 *
15466 * This function creates a work queue, as detailed in @wq, on a port, described
15467 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15468 *
15469 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15470 * is used to get the entry count and entry size that are necessary to
15471 * determine the number of pages to allocate and use for this queue. The @cq
15472 * is used to indicate which completion queue to bind this work queue to. This
15473 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15474 * work queue. This function is synchronous and will wait for the mailbox
15475 * command to finish before continuing.
15476 *
15477 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15478 * memory this function will return -ENOMEM. If the queue create mailbox command
15479 * fails this function will return -ENXIO.
4f774513 15480 **/
a2fc4aef 15481int
4f774513
JS
15482lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15483 struct lpfc_queue *cq, uint32_t subtype)
15484{
15485 struct lpfc_mbx_wq_create *wq_create;
15486 struct lpfc_dmabuf *dmabuf;
15487 LPFC_MBOXQ_t *mbox;
15488 int rc, length, status = 0;
15489 uint32_t shdr_status, shdr_add_status;
15490 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15491 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
5a6f133e 15492 struct dma_address *page;
962bc51b
JS
15493 void __iomem *bar_memmap_p;
15494 uint32_t db_offset;
15495 uint16_t pci_barset;
1351e69f
JS
15496 uint8_t dpp_barset;
15497 uint32_t dpp_offset;
15498 unsigned long pg_addr;
81b96eda 15499 uint8_t wq_create_version;
49198b37 15500
2e90f4b5
JS
15501 /* sanity check on queue memory */
15502 if (!wq || !cq)
15503 return -ENODEV;
49198b37 15504 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 15505 hw_page_size = wq->page_size;
4f774513
JS
15506
15507 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15508 if (!mbox)
15509 return -ENOMEM;
15510 length = (sizeof(struct lpfc_mbx_wq_create) -
15511 sizeof(struct lpfc_sli4_cfg_mhdr));
15512 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15513 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15514 length, LPFC_SLI4_MBX_EMBED);
15515 wq_create = &mbox->u.mqe.un.wq_create;
5a6f133e 15516 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
4f774513
JS
15517 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15518 wq->page_count);
15519 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15520 cq->queue_id);
0c651878
JS
15521
15522 /* wqv is the earliest version supported, NOT the latest */
5a6f133e
JS
15523 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15524 phba->sli4_hba.pc_sli4_params.wqv);
962bc51b 15525
c176ffa0
JS
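	/* 128-byte WQEs or a WQ page size above SLI4_PAGE_SIZE need the V1 format */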
15526 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15527 (wq->page_size > SLI4_PAGE_SIZE))
81b96eda
JS
15528 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15529 else
15530 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15531
15538 switch (wq_create_version) {
0c651878 15539 case LPFC_Q_CREATE_VERSION_1:
5a6f133e
JS
15540 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15541 wq->entry_count);
3f247de7
JS
15542 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15543 LPFC_Q_CREATE_VERSION_1);
15544
5a6f133e
JS
15545 switch (wq->entry_size) {
15546 default:
15547 case 64:
15548 bf_set(lpfc_mbx_wq_create_wqe_size,
15549 &wq_create->u.request_1,
15550 LPFC_WQ_WQE_SIZE_64);
15551 break;
15552 case 128:
15553 bf_set(lpfc_mbx_wq_create_wqe_size,
15554 &wq_create->u.request_1,
15555 LPFC_WQ_WQE_SIZE_128);
15556 break;
15557 }
1351e69f
JS
15558 /* Request DPP by default */
15559 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
8ea73db4
JS
15560 bf_set(lpfc_mbx_wq_create_page_size,
15561 &wq_create->u.request_1,
81b96eda 15562 (wq->page_size / SLI4_PAGE_SIZE));
5a6f133e 15563 page = wq_create->u.request_1.page;
0c651878
JS
15564 break;
15565 default:
1351e69f
JS
15566 page = wq_create->u.request.page;
15567 break;
5a6f133e 15568 }
0c651878 15569
4f774513 15570 list_for_each_entry(dmabuf, &wq->page_list, list) {
49198b37 15571 memset(dmabuf->virt, 0, hw_page_size);
5a6f133e
JS
15572 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15573 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
4f774513 15574 }
962bc51b
JS
15575
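	/*
	 * In dual-ULP (DUA) firmware mode the port reports the doorbell format
	 * and PCI BAR set to use in the WQ_CREATE response, parsed below.
	 */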
15576 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15577 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15578
4f774513
JS
15579 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15580 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
15581 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15582 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15583 if (shdr_status || shdr_add_status || rc) {
15584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15585 "2503 WQ_CREATE mailbox failed with "
15586 "status x%x add_status x%x, mbx status x%x\n",
15587 shdr_status, shdr_add_status, rc);
15588 status = -ENXIO;
15589 goto out;
15590 }
1351e69f
JS
15591
15592 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15593 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15594 &wq_create->u.response);
15595 else
15596 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15597 &wq_create->u.response_1);
15598
4f774513
JS
15599 if (wq->queue_id == 0xFFFF) {
15600 status = -ENXIO;
15601 goto out;
15602 }
1351e69f
JS
15603
15604 wq->db_format = LPFC_DB_LIST_FORMAT;
15605 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15606 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15607 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15608 &wq_create->u.response);
15609 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15610 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15611 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15612 "3265 WQ[%d] doorbell format "
15613 "not supported: x%x\n",
15614 wq->queue_id, wq->db_format);
15615 status = -EINVAL;
15616 goto out;
15617 }
15618 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15619 &wq_create->u.response);
15620 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15621 pci_barset);
15622 if (!bar_memmap_p) {
15623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15624 "3263 WQ[%d] failed to memmap "
15625 "pci barset:x%x\n",
15626 wq->queue_id, pci_barset);
15627 status = -ENOMEM;
15628 goto out;
15629 }
15630 db_offset = wq_create->u.response.doorbell_offset;
15631 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15632 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15634 "3252 WQ[%d] doorbell offset "
15635 "not supported: x%x\n",
15636 wq->queue_id, db_offset);
15637 status = -EINVAL;
15638 goto out;
15639 }
15640 wq->db_regaddr = bar_memmap_p + db_offset;
15641 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15642 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15643 "format:x%x\n", wq->queue_id,
15644 pci_barset, db_offset, wq->db_format);
15645 } else
15646 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
962bc51b 15647 } else {
1351e69f
JS
15648 /* Check if DPP was honored by the firmware */
15649 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15650 &wq_create->u.response_1);
15651 if (wq->dpp_enable) {
15652 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15653 &wq_create->u.response_1);
15654 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15655 pci_barset);
15656 if (!bar_memmap_p) {
15657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15658 "3267 WQ[%d] failed to memmap "
15659 "pci barset:x%x\n",
15660 wq->queue_id, pci_barset);
15661 status = -ENOMEM;
15662 goto out;
15663 }
15664 db_offset = wq_create->u.response_1.doorbell_offset;
15665 wq->db_regaddr = bar_memmap_p + db_offset;
15666 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15667 &wq_create->u.response_1);
15668 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15669 &wq_create->u.response_1);
15670 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15671 dpp_barset);
15672 if (!bar_memmap_p) {
15673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15674 "3268 WQ[%d] failed to memmap "
15675 "pci barset:x%x\n",
15676 wq->queue_id, dpp_barset);
15677 status = -ENOMEM;
15678 goto out;
15679 }
15680 dpp_offset = wq_create->u.response_1.dpp_offset;
15681 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15682 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15683 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15684 "dpp_id:x%x dpp_barset:x%x "
15685 "dpp_offset:x%x\n",
15686 wq->queue_id, pci_barset, db_offset,
15687 wq->dpp_id, dpp_barset, dpp_offset);
15688
15689 /* Enable combined writes for DPP aperture */
15690 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15691#ifdef CONFIG_X86
15692 rc = set_memory_wc(pg_addr, 1);
15693 if (rc) {
15694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15695 "3272 Cannot setup Combined "
15696 "Write on WQ[%d] - disable DPP\n",
15697 wq->queue_id);
15698 phba->cfg_enable_dpp = 0;
15699 }
15700#else
15701 phba->cfg_enable_dpp = 0;
15702#endif
15703 } else
15704 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
962bc51b 15705 }
895427bd
JS
15706 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15707 if (wq->pring == NULL) {
15708 status = -ENOMEM;
15709 goto out;
15710 }
4f774513 15711 wq->type = LPFC_WQ;
2a622bfb 15712 wq->assoc_qid = cq->queue_id;
4f774513
JS
15713 wq->subtype = subtype;
15714 wq->host_index = 0;
15715 wq->hba_index = 0;
32517fc0 15716 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
4f774513
JS
15717
15718 /* link the wq onto the parent cq child list */
15719 list_add_tail(&wq->list, &cq->child_list);
15720out:
8fa38513 15721 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
15722 return status;
15723}
15724
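/*
 * Usage sketch (hypothetical caller; ELS queue fields assumed, the real
 * calls are made from the SLI4 queue setup path):
 *
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);
 */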
15725/**
15726 * lpfc_rq_create - Create a Receive Queue on the HBA
15727 * @phba: HBA structure that indicates port to create a queue on.
15728 * @hrq: The queue structure to use to create the header receive queue.
15729 * @drq: The queue structure to use to create the data receive queue.
15730 * @cq: The completion queue to bind this work queue to.
15731 *
15732 * This function creates a receive buffer queue pair, as detailed in @hrq and
15733 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15734 * to the HBA.
15735 *
15736 * The @phba struct is used to send the mailbox command to the HBA. The @drq and
15737 * @hrq structs are used to get the entry count that is necessary to determine
15738 * the number of pages to use for these queues. The @cq is used to indicate which
15739 * completion queue to bind received buffers that are posted to these queues to.
15740 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
15741 * receive queue pair. This function is synchronous and will wait for the
15742 * mailbox command to finish before continuing.
15743 *
15744 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15745 * memory this function will return -ENOMEM. If the queue create mailbox command
15746 * fails this function will return -ENXIO.
4f774513 15747 **/
a2fc4aef 15748int
4f774513
JS
15749lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15750 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15751{
15752 struct lpfc_mbx_rq_create *rq_create;
15753 struct lpfc_dmabuf *dmabuf;
15754 LPFC_MBOXQ_t *mbox;
15755 int rc, length, status = 0;
15756 uint32_t shdr_status, shdr_add_status;
15757 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15758 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
962bc51b
JS
15759 void __iomem *bar_memmap_p;
15760 uint32_t db_offset;
15761 uint16_t pci_barset;
49198b37 15762
2e90f4b5
JS
15763 /* sanity check on queue memory */
15764 if (!hrq || !drq || !cq)
15765 return -ENODEV;
49198b37
JS
15766 if (!phba->sli4_hba.pc_sli4_params.supported)
15767 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
15768
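	/*
	 * The header RQ receives frame headers and the data RQ the payload;
	 * the port requires both rings of a pair to have the same depth.
	 */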
15769 if (hrq->entry_count != drq->entry_count)
15770 return -EINVAL;
15771 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15772 if (!mbox)
15773 return -ENOMEM;
15774 length = (sizeof(struct lpfc_mbx_rq_create) -
15775 sizeof(struct lpfc_sli4_cfg_mhdr));
15776 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15777 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15778 length, LPFC_SLI4_MBX_EMBED);
15779 rq_create = &mbox->u.mqe.un.rq_create;
5a6f133e
JS
15780 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15781 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15782 phba->sli4_hba.pc_sli4_params.rqv);
15783 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15784 bf_set(lpfc_rq_context_rqe_count_1,
15785 &rq_create->u.request.context,
15786 hrq->entry_count);
15787 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
c31098ce
JS
15788 bf_set(lpfc_rq_context_rqe_size,
15789 &rq_create->u.request.context,
15790 LPFC_RQE_SIZE_8);
15791 bf_set(lpfc_rq_context_page_size,
15792 &rq_create->u.request.context,
8ea73db4 15793 LPFC_RQ_PAGE_SIZE_4096);
5a6f133e
JS
15794 } else {
15795 switch (hrq->entry_count) {
15796 default:
15797 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15798 "2535 Unsupported RQ count. (%d)\n",
15799 hrq->entry_count);
4f4c1863
JS
15800 if (hrq->entry_count < 512) {
15801 status = -EINVAL;
15802 goto out;
15803 }
5bd5f66c 15804 /* fall through - otherwise default to smallest count */
5a6f133e
JS
15805 case 512:
15806 bf_set(lpfc_rq_context_rqe_count,
15807 &rq_create->u.request.context,
15808 LPFC_RQ_RING_SIZE_512);
15809 break;
15810 case 1024:
15811 bf_set(lpfc_rq_context_rqe_count,
15812 &rq_create->u.request.context,
15813 LPFC_RQ_RING_SIZE_1024);
15814 break;
15815 case 2048:
15816 bf_set(lpfc_rq_context_rqe_count,
15817 &rq_create->u.request.context,
15818 LPFC_RQ_RING_SIZE_2048);
15819 break;
15820 case 4096:
15821 bf_set(lpfc_rq_context_rqe_count,
15822 &rq_create->u.request.context,
15823 LPFC_RQ_RING_SIZE_4096);
15824 break;
15825 }
15826 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15827 LPFC_HDR_BUF_SIZE);
4f774513
JS
15828 }
15829 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15830 cq->queue_id);
15831 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15832 hrq->page_count);
4f774513 15833 list_for_each_entry(dmabuf, &hrq->page_list, list) {
49198b37 15834 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
15835 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15836 putPaddrLow(dmabuf->phys);
15837 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15838 putPaddrHigh(dmabuf->phys);
15839 }
962bc51b
JS
15840 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15841 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15842
4f774513
JS
15843 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15844 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
15845 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15846 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15847 if (shdr_status || shdr_add_status || rc) {
15848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15849 "2504 RQ_CREATE mailbox failed with "
15850 "status x%x add_status x%x, mbx status x%x\n",
15851 shdr_status, shdr_add_status, rc);
15852 status = -ENXIO;
15853 goto out;
15854 }
15855 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15856 if (hrq->queue_id == 0xFFFF) {
15857 status = -ENXIO;
15858 goto out;
15859 }
962bc51b
JS
15860
15861 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15862 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15863 &rq_create->u.response);
15864 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15865 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15867 "3262 RQ [%d] doorbell format not "
15868 "supported: x%x\n", hrq->queue_id,
15869 hrq->db_format);
15870 status = -EINVAL;
15871 goto out;
15872 }
15873
15874 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15875 &rq_create->u.response);
15876 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15877 if (!bar_memmap_p) {
15878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15879 "3269 RQ[%d] failed to memmap pci "
15880 "barset:x%x\n", hrq->queue_id,
15881 pci_barset);
15882 status = -ENOMEM;
15883 goto out;
15884 }
15885
15886 db_offset = rq_create->u.response.doorbell_offset;
15887 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15888 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15889 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15890 "3270 RQ[%d] doorbell offset not "
15891 "supported: x%x\n", hrq->queue_id,
15892 db_offset);
15893 status = -EINVAL;
15894 goto out;
15895 }
15896 hrq->db_regaddr = bar_memmap_p + db_offset;
15897 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
a22e7db3
JS
15898 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15899 "format:x%x\n", hrq->queue_id, pci_barset,
15900 db_offset, hrq->db_format);
962bc51b
JS
15901 } else {
15902 hrq->db_format = LPFC_DB_RING_FORMAT;
15903 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15904 }
4f774513 15905 hrq->type = LPFC_HRQ;
2a622bfb 15906 hrq->assoc_qid = cq->queue_id;
4f774513
JS
15907 hrq->subtype = subtype;
15908 hrq->host_index = 0;
15909 hrq->hba_index = 0;
32517fc0 15910 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
4f774513
JS
15911
15912 /* now create the data queue */
15913 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15914 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15915 length, LPFC_SLI4_MBX_EMBED);
5a6f133e
JS
15916 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15917 phba->sli4_hba.pc_sli4_params.rqv);
15918 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15919 bf_set(lpfc_rq_context_rqe_count_1,
c31098ce 15920 &rq_create->u.request.context, hrq->entry_count);
3c603be9
JS
15921 if (subtype == LPFC_NVMET)
15922 rq_create->u.request.context.buffer_size =
15923 LPFC_NVMET_DATA_BUF_SIZE;
15924 else
15925 rq_create->u.request.context.buffer_size =
15926 LPFC_DATA_BUF_SIZE;
c31098ce
JS
15927 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15928 LPFC_RQE_SIZE_8);
15929 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15930 (PAGE_SIZE/SLI4_PAGE_SIZE));
5a6f133e
JS
15931 } else {
15932 switch (drq->entry_count) {
15933 default:
15934 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15935 "2536 Unsupported RQ count. (%d)\n",
15936 drq->entry_count);
4f4c1863
JS
15937 if (drq->entry_count < 512) {
15938 status = -EINVAL;
15939 goto out;
15940 }
5bd5f66c 15941 /* fall through - otherwise default to smallest count */
5a6f133e
JS
15942 case 512:
15943 bf_set(lpfc_rq_context_rqe_count,
15944 &rq_create->u.request.context,
15945 LPFC_RQ_RING_SIZE_512);
15946 break;
15947 case 1024:
15948 bf_set(lpfc_rq_context_rqe_count,
15949 &rq_create->u.request.context,
15950 LPFC_RQ_RING_SIZE_1024);
15951 break;
15952 case 2048:
15953 bf_set(lpfc_rq_context_rqe_count,
15954 &rq_create->u.request.context,
15955 LPFC_RQ_RING_SIZE_2048);
15956 break;
15957 case 4096:
15958 bf_set(lpfc_rq_context_rqe_count,
15959 &rq_create->u.request.context,
15960 LPFC_RQ_RING_SIZE_4096);
15961 break;
15962 }
3c603be9
JS
15963 if (subtype == LPFC_NVMET)
15964 bf_set(lpfc_rq_context_buf_size,
15965 &rq_create->u.request.context,
15966 LPFC_NVMET_DATA_BUF_SIZE);
15967 else
15968 bf_set(lpfc_rq_context_buf_size,
15969 &rq_create->u.request.context,
15970 LPFC_DATA_BUF_SIZE);
4f774513
JS
15971 }
15972 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15973 cq->queue_id);
15974 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15975 drq->page_count);
4f774513
JS
15976 list_for_each_entry(dmabuf, &drq->page_list, list) {
15977 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15978 putPaddrLow(dmabuf->phys);
15979 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15980 putPaddrHigh(dmabuf->phys);
15981 }
962bc51b
JS
15982 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15983 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
4f774513
JS
15984 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15985 /* The IOCTL status is embedded in the mailbox subheader. */
15986 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15987 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15988 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15989 if (shdr_status || shdr_add_status || rc) {
15990 status = -ENXIO;
15991 goto out;
15992 }
15993 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15994 if (drq->queue_id == 0xFFFF) {
15995 status = -ENXIO;
15996 goto out;
15997 }
15998 drq->type = LPFC_DRQ;
2a622bfb 15999 drq->assoc_qid = cq->queue_id;
4f774513
JS
16000 drq->subtype = subtype;
16001 drq->host_index = 0;
16002 drq->hba_index = 0;
32517fc0 16003 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
4f774513
JS
16004
16005 /* link the header and data RQs onto the parent cq child list */
16006 list_add_tail(&hrq->list, &cq->child_list);
16007 list_add_tail(&drq->list, &cq->child_list);
16008
16009out:
8fa38513 16010 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
16011 return status;
16012}
16013
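/*
 * Usage sketch (hypothetical caller; header/data RQ field names assumed):
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 */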
2d7dbc4c
JS
16014/**
16015 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16016 * @phba: HBA structure that indicates port to create a queue on.
16017 * @hrqp: The queue structure array to use to create the header receive queues.
16018 * @drqp: The queue structure array to use to create the data receive queues.
16019 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The subtype of the receive queue pairs.
16020 *
16021 * This function creates a set of receive buffer queue pairs, as detailed in
16022 * @hrqp and @drqp, on a port, described by @phba by sending a RQ_CREATE mailbox
16023 * command to the HBA.
16024 *
16025 * The @phba struct is used to send the mailbox command to the HBA. The @drqp and
16026 * @hrqp arrays are used to get the entry counts that are necessary to determine
16027 * the number of pages to use for these queues. The @cqp array is used to indicate
16028 * which completion queues to bind received buffers posted to these queues to.
16029 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
16030 * receive queue pairs. This function is synchronous and will wait for the
16031 * mailbox command to finish before continuing.
16032 *
16033 * On success this function will return a zero. If unable to allocate enough
16034 * memory this function will return -ENOMEM. If the queue create mailbox command
16035 * fails this function will return -ENXIO.
16036 **/
16037int
16038lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16039 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16040 uint32_t subtype)
16041{
16042 struct lpfc_queue *hrq, *drq, *cq;
16043 struct lpfc_mbx_rq_create_v2 *rq_create;
16044 struct lpfc_dmabuf *dmabuf;
16045 LPFC_MBOXQ_t *mbox;
16046 int rc, length, alloclen, status = 0;
16047 int cnt, idx, numrq, page_idx = 0;
16048 uint32_t shdr_status, shdr_add_status;
16049 union lpfc_sli4_cfg_shdr *shdr;
16050 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16051
16052 numrq = phba->cfg_nvmet_mrq;
16053 /* sanity check on array memory */
16054 if (!hrqp || !drqp || !cqp || !numrq)
16055 return -ENODEV;
16056 if (!phba->sli4_hba.pc_sli4_params.supported)
16057 hw_page_size = SLI4_PAGE_SIZE;
16058
16059 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16060 if (!mbox)
16061 return -ENOMEM;
16062
16063 length = sizeof(struct lpfc_mbx_rq_create_v2);
16064 length += ((2 * numrq * hrqp[0]->page_count) *
16065 sizeof(struct dma_address));
16066
16067 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16068 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16069 LPFC_SLI4_MBX_NEMBED);
16070 if (alloclen < length) {
16071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16072 "3099 Allocated DMA memory size (%d) is "
16073 "less than the requested DMA memory size "
16074 "(%d)\n", alloclen, length);
16075 status = -ENOMEM;
16076 goto out;
16077 }
16078
16081 rq_create = mbox->sge_array->addr[0];
16082 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16083
16084 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16085 cnt = 0;
16086
16087 for (idx = 0; idx < numrq; idx++) {
16088 hrq = hrqp[idx];
16089 drq = drqp[idx];
16090 cq = cqp[idx];
16091
2d7dbc4c
JS
16092 /* sanity check on queue memory */
16093 if (!hrq || !drq || !cq) {
16094 status = -ENODEV;
16095 goto out;
16096 }
16097
7aabe84b
JS
16098 if (hrq->entry_count != drq->entry_count) {
16099 status = -EINVAL;
16100 goto out;
16101 }
16102
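		/*
		 * The RQ context (buffer sizes, ring depth, base CQ) is
		 * programmed only once with the first pair; the remaining
		 * pairs in this command share that context.
		 */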
2d7dbc4c
JS
16103 if (idx == 0) {
16104 bf_set(lpfc_mbx_rq_create_num_pages,
16105 &rq_create->u.request,
16106 hrq->page_count);
16107 bf_set(lpfc_mbx_rq_create_rq_cnt,
16108 &rq_create->u.request, (numrq * 2));
16109 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16110 1);
16111 bf_set(lpfc_rq_context_base_cq,
16112 &rq_create->u.request.context,
16113 cq->queue_id);
16114 bf_set(lpfc_rq_context_data_size,
16115 &rq_create->u.request.context,
3c603be9 16116 LPFC_NVMET_DATA_BUF_SIZE);
2d7dbc4c
JS
16117 bf_set(lpfc_rq_context_hdr_size,
16118 &rq_create->u.request.context,
16119 LPFC_HDR_BUF_SIZE);
16120 bf_set(lpfc_rq_context_rqe_count_1,
16121 &rq_create->u.request.context,
16122 hrq->entry_count);
16123 bf_set(lpfc_rq_context_rqe_size,
16124 &rq_create->u.request.context,
16125 LPFC_RQE_SIZE_8);
16126 bf_set(lpfc_rq_context_page_size,
16127 &rq_create->u.request.context,
16128 (PAGE_SIZE/SLI4_PAGE_SIZE));
16129 }
16130 rc = 0;
16131 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16132 memset(dmabuf->virt, 0, hw_page_size);
16133 cnt = page_idx + dmabuf->buffer_tag;
16134 rq_create->u.request.page[cnt].addr_lo =
16135 putPaddrLow(dmabuf->phys);
16136 rq_create->u.request.page[cnt].addr_hi =
16137 putPaddrHigh(dmabuf->phys);
16138 rc++;
16139 }
16140 page_idx += rc;
16141
16142 rc = 0;
16143 list_for_each_entry(dmabuf, &drq->page_list, list) {
16144 memset(dmabuf->virt, 0, hw_page_size);
16145 cnt = page_idx + dmabuf->buffer_tag;
16146 rq_create->u.request.page[cnt].addr_lo =
16147 putPaddrLow(dmabuf->phys);
16148 rq_create->u.request.page[cnt].addr_hi =
16149 putPaddrHigh(dmabuf->phys);
16150 rc++;
16151 }
16152 page_idx += rc;
16153
16154 hrq->db_format = LPFC_DB_RING_FORMAT;
16155 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16156 hrq->type = LPFC_HRQ;
16157 hrq->assoc_qid = cq->queue_id;
16158 hrq->subtype = subtype;
16159 hrq->host_index = 0;
16160 hrq->hba_index = 0;
32517fc0 16161 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
2d7dbc4c
JS
16162
16163 drq->db_format = LPFC_DB_RING_FORMAT;
16164 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16165 drq->type = LPFC_DRQ;
16166 drq->assoc_qid = cq->queue_id;
16167 drq->subtype = subtype;
16168 drq->host_index = 0;
16169 drq->hba_index = 0;
32517fc0 16170 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
2d7dbc4c
JS
16171
16172 list_add_tail(&hrq->list, &cq->child_list);
16173 list_add_tail(&drq->list, &cq->child_list);
16174 }
16175
16176 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16177 /* The IOCTL status is embedded in the mailbox subheader. */
16178 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16179 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16180 if (shdr_status || shdr_add_status || rc) {
16181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16182 "3120 RQ_CREATE mailbox failed with "
16183 "status x%x add_status x%x, mbx status x%x\n",
16184 shdr_status, shdr_add_status, rc);
16185 status = -ENXIO;
16186 goto out;
16187 }
16188 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16189 if (rc == 0xFFFF) {
16190 status = -ENXIO;
16191 goto out;
16192 }
16193
16194 /* Initialize all RQs with associated queue id */
16195 for (idx = 0; idx < numrq; idx++) {
16196 hrq = hrqp[idx];
16197 hrq->queue_id = rc + (2 * idx);
16198 drq = drqp[idx];
16199 drq->queue_id = rc + (2 * idx) + 1;
16200 }
16201
16202out:
16203 lpfc_sli4_mbox_cmd_free(phba, mbox);
16204 return status;
16205}
16206
4f774513
JS
16207/**
16208 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16209 * @eq: The queue structure associated with the queue to destroy.
16210 *
16211 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16212 * command, specific to the type of queue, to the HBA.
16213 *
16214 * The @eq struct is used to get the queue ID of the queue to destroy.
16215 *
16216 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16217 * command fails this function will return -ENXIO.
4f774513 16218 **/
a2fc4aef 16219int
4f774513
JS
16220lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16221{
16222 LPFC_MBOXQ_t *mbox;
16223 int rc, length, status = 0;
16224 uint32_t shdr_status, shdr_add_status;
16225 union lpfc_sli4_cfg_shdr *shdr;
16226
2e90f4b5 16227 /* sanity check on queue memory */
4f774513
JS
16228 if (!eq)
16229 return -ENODEV;
32517fc0 16230
4f774513
JS
16231 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16232 if (!mbox)
16233 return -ENOMEM;
16234 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16235 sizeof(struct lpfc_sli4_cfg_mhdr));
16236 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16237 LPFC_MBOX_OPCODE_EQ_DESTROY,
16238 length, LPFC_SLI4_MBX_EMBED);
16239 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16240 eq->queue_id);
16241 mbox->vport = eq->phba->pport;
16242 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16243
16244 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16245 /* The IOCTL status is embedded in the mailbox subheader. */
16246 shdr = (union lpfc_sli4_cfg_shdr *)
16247 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16248 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16249 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16250 if (shdr_status || shdr_add_status || rc) {
16251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16252 "2505 EQ_DESTROY mailbox failed with "
16253 "status x%x add_status x%x, mbx status x%x\n",
16254 shdr_status, shdr_add_status, rc);
16255 status = -ENXIO;
16256 }
16257
16258 /* Remove eq from any list */
16259 list_del_init(&eq->list);
8fa38513 16260 mempool_free(mbox, eq->phba->mbox_mem_pool);
4f774513
JS
16261 return status;
16262}
16263
16264/**
16265 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16266 * @cq: The queue structure associated with the queue to destroy.
16267 *
16268 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16269 * command, specific to the type of queue, to the HBA.
16270 *
16271 * The @cq struct is used to get the queue ID of the queue to destroy.
16272 *
16273 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16274 * command fails this function will return -ENXIO.
4f774513 16275 **/
a2fc4aef 16276int
4f774513
JS
16277lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16278{
16279 LPFC_MBOXQ_t *mbox;
16280 int rc, length, status = 0;
16281 uint32_t shdr_status, shdr_add_status;
16282 union lpfc_sli4_cfg_shdr *shdr;
16283
2e90f4b5 16284 /* sanity check on queue memory */
4f774513
JS
16285 if (!cq)
16286 return -ENODEV;
16287 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16288 if (!mbox)
16289 return -ENOMEM;
16290 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16291 sizeof(struct lpfc_sli4_cfg_mhdr));
16292 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16293 LPFC_MBOX_OPCODE_CQ_DESTROY,
16294 length, LPFC_SLI4_MBX_EMBED);
16295 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16296 cq->queue_id);
16297 mbox->vport = cq->phba->pport;
16298 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16299 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16300 /* The IOCTL status is embedded in the mailbox subheader. */
16301 shdr = (union lpfc_sli4_cfg_shdr *)
16302 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16303 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16304 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16305 if (shdr_status || shdr_add_status || rc) {
16306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16307 "2506 CQ_DESTROY mailbox failed with "
16308 "status x%x add_status x%x, mbx status x%x\n",
16309 shdr_status, shdr_add_status, rc);
16310 status = -ENXIO;
16311 }
16312 /* Remove cq from any list */
16313 list_del_init(&cq->list);
8fa38513 16314 mempool_free(mbox, cq->phba->mbox_mem_pool);
4f774513
JS
16315 return status;
16316}
16317
04c68496
JS
16318/**
16319 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16320 * @mq: The queue structure associated with the queue to destroy.
16321 *
16322 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16323 * command, specific to the type of queue, to the HBA.
16324 *
16325 * The @mq struct is used to get the queue ID of the queue to destroy.
16326 *
16327 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16328 * command fails this function will return -ENXIO.
04c68496 16329 **/
a2fc4aef 16330int
04c68496
JS
16331lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16332{
16333 LPFC_MBOXQ_t *mbox;
16334 int rc, length, status = 0;
16335 uint32_t shdr_status, shdr_add_status;
16336 union lpfc_sli4_cfg_shdr *shdr;
16337
2e90f4b5 16338 /* sanity check on queue memory */
04c68496
JS
16339 if (!mq)
16340 return -ENODEV;
16341 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16342 if (!mbox)
16343 return -ENOMEM;
16344 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16345 sizeof(struct lpfc_sli4_cfg_mhdr));
16346 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16347 LPFC_MBOX_OPCODE_MQ_DESTROY,
16348 length, LPFC_SLI4_MBX_EMBED);
16349 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16350 mq->queue_id);
16351 mbox->vport = mq->phba->pport;
16352 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16353 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16354 /* The IOCTL status is embedded in the mailbox subheader. */
16355 shdr = (union lpfc_sli4_cfg_shdr *)
16356 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16359 if (shdr_status || shdr_add_status || rc) {
16360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16361 "2507 MQ_DESTROY mailbox failed with "
16362 "status x%x add_status x%x, mbx status x%x\n",
16363 shdr_status, shdr_add_status, rc);
16364 status = -ENXIO;
16365 }
16366 /* Remove mq from any list */
16367 list_del_init(&mq->list);
8fa38513 16368 mempool_free(mbox, mq->phba->mbox_mem_pool);
04c68496
JS
16369 return status;
16370}
16371
4f774513
JS
16372/**
16373 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16374 * @wq: The queue structure associated with the queue to destroy.
16375 *
16376 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16377 * command, specific to the type of queue, to the HBA.
16378 *
16379 * The @wq struct is used to get the queue ID of the queue to destroy.
16380 *
16381 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16382 * command fails this function will return -ENXIO.
4f774513 16383 **/
a2fc4aef 16384int
4f774513
JS
16385lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16386{
16387 LPFC_MBOXQ_t *mbox;
16388 int rc, length, status = 0;
16389 uint32_t shdr_status, shdr_add_status;
16390 union lpfc_sli4_cfg_shdr *shdr;
16391
2e90f4b5 16392 /* sanity check on queue memory */
4f774513
JS
16393 if (!wq)
16394 return -ENODEV;
16395 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16396 if (!mbox)
16397 return -ENOMEM;
16398 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16399 sizeof(struct lpfc_sli4_cfg_mhdr));
16400 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16401 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16402 length, LPFC_SLI4_MBX_EMBED);
16403 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16404 wq->queue_id);
16405 mbox->vport = wq->phba->pport;
16406 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16407 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16408 shdr = (union lpfc_sli4_cfg_shdr *)
16409 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16410 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16411 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16412 if (shdr_status || shdr_add_status || rc) {
16413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16414 "2508 WQ_DESTROY mailbox failed with "
16415 "status x%x add_status x%x, mbx status x%x\n",
16416 shdr_status, shdr_add_status, rc);
16417 status = -ENXIO;
16418 }
16419 /* Remove wq from any list */
16420 list_del_init(&wq->list);
d1f525aa
JS
16421 kfree(wq->pring);
16422 wq->pring = NULL;
8fa38513 16423 mempool_free(mbox, wq->phba->mbox_mem_pool);
4f774513
JS
16424 return status;
16425}
16426
16427/**
16428 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16429 * @hrq: The header receive queue of the pair to destroy.
 * @drq: The data receive queue of the pair to destroy.
16430 *
16431 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
16432 * by sending a mailbox command, specific to the type of queue, to the HBA.
16433 *
16434 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
16435 *
16436 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16437 * command fails this function will return -ENXIO.
4f774513 16438 **/
a2fc4aef 16439int
4f774513
JS
16440lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16441 struct lpfc_queue *drq)
16442{
16443 LPFC_MBOXQ_t *mbox;
16444 int rc, length, status = 0;
16445 uint32_t shdr_status, shdr_add_status;
16446 union lpfc_sli4_cfg_shdr *shdr;
16447
2e90f4b5 16448 /* sanity check on queue memory */
4f774513
JS
16449 if (!hrq || !drq)
16450 return -ENODEV;
16451 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16452 if (!mbox)
16453 return -ENOMEM;
16454 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 16455 sizeof(struct lpfc_sli4_cfg_mhdr));
4f774513
JS
16456 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16457 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16458 length, LPFC_SLI4_MBX_EMBED);
16459 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16460 hrq->queue_id);
16461 mbox->vport = hrq->phba->pport;
16462 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16463 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16464 /* The IOCTL status is embedded in the mailbox subheader. */
16465 shdr = (union lpfc_sli4_cfg_shdr *)
16466 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16467 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16468 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16469 if (shdr_status || shdr_add_status || rc) {
16470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16471 "2509 RQ_DESTROY mailbox failed with "
16472 "status x%x add_status x%x, mbx status x%x\n",
16473 shdr_status, shdr_add_status, rc);
16474 if (rc != MBX_TIMEOUT)
16475 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16476 return -ENXIO;
16477 }
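	/* Reuse the same mailbox to destroy the companion data RQ */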
16478 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16479 drq->queue_id);
16480 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16481 shdr = (union lpfc_sli4_cfg_shdr *)
16482 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16483 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16484 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16485 if (shdr_status || shdr_add_status || rc) {
16486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16487 "2510 RQ_DESTROY mailbox failed with "
16488 "status x%x add_status x%x, mbx status x%x\n",
16489 shdr_status, shdr_add_status, rc);
16490 status = -ENXIO;
16491 }
16492 list_del_init(&hrq->list);
16493 list_del_init(&drq->list);
8fa38513 16494 mempool_free(mbox, hrq->phba->mbox_mem_pool);
4f774513
JS
16495 return status;
16496}
16497
16498/**
16499 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16500 * @phba: pointer to lpfc hba data structure.
16501 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16502 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16503 * @xritag: the xritag that ties this io to the SGL pages.
16504 *
16505 * This routine will post the sgl pages for the IO that has the xritag
16506 * that is in the iocbq structure. The xritag is assigned during iocbq
16507 * creation and persists for as long as the driver is loaded.
16508 * If the caller has fewer than 256 scatter gather segments to map then
16509 * pdma_phys_addr1 should be 0.
16510 * If the caller needs to map more than 256 scatter gather segments then
16511 * pdma_phys_addr1 should be a valid physical address.
16512 * Physical addresses for SGLs must be 64 byte aligned.
16513 * If two SGL pages are mapped, the first one must have 256 entries and
16514 * the second one can have between 1 and 256 entries.
16515 *
16516 * Return codes:
16517 * 0 - Success
16518 * -ENXIO, -ENOMEM - Failure
16519 **/
16520int
16521lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16522 dma_addr_t pdma_phys_addr0,
16523 dma_addr_t pdma_phys_addr1,
16524 uint16_t xritag)
16525{
16526 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16527 LPFC_MBOXQ_t *mbox;
16528 int rc;
16529 uint32_t shdr_status, shdr_add_status;
6d368e53 16530 uint32_t mbox_tmo;
4f774513
JS
16531 union lpfc_sli4_cfg_shdr *shdr;
16532
16533 if (xritag == NO_XRI) {
16534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16535 "0364 Invalid param:\n");
16536 return -EINVAL;
16537 }
16538
16539 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16540 if (!mbox)
16541 return -ENOMEM;
16542
16543 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16544 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16545 sizeof(struct lpfc_mbx_post_sgl_pages) -
fedd3b7b 16546 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
4f774513
JS
16547
16548 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16549 &mbox->u.mqe.un.post_sgl_pages;
16550 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16551 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16552
16553 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16554 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16555 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16556 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16557
16558 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16559 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16560 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16561 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16562 if (!phba->sli4_hba.intr_enable)
16563 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6d368e53 16564 else {
a183a15f 16565 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6d368e53
JS
16566 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16567 }
4f774513
JS
16568 /* The IOCTL status is embedded in the mailbox subheader. */
16569 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16570 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16571 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16572 if (rc != MBX_TIMEOUT)
16573 mempool_free(mbox, phba->mbox_mem_pool);
16574 if (shdr_status || shdr_add_status || rc) {
16575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16576 "2511 POST_SGL mailbox failed with "
16577 "status x%x add_status x%x, mbx status x%x\n",
16578 shdr_status, shdr_add_status, rc);
4f774513
JS
 return -ENXIO;
16579 }
16580 return 0;
16581}
4f774513 16582
6d368e53 16583/**
88a2cfbb 16584 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
6d368e53
JS
16585 * @phba: pointer to lpfc hba data structure.
16586 *
16587 * This routine is invoked to allocate the next available xri from the
16588 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16589 * xri is a logical index: the driver marks it in-use in the bitmask and
16590 * bumps the used-xri count before returning it.
16591 *
16592 * Returns
16593 * An available xri in the range 0 <= xri < max_xri if successful
16594 * NO_XRI if no xris are available.
16595 **/
5d8b8167 16596static uint16_t
6d368e53
JS
16597lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16598{
16599 unsigned long xri;
16600
16601 /*
16602 * Fetch the next logical xri. Because this index is logical,
16603 * the driver starts at 0 each time.
16604 */
16605 spin_lock_irq(&phba->hbalock);
16606 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16607 phba->sli4_hba.max_cfg_param.max_xri, 0);
16608 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16609 spin_unlock_irq(&phba->hbalock);
16610 return NO_XRI;
16611 } else {
16612 set_bit(xri, phba->sli4_hba.xri_bmask);
16613 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 16614 }
6d368e53
JS
16615 spin_unlock_irq(&phba->hbalock);
16616 return xri;
16617}
16618
16619/**
16620 * __lpfc_sli4_free_xri - Release an xri for reuse.
16621 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
16622 *
16623 * This routine is invoked to release an xri to the pool of
16624 * available xris maintained by the driver. The caller must hold the hbalock.
16625 **/
5d8b8167 16626static void
6d368e53
JS
16627__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16628{
16629 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
6d368e53
JS
16630 phba->sli4_hba.max_cfg_param.xri_used--;
16631 }
16632}
16633
16634/**
16635 * lpfc_sli4_free_xri - Release an xri for reuse.
16636 * @phba: pointer to lpfc hba data structure.
16637 *
16638 * This routine is invoked to release an xri to the pool of
16639 * available xris maintained by the driver.
16640 **/
16641void
16642lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16643{
16644 spin_lock_irq(&phba->hbalock);
16645 __lpfc_sli4_free_xri(phba, xri);
16646 spin_unlock_irq(&phba->hbalock);
16647}
16648
4f774513
JS
16649/**
16650 * lpfc_sli4_next_xritag - Get an xritag for the io
16651 * @phba: Pointer to HBA context object.
16652 *
16653 * This function gets an xritag for the iocb. If there is no unused xritag
16654 * it will return NO_XRI (0xffff).
16655 * The function returns the allocated xritag if successful, else returns
16656 * NO_XRI. NO_XRI is not a valid xritag.
16657 * The caller is not required to hold any lock.
16658 **/
16659uint16_t
16660lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16661{
6d368e53 16662 uint16_t xri_index;
4f774513 16663
6d368e53 16664 xri_index = lpfc_sli4_alloc_xri(phba);
81378052
JS
16665 if (xri_index == NO_XRI)
16666 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16667 "2004 Failed to allocate XRI.last XRITAG is %d"
16668 " Max XRI is %d, Used XRI is %d\n",
16669 xri_index,
16670 phba->sli4_hba.max_cfg_param.max_xri,
16671 phba->sli4_hba.max_cfg_param.xri_used);
16672 return xri_index;
4f774513
JS
16673}
16674
16675/**
895427bd 16676 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
4f774513 16677 * @phba: pointer to lpfc hba data structure.
8a9d2e80
JS
16678 * @post_sgl_list: pointer to els sgl entry list.
16679 * @count: number of els sgl entries on the list.
4f774513
JS
16680 *
16681 * This routine is invoked to post a block of driver's sgl pages to the
16682 * HBA using non-embedded mailbox command. No Lock is held. This routine
16683 * is only called when the driver is loading and after all IO has been
16684 * stopped.
16685 **/
8a9d2e80 16686static int
895427bd 16687lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
8a9d2e80
JS
16688 struct list_head *post_sgl_list,
16689 int post_cnt)
4f774513 16690{
8a9d2e80 16691 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4f774513
JS
16692 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16693 struct sgl_page_pairs *sgl_pg_pairs;
16694 void *viraddr;
16695 LPFC_MBOXQ_t *mbox;
16696 uint32_t reqlen, alloclen, pg_pairs;
16697 uint32_t mbox_tmo;
8a9d2e80
JS
16698 uint16_t xritag_start = 0;
16699 int rc = 0;
4f774513
JS
16700 uint32_t shdr_status, shdr_add_status;
16701 union lpfc_sli4_cfg_shdr *shdr;
16702
895427bd 16703 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
4f774513 16704 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16705 if (reqlen > SLI4_PAGE_SIZE) {
895427bd 16706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4f774513
JS
16707 "2559 Block sgl registration required DMA "
16708 "size (%d) greater than a page\n", reqlen);
16709 return -ENOMEM;
16710 }
895427bd 16711
4f774513 16712 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6d368e53 16713 if (!mbox)
4f774513 16714 return -ENOMEM;
4f774513
JS
16715
16716 /* Allocate DMA memory and set up the non-embedded mailbox command */
16717 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16718 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16719 LPFC_SLI4_MBX_NEMBED);
16720
16721 if (alloclen < reqlen) {
16722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16723 "0285 Allocated DMA memory size (%d) is "
16724 "less than the requested DMA memory "
16725 "size (%d)\n", alloclen, reqlen);
16726 lpfc_sli4_mbox_cmd_free(phba, mbox);
16727 return -ENOMEM;
16728 }
4f774513 16729 /* Set up the SGL pages in the non-embedded DMA pages */
6d368e53 16730 viraddr = mbox->sge_array->addr[0];
4f774513
JS
16731 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16732 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16733
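	/*
	 * The command carries only the starting xritag; the port maps the
	 * posted SGL pages to consecutive xris beginning at xritag_start.
	 */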
8a9d2e80
JS
16734 pg_pairs = 0;
16735 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
4f774513
JS
16736 /* Set up the sge entry */
16737 sgl_pg_pairs->sgl_pg0_addr_lo =
16738 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16739 sgl_pg_pairs->sgl_pg0_addr_hi =
16740 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16741 sgl_pg_pairs->sgl_pg1_addr_lo =
16742 cpu_to_le32(putPaddrLow(0));
16743 sgl_pg_pairs->sgl_pg1_addr_hi =
16744 cpu_to_le32(putPaddrHigh(0));
6d368e53 16745
4f774513
JS
16746 /* Keep the first xritag on the list */
16747 if (pg_pairs == 0)
16748 xritag_start = sglq_entry->sli4_xritag;
16749 sgl_pg_pairs++;
8a9d2e80 16750 pg_pairs++;
4f774513 16751 }
6d368e53
JS
16752
16753 /* Complete initialization and perform endian conversion. */
4f774513 16754 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
895427bd 16755 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
4f774513 16756 sgl->word0 = cpu_to_le32(sgl->word0);
895427bd 16757
4f774513
JS
16758 if (!phba->sli4_hba.intr_enable)
16759 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16760 else {
a183a15f 16761 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
16762 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16763 }
16764 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16765 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16766 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16767 if (rc != MBX_TIMEOUT)
16768 lpfc_sli4_mbox_cmd_free(phba, mbox);
16769 if (shdr_status || shdr_add_status || rc) {
16770 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16771 "2513 POST_SGL_BLOCK mailbox command failed "
16772 "status x%x add_status x%x mbx status x%x\n",
16773 shdr_status, shdr_add_status, rc);
16774 rc = -ENXIO;
16775 }
16776 return rc;
16777}
16778
16779/**
5e5b511d 16780 * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to the firmware
4f774513 16781 * @phba: pointer to lpfc hba data structure.
0794d601 16782 * @nblist: pointer to the IO buffer list.
4f774513
JS
16783 * @count: number of IO buffers on the list.
16784 *
16785 * This routine is invoked to post a block of @count IO buffer sgl pages from
0794d601 16786 * the IO buffer list @nblist to the HBA using a non-embedded mailbox command.
4f774513
JS
16787 * No Lock is held.
16788 *
16789 **/
0794d601 16790static int
5e5b511d
JS
16791lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16792 int count)
4f774513 16793{
c490850a 16794 struct lpfc_io_buf *lpfc_ncmd;
4f774513
JS
16795 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16796 struct sgl_page_pairs *sgl_pg_pairs;
16797 void *viraddr;
16798 LPFC_MBOXQ_t *mbox;
16799 uint32_t reqlen, alloclen, pg_pairs;
16800 uint32_t mbox_tmo;
16801 uint16_t xritag_start = 0;
16802 int rc = 0;
16803 uint32_t shdr_status, shdr_add_status;
16804 dma_addr_t pdma_phys_bpl1;
16805 union lpfc_sli4_cfg_shdr *shdr;
16806
16807 /* Calculate the requested length of the dma memory */
8a9d2e80 16808 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 16809 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16810 if (reqlen > SLI4_PAGE_SIZE) {
4f774513 16811 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
0794d601 16812 "6118 Block sgl registration required DMA "
4f774513
JS
16813 "size (%d) greater than a page\n", reqlen);
16814 return -ENOMEM;
16815 }
16816 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16817 if (!mbox) {
16818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0794d601 16819 "6119 Failed to allocate mbox cmd memory\n");
4f774513
JS
16820 return -ENOMEM;
16821 }
16822
16823 /* Allocate DMA memory and set up the non-embedded mailbox command */
16824 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
0794d601
JS
16825 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16826 reqlen, LPFC_SLI4_MBX_NEMBED);
4f774513
JS
16827
16828 if (alloclen < reqlen) {
16829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0794d601 16830 "6120 Allocated DMA memory size (%d) is "
4f774513
JS
16831 "less than the requested DMA memory "
16832 "size (%d)\n", alloclen, reqlen);
16833 lpfc_sli4_mbox_cmd_free(phba, mbox);
16834 return -ENOMEM;
16835 }
6d368e53 16836
4f774513 16837 /* Get the first SGE entry from the non-embedded DMA memory */
4f774513
JS
16838 viraddr = mbox->sge_array->addr[0];
16839
16840 /* Set up the SGL pages in the non-embedded DMA pages */
16841 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16842 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16843
16844 pg_pairs = 0;
0794d601 16845 list_for_each_entry(lpfc_ncmd, nblist, list) {
4f774513
JS
16846 /* Set up the sge entry */
16847 sgl_pg_pairs->sgl_pg0_addr_lo =
0794d601 16848 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
4f774513 16849 sgl_pg_pairs->sgl_pg0_addr_hi =
0794d601 16850 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
4f774513 16851 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
0794d601
JS
16852 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16853 SGL_PAGE_SIZE;
4f774513
JS
16854 else
16855 pdma_phys_bpl1 = 0;
16856 sgl_pg_pairs->sgl_pg1_addr_lo =
16857 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16858 sgl_pg_pairs->sgl_pg1_addr_hi =
16859 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16860 /* Keep the first xritag on the list */
16861 if (pg_pairs == 0)
0794d601 16862 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
4f774513
JS
16863 sgl_pg_pairs++;
16864 pg_pairs++;
16865 }
16866 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16867 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16868 /* Perform endian conversion if necessary */
16869 sgl->word0 = cpu_to_le32(sgl->word0);
16870
0794d601 16871 if (!phba->sli4_hba.intr_enable) {
4f774513 16872 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
0794d601 16873 } else {
a183a15f 16874 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
16875 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16876 }
0794d601 16877 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
4f774513
JS
16878 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16879 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16880 if (rc != MBX_TIMEOUT)
16881 lpfc_sli4_mbox_cmd_free(phba, mbox);
16882 if (shdr_status || shdr_add_status || rc) {
16883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
0794d601 16884 "6125 POST_SGL_BLOCK mailbox command failed "
4f774513
JS
16885 "status x%x add_status x%x mbx status x%x\n",
16886 shdr_status, shdr_add_status, rc);
16887 rc = -ENXIO;
16888 }
16889 return rc;
16890}
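/*
 * Illustrative sketch, not part of the driver: the non-embedded
 * POST_SGL_PAGES mailbox above packs one struct sgl_page_pairs per buffer
 * after a config header, and rejects any request larger than one SLI4 page.
 * The standalone program below mirrors that sizing rule with hypothetical
 * stand-in sizes (PAIR_SZ, SHDR_SZ, SLI4_PAGE_SZ) to show how many buffers
 * fit in a single mailbox page; the real sizes come from the lpfc headers.
 */
#include <stdio.h>

#define PAIR_SZ		16	/* stand-in for sizeof(struct sgl_page_pairs) */
#define SHDR_SZ		(16 + 4)	/* stand-in for cfg_shdr plus trailing word */
#define SLI4_PAGE_SZ	4096	/* stand-in for SLI4_PAGE_SIZE */

/* Return the request length for posting @count sgl page pairs. */
static unsigned int post_sgl_reqlen(unsigned int count)
{
	return count * PAIR_SZ + SHDR_SZ;
}

int main(void)
{
	unsigned int count = 0;

	/* Largest block that still fits in one non-embedded mailbox page. */
	while (post_sgl_reqlen(count + 1) <= SLI4_PAGE_SZ)
		count++;

	printf("max sgl page pairs per mailbox page: %u\n", count);
	printf("reqlen for that block: %u bytes\n", post_sgl_reqlen(count));
	return 0;
}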
16891
0794d601 16892/**
5e5b511d 16893 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
0794d601
JS
16894 * @phba: pointer to lpfc hba data structure.
16895 * @post_nblist: pointer to the nvme buffer list.
16896 *
16897 * This routine walks a list of nvme buffers that was passed in. It attempts
16898 * to construct blocks of nvme buffer sgls which contain contiguous xris and
16899 * uses the non-embedded SGL block post mailbox commands to post to the port.
16900 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
16901 * embedded SGL post mailbox command for posting. The @post_nblist passed in
16902 * must be a local list, thus no lock is needed when manipulating the list.
16903 *
16904 * Returns: 0 = failure, otherwise the number of successfully posted buffers.
16905 **/
16906int
5e5b511d
JS
16907lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16908 struct list_head *post_nblist, int sb_count)
0794d601 16909{
c490850a 16910 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
0794d601
JS
16911 int status, sgl_size;
16912 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16913 dma_addr_t pdma_phys_sgl1;
16914 int last_xritag = NO_XRI;
16915 int cur_xritag;
0794d601
JS
16916 LIST_HEAD(prep_nblist);
16917 LIST_HEAD(blck_nblist);
16918 LIST_HEAD(nvme_nblist);
16919
16920 /* sanity check */
16921 if (sb_count <= 0)
16922 return -EINVAL;
16923
16924 sgl_size = phba->cfg_sg_dma_buf_size;
16925 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16926 list_del_init(&lpfc_ncmd->list);
16927 block_cnt++;
16928 if ((last_xritag != NO_XRI) &&
16929 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16930 /* a hole in xri block, form a sgl posting block */
16931 list_splice_init(&prep_nblist, &blck_nblist);
16932 post_cnt = block_cnt - 1;
16933 /* prepare list for next posting block */
16934 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16935 block_cnt = 1;
16936 } else {
16937 /* prepare list for next posting block */
16938 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16939 /* enough sgls for non-embed sgl mbox command */
16940 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16941 list_splice_init(&prep_nblist, &blck_nblist);
16942 post_cnt = block_cnt;
16943 block_cnt = 0;
16944 }
16945 }
16946 num_posting++;
16947 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16948
16949 /* end of repost sgl list condition for NVME buffers */
16950 if (num_posting == sb_count) {
16951 if (post_cnt == 0) {
16952 /* last sgl posting block */
16953 list_splice_init(&prep_nblist, &blck_nblist);
16954 post_cnt = block_cnt;
16955 } else if (block_cnt == 1) {
16956 /* last single sgl with non-contiguous xri */
16957 if (sgl_size > SGL_PAGE_SIZE)
16958 pdma_phys_sgl1 =
16959 lpfc_ncmd->dma_phys_sgl +
16960 SGL_PAGE_SIZE;
16961 else
16962 pdma_phys_sgl1 = 0;
16963 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16964 status = lpfc_sli4_post_sgl(
16965 phba, lpfc_ncmd->dma_phys_sgl,
16966 pdma_phys_sgl1, cur_xritag);
16967 if (status) {
c490850a
JS
16968 /* Post error. Buffer unavailable. */
16969 lpfc_ncmd->flags |=
16970 LPFC_SBUF_NOT_POSTED;
0794d601 16971 } else {
c490850a
JS
16972 /* Post success. Buffer available. */
16973 lpfc_ncmd->flags &=
16974 ~LPFC_SBUF_NOT_POSTED;
0794d601
JS
16975 lpfc_ncmd->status = IOSTAT_SUCCESS;
16976 num_posted++;
16977 }
16978 /* success, put on NVME buffer sgl list */
16979 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16980 }
16981 }
16982
16983 /* continue until a nembed page worth of sgls */
16984 if (post_cnt == 0)
16985 continue;
16986
16987 /* post block of NVME buffer list sgls */
5e5b511d
JS
16988 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16989 post_cnt);
0794d601
JS
16990
16991 /* don't reset xritag due to hole in xri block */
16992 if (block_cnt == 0)
16993 last_xritag = NO_XRI;
4f774513 16994
0794d601
JS
16995 /* reset NVME buffer post count for next round of posting */
16996 post_cnt = 0;
4f774513 16997
0794d601
JS
16998 /* put NVME buffers with sgls posted on the NVME buffer sgl list */
16999 while (!list_empty(&blck_nblist)) {
17000 list_remove_head(&blck_nblist, lpfc_ncmd,
c490850a 17001 struct lpfc_io_buf, list);
0794d601 17002 if (status) {
c490850a
JS
17003 /* Post error. Mark buffer unavailable. */
17004 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
0794d601 17005 } else {
c490850a
JS
17006 /* Post success. Mark buffer available. */
17007 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
0794d601
JS
17008 lpfc_ncmd->status = IOSTAT_SUCCESS;
17009 num_posted++;
17010 }
17011 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17012 }
4f774513 17013 }
0794d601 17014 /* Push NVME buffers with sgl posted to the available list */
5e5b511d
JS
17015 lpfc_io_buf_replenish(phba, &nvme_nblist);
17016
0794d601 17017 return num_posted;
4f774513
JS
17018}
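/*
 * Illustrative sketch, not part of the driver: the list walk above groups
 * buffers into SGL posting blocks that are broken either at a hole in the
 * xri numbering or when a block reaches the non-embedded mailbox limit.
 * The simplified program below applies the same rule to a plain array of
 * xri values; MAX_BLOCK stands in for LPFC_NEMBED_MBOX_SGL_CNT and the xri
 * values are made up for demonstration.
 */
#include <stdio.h>

#define MAX_BLOCK 4	/* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

int main(void)
{
	/* Hypothetical xris: note the hole between 103 and 200. */
	int xris[] = { 100, 101, 102, 103, 200, 201, 202, 203, 204, 205 };
	int n = sizeof(xris) / sizeof(xris[0]);
	int start = 0, i;

	for (i = 1; i <= n; i++) {
		int hole = (i < n) && (xris[i] != xris[i - 1] + 1);
		int full = (i - start) == MAX_BLOCK;

		/* Close the current block at a hole, a full block, or the end. */
		if (i == n || hole || full) {
			printf("post block: xri %d..%d (%d sgls)\n",
			       xris[start], xris[i - 1], i - start);
			start = i;
		}
	}
	return 0;
}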
17019
17020/**
17021 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17022 * @phba: pointer to lpfc_hba struct that the frame was received on
17023 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17024 *
17025 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17026 * valid type of frame that the LPFC driver will handle. This function
17027 * returns zero if the frame is a valid frame, or a non-zero value when the
17028 * frame does not pass the check.
17029 **/
17030static int
17031lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17032{
474ffb74 17033 /* make rctl_names static to save stack space */
4f774513 17034 struct fc_vft_header *fc_vft_hdr;
546fc854 17035 uint32_t *header = (uint32_t *) fc_hdr;
4f774513
JS
17036
17037 switch (fc_hdr->fh_r_ctl) {
17038 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17039 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17040 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17041 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17042 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17043 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17044 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17045 case FC_RCTL_DD_CMD_STATUS: /* command status */
17046 case FC_RCTL_ELS_REQ: /* extended link services request */
17047 case FC_RCTL_ELS_REP: /* extended link services reply */
17048 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17049 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17050 case FC_RCTL_BA_NOP: /* basic link service NOP */
17051 case FC_RCTL_BA_ABTS: /* basic link service abort */
17052 case FC_RCTL_BA_RMC: /* remove connection */
17053 case FC_RCTL_BA_ACC: /* basic accept */
17054 case FC_RCTL_BA_RJT: /* basic reject */
17055 case FC_RCTL_BA_PRMT:
17056 case FC_RCTL_ACK_1: /* acknowledge_1 */
17057 case FC_RCTL_ACK_0: /* acknowledge_0 */
17058 case FC_RCTL_P_RJT: /* port reject */
17059 case FC_RCTL_F_RJT: /* fabric reject */
17060 case FC_RCTL_P_BSY: /* port busy */
17061 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17062 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17063 case FC_RCTL_LCR: /* link credit reset */
ae9e28f3 17064 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
4f774513
JS
17065 case FC_RCTL_END: /* end */
17066 break;
17067 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17068 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17069 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17070 return lpfc_fc_frame_check(phba, fc_hdr);
17071 default:
17072 goto drop;
17073 }
ae9e28f3 17074
4f774513
JS
17075 switch (fc_hdr->fh_type) {
17076 case FC_TYPE_BLS:
17077 case FC_TYPE_ELS:
17078 case FC_TYPE_FCP:
17079 case FC_TYPE_CT:
895427bd 17080 case FC_TYPE_NVME:
4f774513
JS
17081 break;
17082 case FC_TYPE_IP:
17083 case FC_TYPE_ILS:
17084 default:
17085 goto drop;
17086 }
546fc854 17087
4f774513 17088 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
78e1d200 17089 "2538 Received frame rctl:x%x, type:x%x, "
88f43a08 17090 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
78e1d200
JS
17091 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17092 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17093 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17094 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17095 be32_to_cpu(header[6]));
4f774513
JS
17096 return 0;
17097drop:
17098 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
78e1d200
JS
17099 "2539 Dropped frame rctl:x%x type:x%x\n",
17100 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
4f774513
JS
17101 return 1;
17102}
17103
17104/**
17105 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17106 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17107 *
17108 * This function processes the FC header to retrieve the VFI from the VF
17109 * header, if one exists. This function will return the VFI if one exists
17110 * or 0 if no Virtual Fabric Tagging Header exists.
17111 **/
17112static uint32_t
17113lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17114{
17115 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17116
17117 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17118 return 0;
17119 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17120}
17121
17122/**
17123 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17124 * @phba: Pointer to the HBA structure to search for the vport on
17125 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17126 * @fcfi: The FC Fabric ID that the frame came from
17127 *
17128 * This function searches the @phba for a vport that matches the content of the
17129 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17130 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17131 * returns the matching vport pointer or NULL if unable to match frame to a
17132 * vport.
17133 **/
17134static struct lpfc_vport *
17135lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
895427bd 17136 uint16_t fcfi, uint32_t did)
4f774513
JS
17137{
17138 struct lpfc_vport **vports;
17139 struct lpfc_vport *vport = NULL;
17140 int i;
939723a4 17141
bf08611b
JS
17142 if (did == Fabric_DID)
17143 return phba->pport;
939723a4
JS
17144 if ((phba->pport->fc_flag & FC_PT2PT) &&
17145 !(phba->link_state == LPFC_HBA_READY))
17146 return phba->pport;
17147
4f774513 17148 vports = lpfc_create_vport_work_array(phba);
895427bd 17149 if (vports != NULL) {
4f774513
JS
17150 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17151 if (phba->fcf.fcfi == fcfi &&
17152 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17153 vports[i]->fc_myDID == did) {
17154 vport = vports[i];
17155 break;
17156 }
17157 }
895427bd 17158 }
4f774513
JS
17159 lpfc_destroy_vport_work_array(phba, vports);
17160 return vport;
17161}
17162
45ed1190
JS
17163/**
17164 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17165 * @vport: The vport to work on.
17166 *
17167 * This function updates the receive sequence time stamp for this vport. The
17168 * receive sequence time stamp indicates the time that the last frame of
17169 * the sequence that has been idle for the longest amount of time was received.
17170 * The driver uses this time stamp to determine whether any received sequences
17171 * have timed out.
17172 **/
5d8b8167 17173static void
45ed1190
JS
17174lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17175{
17176 struct lpfc_dmabuf *h_buf;
17177 struct hbq_dmabuf *dmabuf = NULL;
17178
17179 /* get the oldest sequence on the rcv list */
17180 h_buf = list_get_first(&vport->rcv_buffer_list,
17181 struct lpfc_dmabuf, list);
17182 if (!h_buf)
17183 return;
17184 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17185 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17186}
17187
17188/**
17189 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17190 * @vport: The vport that the received sequences were sent to.
17191 *
17192 * This function cleans up all outstanding received sequences. This is called
17193 * by the driver when a link event or user action invalidates all the received
17194 * sequences.
17195 **/
17196void
17197lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17198{
17199 struct lpfc_dmabuf *h_buf, *hnext;
17200 struct lpfc_dmabuf *d_buf, *dnext;
17201 struct hbq_dmabuf *dmabuf = NULL;
17202
17203 /* start with the oldest sequence on the rcv list */
17204 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17205 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17206 list_del_init(&dmabuf->hbuf.list);
17207 list_for_each_entry_safe(d_buf, dnext,
17208 &dmabuf->dbuf.list, list) {
17209 list_del_init(&d_buf->list);
17210 lpfc_in_buf_free(vport->phba, d_buf);
17211 }
17212 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17213 }
17214}
17215
17216/**
17217 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17218 * @vport: The vport that the received sequences were sent to.
17219 *
17220 * This function determines whether any received sequences have timed out by
17221 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17222 * indicates that there is at least one timed out sequence this routine will
17223 * go through the received sequences one at a time from most inactive to most
17224 * active to determine which ones need to be cleaned up. Once it has determined
17225 * that a sequence needs to be cleaned up it will simply free up the resources
17226 * without sending an abort.
17227 **/
17228void
17229lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17230{
17231 struct lpfc_dmabuf *h_buf, *hnext;
17232 struct lpfc_dmabuf *d_buf, *dnext;
17233 struct hbq_dmabuf *dmabuf = NULL;
17234 unsigned long timeout;
17235 int abort_count = 0;
17236
17237 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17238 vport->rcv_buffer_time_stamp);
17239 if (list_empty(&vport->rcv_buffer_list) ||
17240 time_before(jiffies, timeout))
17241 return;
17242 /* start with the oldest sequence on the rcv list */
17243 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17244 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17245 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17246 dmabuf->time_stamp);
17247 if (time_before(jiffies, timeout))
17248 break;
17249 abort_count++;
17250 list_del_init(&dmabuf->hbuf.list);
17251 list_for_each_entry_safe(d_buf, dnext,
17252 &dmabuf->dbuf.list, list) {
17253 list_del_init(&d_buf->list);
17254 lpfc_in_buf_free(vport->phba, d_buf);
17255 }
17256 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17257 }
17258 if (abort_count)
17259 lpfc_update_rcv_time_stamp(vport);
17260}
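/*
 * Illustrative sketch, not part of the driver: the cleanup above walks the
 * receive list oldest-first and stops at the first sequence whose
 * per-sequence time stamp has not yet aged past the E_D_TOV window. The
 * standalone sketch below shows the same oldest-first early-exit pattern
 * with plain integer time stamps instead of jiffies/time_before().
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-sequence time stamps, oldest first. */
	int stamps[] = { 10, 25, 90, 95 };
	int n = sizeof(stamps) / sizeof(stamps[0]);
	int now = 100, edtov = 50;	/* made-up "current time" and timeout */
	int i, freed = 0;

	for (i = 0; i < n; i++) {
		/* List is ordered oldest-first, so stop at the first survivor. */
		if (now < stamps[i] + edtov)
			break;
		printf("freeing timed out sequence with stamp %d\n", stamps[i]);
		freed++;
	}
	printf("%d sequence(s) freed, %d kept\n", freed, n - i);
	return 0;
}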
17261
4f774513
JS
17262/**
17263 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17264 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17265 *
17266 * This function searches through the existing incomplete sequences that have
17267 * been sent to this @vport. If the frame matches one of the incomplete
17268 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17269 * make up that sequence. If no sequence is found that matches this frame then
17270 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17271 * This function returns a pointer to the first dmabuf in the sequence list that
17272 * the frame was linked to.
17273 **/
17274static struct hbq_dmabuf *
17275lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17276{
17277 struct fc_frame_header *new_hdr;
17278 struct fc_frame_header *temp_hdr;
17279 struct lpfc_dmabuf *d_buf;
17280 struct lpfc_dmabuf *h_buf;
17281 struct hbq_dmabuf *seq_dmabuf = NULL;
17282 struct hbq_dmabuf *temp_dmabuf = NULL;
4360ca9c 17283 uint8_t found = 0;
4f774513 17284
4d9ab994 17285 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 17286 dmabuf->time_stamp = jiffies;
4f774513 17287 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
4360ca9c 17288
4f774513
JS
17289 /* Use the hdr_buf to find the sequence that this frame belongs to */
17290 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17291 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17292 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17293 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17294 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17295 continue;
17296 /* found a pending sequence that matches this frame */
17297 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17298 break;
17299 }
17300 if (!seq_dmabuf) {
17301 /*
17302 * This indicates first frame received for this sequence.
17303 * Queue the buffer on the vport's rcv_buffer_list.
17304 */
17305 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 17306 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17307 return dmabuf;
17308 }
17309 temp_hdr = seq_dmabuf->hbuf.virt;
eeead811
JS
17310 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17311 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4d9ab994
JS
17312 list_del_init(&seq_dmabuf->hbuf.list);
17313 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17314 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 17315 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17316 return dmabuf;
17317 }
45ed1190
JS
17318 /* move this sequence to the tail to indicate a young sequence */
17319 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17320 seq_dmabuf->time_stamp = jiffies;
17321 lpfc_update_rcv_time_stamp(vport);
eeead811
JS
17322 if (list_empty(&seq_dmabuf->dbuf.list)) {
17323 temp_hdr = dmabuf->hbuf.virt;
17324 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17325 return seq_dmabuf;
17326 }
4f774513 17327 /* find the correct place in the sequence to insert this frame */
4360ca9c
JS
17328 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17329 while (!found) {
4f774513
JS
17330 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17331 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17332 /*
17333 * If the frame's sequence count is greater than the frame on
17334 * the list then insert the frame right after this frame
17335 */
eeead811
JS
17336 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17337 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4f774513 17338 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
4360ca9c
JS
17339 found = 1;
17340 break;
4f774513 17341 }
4360ca9c
JS
17342
17343 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17344 break;
17345 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
4f774513 17346 }
4360ca9c
JS
17347
17348 if (found)
17349 return seq_dmabuf;
4f774513
JS
17350 return NULL;
17351}
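/*
 * Illustrative sketch, not part of the driver: when a frame joins an
 * existing partial sequence above, it is inserted in SEQ_CNT order by
 * scanning the data-buffer list from the tail (newest) toward the head.
 * The simplified array version below shows the same tail-first search for
 * the insertion point; the sequence counts are made-up host-order values
 * rather than the big-endian fh_seq_cnt fields of a real FC header.
 */
#include <stdio.h>

#define MAX_FRAMES 16

int main(void)
{
	int seq[MAX_FRAMES] = { 0, 1, 2, 5, 6 };	/* existing frames, ascending */
	int n = 5;
	int new_cnt = 4;	/* hypothetical newly received frame */
	int pos, i;

	/* Scan from the tail for the first entry the new frame follows. */
	for (pos = n; pos > 0; pos--)
		if (new_cnt > seq[pos - 1])
			break;

	/* Shift the tail right and drop the new frame into place. */
	for (i = n; i > pos; i--)
		seq[i] = seq[i - 1];
	seq[pos] = new_cnt;
	n++;

	for (i = 0; i < n; i++)
		printf("%d ", seq[i]);
	printf("\n");
	return 0;
}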
17352
6669f9bb
JS
17353/**
17354 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17355 * @vport: pointer to a virtual port
17356 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17357 *
17358 * This function tries to abort the partially assembled sequence described
17359 * by the information from the basic abort @dmabuf. It checks to see whether
17360 * such a partially assembled sequence is held by the driver. If so, it frees
17361 * up all the frames from the partially assembled sequence.
17362 *
17363 * Return
17364 * true -- if a matching partially assembled sequence is present and all
17365 * of its frames have been freed;
17366 * false -- if there is no matching partially assembled sequence, so
17367 * nothing was aborted in the lower layer driver
17368 **/
17369static bool
17370lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17371 struct hbq_dmabuf *dmabuf)
17372{
17373 struct fc_frame_header *new_hdr;
17374 struct fc_frame_header *temp_hdr;
17375 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17376 struct hbq_dmabuf *seq_dmabuf = NULL;
17377
17378 /* Use the hdr_buf to find the sequence that matches this frame */
17379 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17380 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17381 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17382 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17383 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17384 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17385 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17386 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17387 continue;
17388 /* found a pending sequence that matches this frame */
17389 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17390 break;
17391 }
17392
17393 /* Free up all the frames from the partially assembled sequence */
17394 if (seq_dmabuf) {
17395 list_for_each_entry_safe(d_buf, n_buf,
17396 &seq_dmabuf->dbuf.list, list) {
17397 list_del_init(&d_buf->list);
17398 lpfc_in_buf_free(vport->phba, d_buf);
17399 }
17400 return true;
17401 }
17402 return false;
17403}
17404
6dd9e31c
JS
17405/**
17406 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17407 * @vport: pointer to a virtual port
17408 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17409 *
17410 * This function tries to abort the sequence already assembled and passed to
17411 * the upper level protocol, described by the information from the basic
17412 * abort @dmabuf. It checks to see whether such a pending context exists at
17413 * the upper level protocol. If so, it shall clean up the pending context.
17414 *
17415 * Return
17416 * true -- if a matching pending context of the sequence was found and
17417 * cleaned up at the ulp;
17418 * false -- if no matching pending context of the sequence is present
17419 * at the ulp.
17420 **/
17421static bool
17422lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17423{
17424 struct lpfc_hba *phba = vport->phba;
17425 int handled;
17426
17427 /* Accepting abort at ulp with SLI4 only */
17428 if (phba->sli_rev < LPFC_SLI_REV4)
17429 return false;
17430
17431 /* Give all interested upper level protocols a chance to handle the abort */
17432 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17433 if (handled)
17434 return true;
17435
17436 return false;
17437}
17438
6669f9bb 17439/**
546fc854 17440 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
6669f9bb
JS
17441 * @phba: Pointer to HBA context object.
17442 * @cmd_iocbq: pointer to the command iocbq structure.
17443 * @rsp_iocbq: pointer to the response iocbq structure.
17444 *
546fc854 17445 * This function handles the sequence abort response iocb command complete
6669f9bb
JS
17446 * event. It properly releases the memory allocated to the sequence abort
17447 * response iocb.
17448 **/
17449static void
546fc854 17450lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
6669f9bb
JS
17451 struct lpfc_iocbq *cmd_iocbq,
17452 struct lpfc_iocbq *rsp_iocbq)
17453{
6dd9e31c
JS
17454 struct lpfc_nodelist *ndlp;
17455
17456 if (cmd_iocbq) {
17457 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17458 lpfc_nlp_put(ndlp);
17459 lpfc_nlp_not_used(ndlp);
6669f9bb 17460 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 17461 }
6b5151fd
JS
17462
17463 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17464 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17466 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17467 rsp_iocbq->iocb.ulpStatus,
17468 rsp_iocbq->iocb.un.ulpWord[4]);
6669f9bb
JS
17469}
17470
6d368e53
JS
17471/**
17472 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17473 * @phba: Pointer to HBA context object.
17474 * @xri: xri id in transaction.
17475 *
17476 * This function validates that the xri maps to the known range of XRIs
17477 * allocated and used by the driver.
17478 **/
7851fe2c 17479uint16_t
6d368e53
JS
17480lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17481 uint16_t xri)
17482{
a2fc4aef 17483 uint16_t i;
6d368e53
JS
17484
17485 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17486 if (xri == phba->sli4_hba.xri_ids[i])
17487 return i;
17488 }
17489 return NO_XRI;
17490}
17491
6669f9bb 17492/**
546fc854 17493 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
6669f9bb
JS
17494 * @vport: pointer to the vport that received the sequence abort.
17495 * @fc_hdr: pointer to a FC frame header.
17496 *
546fc854 17497 * This function sends a basic response to a previous unsol sequence abort
6669f9bb
JS
17498 * event after aborting the sequence handling.
17499 **/
86c67379 17500void
6dd9e31c
JS
17501lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17502 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 17503{
6dd9e31c 17504 struct lpfc_hba *phba = vport->phba;
6669f9bb
JS
17505 struct lpfc_iocbq *ctiocb = NULL;
17506 struct lpfc_nodelist *ndlp;
ee0f4fe1 17507 uint16_t oxid, rxid, xri, lxri;
5ffc266e 17508 uint32_t sid, fctl;
6669f9bb 17509 IOCB_t *icmd;
546fc854 17510 int rc;
6669f9bb
JS
17511
17512 if (!lpfc_is_link_up(phba))
17513 return;
17514
17515 sid = sli4_sid_from_fc_hdr(fc_hdr);
17516 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 17517 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 17518
6dd9e31c 17519 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 17520 if (!ndlp) {
9d3d340d 17521 ndlp = lpfc_nlp_init(vport, sid);
6dd9e31c
JS
17522 if (!ndlp) {
17523 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17524 "1268 Failed to allocate ndlp for "
17525 "oxid:x%x SID:x%x\n", oxid, sid);
17526 return;
17527 }
6dd9e31c
JS
17528 /* Put ndlp onto pport node list */
17529 lpfc_enqueue_node(vport, ndlp);
17530 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17531 /* re-setup ndlp without removing from node list */
17532 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17533 if (!ndlp) {
17534 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17535 "3275 Failed to active ndlp found "
17536 "for oxid:x%x SID:x%x\n", oxid, sid);
17537 return;
17538 }
6669f9bb
JS
17539 }
17540
546fc854 17541 /* Allocate buffer for rsp iocb */
6669f9bb
JS
17542 ctiocb = lpfc_sli_get_iocbq(phba);
17543 if (!ctiocb)
17544 return;
17545
5ffc266e
JS
17546 /* Extract the F_CTL field from FC_HDR */
17547 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17548
6669f9bb 17549 icmd = &ctiocb->iocb;
6669f9bb 17550 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 17551 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
6669f9bb
JS
17552 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17553 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17554 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17555
17556 /* Fill in the rest of iocb fields */
17557 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17558 icmd->ulpBdeCount = 0;
17559 icmd->ulpLe = 1;
17560 icmd->ulpClass = CLASS3;
6d368e53 17561 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 17562 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 17563
6669f9bb
JS
17564 ctiocb->iocb_cmpl = NULL;
17565 ctiocb->vport = phba->pport;
546fc854 17566 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 17567 ctiocb->sli4_lxritag = NO_XRI;
546fc854
JS
17568 ctiocb->sli4_xritag = NO_XRI;
17569
ee0f4fe1
JS
17570 if (fctl & FC_FC_EX_CTX)
17571 /* Exchange responder sent the abort so we
17572 * own the oxid.
17573 */
17574 xri = oxid;
17575 else
17576 xri = rxid;
17577 lxri = lpfc_sli4_xri_inrange(phba, xri);
17578 if (lxri != NO_XRI)
17579 lpfc_set_rrq_active(phba, ndlp, lxri,
17580 (xri == oxid) ? rxid : oxid, 0);
6dd9e31c
JS
17581 /* For BA_ABTS from exchange responder, if the logical xri with
17582 * the oxid maps to the FCP XRI range, the port no longer has
17583 * that exchange context, send a BLS_RJT. Override the IOCB for
17584 * a BA_RJT.
17585 */
17586 if ((fctl & FC_FC_EX_CTX) &&
895427bd 17587 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
6dd9e31c
JS
17588 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17589 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17590 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17591 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17592 }
17593
17594 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17595 * the driver no longer has that exchange, send a BLS_RJT. Override
17596 * the IOCB for a BA_RJT.
546fc854 17597 */
6dd9e31c 17598 if (aborted == false) {
546fc854
JS
17599 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17600 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17601 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17602 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17603 }
6669f9bb 17604
5ffc266e
JS
17605 if (fctl & FC_FC_EX_CTX) {
17606 /* ABTS sent by responder to CT exchange, construction
17607 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17608 * field and RX_ID from ABTS for RX_ID field.
17609 */
546fc854 17610 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
5ffc266e
JS
17611 } else {
17612 /* ABTS sent by initiator to CT exchange, construction
17613 * of BA_ACC will need to allocate a new XRI as for the
f09c3acc 17614 * XRI_TAG field.
5ffc266e 17615 */
546fc854 17616 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 17617 }
f09c3acc 17618 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 17619 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 17620
546fc854 17621 /* Xmit CT abts response on exchange <xid> */
6dd9e31c
JS
17622 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17623 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17624 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
546fc854
JS
17625
17626 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17627 if (rc == IOCB_ERROR) {
6dd9e31c
JS
17628 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17629 "2925 Failed to issue CT ABTS RSP x%x on "
17630 "xri x%x, Data x%x\n",
17631 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17632 phba->link_state);
17633 lpfc_nlp_put(ndlp);
17634 ctiocb->context1 = NULL;
546fc854
JS
17635 lpfc_sli_release_iocbq(phba, ctiocb);
17636 }
6669f9bb
JS
17637}
17638
17639/**
17640 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17641 * @vport: Pointer to the vport on which this sequence was received
17642 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17643 *
17644 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17645 * receive sequence is only partially assembled by the driver, it shall abort
17646 * the partially assembled frames for the sequence. Otherwise, if the
17647 * unsolicited receive sequence has been completely assembled and passed to
17648 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
17649 * that the unsolicited sequence has been aborted. After that, it will issue
17650 * a basic accept to accept the abort.
17651 **/
5d8b8167 17652static void
6669f9bb
JS
17653lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17654 struct hbq_dmabuf *dmabuf)
17655{
17656 struct lpfc_hba *phba = vport->phba;
17657 struct fc_frame_header fc_hdr;
5ffc266e 17658 uint32_t fctl;
6dd9e31c 17659 bool aborted;
6669f9bb 17660
6669f9bb
JS
17661 /* Make a copy of fc_hdr before the dmabuf being released */
17662 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 17663 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 17664
5ffc266e 17665 if (fctl & FC_FC_EX_CTX) {
6dd9e31c
JS
17666 /* ABTS by responder to exchange, no cleanup needed */
17667 aborted = true;
5ffc266e 17668 } else {
6dd9e31c
JS
17669 /* ABTS by initiator to exchange, need to do cleanup */
17670 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17671 if (aborted == false)
17672 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 17673 }
6dd9e31c
JS
17674 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17675
86c67379
JS
17676 if (phba->nvmet_support) {
17677 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17678 return;
17679 }
17680
6dd9e31c
JS
17681 /* Respond with BA_ACC or BA_RJT accordingly */
17682 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
6669f9bb
JS
17683}
17684
4f774513
JS
17685/**
17686 * lpfc_seq_complete - Indicates if a sequence is complete
17687 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17688 *
17689 * This function checks the sequence, starting with the frame described by
17690 * @dmabuf, to see if all the frames associated with this sequence are present.
17691 * The frames associated with this sequence are linked to the @dmabuf using
17692 * the dbuf list. This function looks for three major things. 1) That the first
17693 * frame has a sequence count of zero. 2) That a frame has the last frame of
17694 * sequence bit set. 3) That there are no holes in the sequence count. The
17695 * function will return 1 when the sequence is complete, otherwise it returns 0.
17696 **/
17697static int
17698lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17699{
17700 struct fc_frame_header *hdr;
17701 struct lpfc_dmabuf *d_buf;
17702 struct hbq_dmabuf *seq_dmabuf;
17703 uint32_t fctl;
17704 int seq_count = 0;
17705
17706 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17707 /* make sure first frame of sequence has a sequence count of zero */
17708 if (hdr->fh_seq_cnt != seq_count)
17709 return 0;
17710 fctl = (hdr->fh_f_ctl[0] << 16 |
17711 hdr->fh_f_ctl[1] << 8 |
17712 hdr->fh_f_ctl[2]);
17713 /* If last frame of sequence we can return success. */
17714 if (fctl & FC_FC_END_SEQ)
17715 return 1;
17716 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17717 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17718 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17719 /* If there is a hole in the sequence count then fail. */
eeead811 17720 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
4f774513
JS
17721 return 0;
17722 fctl = (hdr->fh_f_ctl[0] << 16 |
17723 hdr->fh_f_ctl[1] << 8 |
17724 hdr->fh_f_ctl[2]);
17725 /* If last frame of sequence we can return success. */
17726 if (fctl & FC_FC_END_SEQ)
17727 return 1;
17728 }
17729 return 0;
17730}
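/*
 * Illustrative sketch, not part of the driver: lpfc_seq_complete() above
 * declares a sequence complete when the first frame has SEQ_CNT zero, the
 * counts increase with no holes, and some frame carries the end-of-sequence
 * F_CTL bit. The standalone check below applies the same three rules to a
 * plain array; END_SEQ is a made-up flag standing in for FC_FC_END_SEQ and
 * the frames are fabricated for demonstration.
 */
#include <stdio.h>

#define END_SEQ 0x1	/* stand-in for FC_FC_END_SEQ */

struct frame {
	int seq_cnt;
	int fctl;
};

static int seq_complete(const struct frame *f, int n)
{
	int i;

	if (n == 0 || f[0].seq_cnt != 0)
		return 0;			/* must start at zero */
	for (i = 0; i < n; i++) {
		if (f[i].seq_cnt != i)
			return 0;		/* hole in the sequence count */
		if (f[i].fctl & END_SEQ)
			return 1;		/* saw the last frame */
	}
	return 0;				/* no end-of-sequence frame yet */
}

int main(void)
{
	struct frame ok[]   = { {0, 0}, {1, 0}, {2, END_SEQ} };
	struct frame hole[] = { {0, 0}, {2, END_SEQ} };

	printf("ok:   %d\n", seq_complete(ok, 3));	/* prints 1 */
	printf("hole: %d\n", seq_complete(hole, 2));	/* prints 0 */
	return 0;
}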
17731
17732/**
17733 * lpfc_prep_seq - Prep sequence for ULP processing
17734 * @vport: Pointer to the vport on which this sequence was received
17735 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17736 *
17737 * This function takes a sequence, described by a list of frames, and creates
17738 * a list of iocbq structures to describe the sequence. This iocbq list will
17739 * then be passed to the generic unsolicited sequence handler. This routine
17740 * returns a pointer to the first iocbq in the list. If the function is unable
17741 * to allocate an iocbq then it throws out the received frames that were not
17742 * able to be described and returns a pointer to the first iocbq. If unable to
17743 * allocate any iocbqs (including the first) this function will return NULL.
17744 **/
17745static struct lpfc_iocbq *
17746lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17747{
7851fe2c 17748 struct hbq_dmabuf *hbq_buf;
4f774513
JS
17749 struct lpfc_dmabuf *d_buf, *n_buf;
17750 struct lpfc_iocbq *first_iocbq, *iocbq;
17751 struct fc_frame_header *fc_hdr;
17752 uint32_t sid;
7851fe2c 17753 uint32_t len, tot_len;
eeead811 17754 struct ulp_bde64 *pbde;
4f774513
JS
17755
17756 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17757 /* remove from receive buffer list */
17758 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 17759 lpfc_update_rcv_time_stamp(vport);
4f774513 17760 /* get the Remote Port's SID */
6669f9bb 17761 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 17762 tot_len = 0;
4f774513
JS
17763 /* Get an iocbq struct to fill in. */
17764 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17765 if (first_iocbq) {
17766 /* Initialize the first IOCB. */
8fa38513 17767 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 17768 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
895427bd 17769 first_iocbq->vport = vport;
939723a4
JS
17770
17771 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17772 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17773 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17774 first_iocbq->iocb.un.rcvels.parmRo =
17775 sli4_did_from_fc_hdr(fc_hdr);
17776 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17777 } else
17778 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
7851fe2c
JS
17779 first_iocbq->iocb.ulpContext = NO_XRI;
17780 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17781 be16_to_cpu(fc_hdr->fh_ox_id);
17782 /* iocbq is prepped for internal consumption. Physical vpi. */
17783 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17784 vport->phba->vpi_ids[vport->vpi];
4f774513 17785 /* put the first buffer into the first IOCBq */
48a5a664
JS
17786 tot_len = bf_get(lpfc_rcqe_length,
17787 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17788
4f774513
JS
17789 first_iocbq->context2 = &seq_dmabuf->dbuf;
17790 first_iocbq->context3 = NULL;
17791 first_iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17792 if (tot_len > LPFC_DATA_BUF_SIZE)
17793 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17794 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17795 else
17796 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17797
4f774513 17798 first_iocbq->iocb.un.rcvels.remoteID = sid;
48a5a664 17799
7851fe2c 17800 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
4f774513
JS
17801 }
17802 iocbq = first_iocbq;
17803 /*
17804 * Each IOCBq can have two Buffers assigned, so go through the list
17805 * of buffers for this sequence and save two buffers in each IOCBq
17806 */
17807 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17808 if (!iocbq) {
17809 lpfc_in_buf_free(vport->phba, d_buf);
17810 continue;
17811 }
17812 if (!iocbq->context3) {
17813 iocbq->context3 = d_buf;
17814 iocbq->iocb.ulpBdeCount++;
7851fe2c
JS
17815 /* We need to get the size out of the right CQE */
17816 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17817 len = bf_get(lpfc_rcqe_length,
17818 &hbq_buf->cq_event.cqe.rcqe_cmpl);
48a5a664
JS
17819 pbde = (struct ulp_bde64 *)
17820 &iocbq->iocb.unsli3.sli3Words[4];
17821 if (len > LPFC_DATA_BUF_SIZE)
17822 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17823 else
17824 pbde->tus.f.bdeSize = len;
17825
7851fe2c
JS
17826 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17827 tot_len += len;
4f774513
JS
17828 } else {
17829 iocbq = lpfc_sli_get_iocbq(vport->phba);
17830 if (!iocbq) {
17831 if (first_iocbq) {
17832 first_iocbq->iocb.ulpStatus =
17833 IOSTAT_FCP_RSP_ERROR;
17834 first_iocbq->iocb.un.ulpWord[4] =
17835 IOERR_NO_RESOURCES;
17836 }
17837 lpfc_in_buf_free(vport->phba, d_buf);
17838 continue;
17839 }
48a5a664
JS
17840 /* We need to get the size out of the right CQE */
17841 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17842 len = bf_get(lpfc_rcqe_length,
17843 &hbq_buf->cq_event.cqe.rcqe_cmpl);
4f774513
JS
17844 iocbq->context2 = d_buf;
17845 iocbq->context3 = NULL;
17846 iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17847 if (len > LPFC_DATA_BUF_SIZE)
17848 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17849 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17850 else
17851 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
7851fe2c 17852
7851fe2c
JS
17853 tot_len += len;
17854 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17855
4f774513
JS
17856 iocbq->iocb.un.rcvels.remoteID = sid;
17857 list_add_tail(&iocbq->list, &first_iocbq->list);
17858 }
17859 }
17860 return first_iocbq;
17861}
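/*
 * Illustrative sketch, not part of the driver: lpfc_prep_seq() above
 * describes each received sequence with a chain of iocbqs, placing at most
 * two data buffers (context2 and context3) in each iocbq. The sketch below
 * shows the same two-per-descriptor packing over a made-up buffer count; it
 * only counts descriptors and does not model BDE sizes.
 */
#include <stdio.h>

int main(void)
{
	int nbufs = 5;		/* hypothetical buffers in the sequence */
	int iocbqs = 0, slots = 0, i;

	for (i = 0; i < nbufs; i++) {
		if (slots == 0) {
			iocbqs++;	/* start a new iocbq for this buffer */
			slots = 2;	/* it can hold two buffers */
		}
		slots--;
	}
	printf("%d buffer(s) packed into %d iocbq(s)\n", nbufs, iocbqs);
	return 0;
}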
17862
6669f9bb
JS
17863static void
17864lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17865 struct hbq_dmabuf *seq_dmabuf)
17866{
17867 struct fc_frame_header *fc_hdr;
17868 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17869 struct lpfc_hba *phba = vport->phba;
17870
17871 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17872 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17873 if (!iocbq) {
17874 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17875 "2707 Ring %d handler: Failed to allocate "
17876 "iocb Rctl x%x Type x%x received\n",
17877 LPFC_ELS_RING,
17878 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17879 return;
17880 }
17881 if (!lpfc_complete_unsol_iocb(phba,
895427bd 17882 phba->sli4_hba.els_wq->pring,
6669f9bb
JS
17883 iocbq, fc_hdr->fh_r_ctl,
17884 fc_hdr->fh_type))
6d368e53 17885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6669f9bb
JS
17886 "2540 Ring %d handler: unexpected Rctl "
17887 "x%x Type x%x received\n",
17888 LPFC_ELS_RING,
17889 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17890
17891 /* Free iocb created in lpfc_prep_seq */
17892 list_for_each_entry_safe(curr_iocb, next_iocb,
17893 &iocbq->list, list) {
17894 list_del_init(&curr_iocb->list);
17895 lpfc_sli_release_iocbq(phba, curr_iocb);
17896 }
17897 lpfc_sli_release_iocbq(phba, iocbq);
17898}
17899
ae9e28f3
JS
17900static void
17901lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17902 struct lpfc_iocbq *rspiocb)
17903{
17904 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17905
17906 if (pcmd && pcmd->virt)
771db5c0 17907 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3
JS
17908 kfree(pcmd);
17909 lpfc_sli_release_iocbq(phba, cmdiocb);
e817e5d7 17910 lpfc_drain_txq(phba);
ae9e28f3
JS
17911}
17912
17913static void
17914lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17915 struct hbq_dmabuf *dmabuf)
17916{
17917 struct fc_frame_header *fc_hdr;
17918 struct lpfc_hba *phba = vport->phba;
17919 struct lpfc_iocbq *iocbq = NULL;
17920 union lpfc_wqe *wqe;
17921 struct lpfc_dmabuf *pcmd = NULL;
17922 uint32_t frame_len;
17923 int rc;
e817e5d7 17924 unsigned long iflags;
ae9e28f3
JS
17925
17926 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17927 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17928
17929 /* Send the received frame back */
17930 iocbq = lpfc_sli_get_iocbq(phba);
e817e5d7
JS
17931 if (!iocbq) {
17932 /* Queue cq event and wakeup worker thread to process it */
17933 spin_lock_irqsave(&phba->hbalock, iflags);
17934 list_add_tail(&dmabuf->cq_event.list,
17935 &phba->sli4_hba.sp_queue_event);
17936 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17937 spin_unlock_irqrestore(&phba->hbalock, iflags);
17938 lpfc_worker_wake_up(phba);
17939 return;
17940 }
ae9e28f3
JS
17941
17942 /* Allocate buffer for command payload */
17943 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17944 if (pcmd)
771db5c0 17945 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
ae9e28f3
JS
17946 &pcmd->phys);
17947 if (!pcmd || !pcmd->virt)
17948 goto exit;
17949
17950 INIT_LIST_HEAD(&pcmd->list);
17951
17952 /* copyin the payload */
17953 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17954
17955 /* fill in BDE's for command */
17956 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17957 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17958 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17959 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17960
17961 iocbq->context2 = pcmd;
17962 iocbq->vport = vport;
17963 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17964 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17965
17966 /*
17967 * Setup rest of the iocb as though it were a WQE
17968 * Build the SEND_FRAME WQE
17969 */
17970 wqe = (union lpfc_wqe *)&iocbq->iocb;
17971
17972 wqe->send_frame.frame_len = frame_len;
17973 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17974 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17975 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17976 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17977 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17978 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17979
17980 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17981 iocbq->iocb.ulpLe = 1;
17982 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17983 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17984 if (rc == IOCB_ERROR)
17985 goto exit;
17986
17987 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17988 return;
17989
17990exit:
17991 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17992 "2023 Unable to process MDS loopback frame\n");
17993 if (pcmd && pcmd->virt)
771db5c0 17994 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3 17995 kfree(pcmd);
401bb416
DK
17996 if (iocbq)
17997 lpfc_sli_release_iocbq(phba, iocbq);
ae9e28f3
JS
17998 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17999}
18000
4f774513
JS
18001/**
18002 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18003 * @phba: Pointer to HBA context object.
18004 *
18005 * This function is called with no lock held. This function processes all
18006 * the received buffers and gives them to upper layers when a received buffer
18007 * indicates that it is the final frame in the sequence. The interrupt
895427bd 18008 * service routine processes received buffers in interrupt context.
4f774513
JS
18009 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18010 * appropriate receive function when the final frame in a sequence is received.
18011 **/
4d9ab994
JS
18012void
18013lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18014 struct hbq_dmabuf *dmabuf)
4f774513 18015{
4d9ab994 18016 struct hbq_dmabuf *seq_dmabuf;
4f774513
JS
18017 struct fc_frame_header *fc_hdr;
18018 struct lpfc_vport *vport;
18019 uint32_t fcfi;
939723a4 18020 uint32_t did;
4f774513 18021
4f774513 18022 /* Process each received buffer */
4d9ab994 18023 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
2ea259ee 18024
e817e5d7
JS
18025 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18026 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18027 vport = phba->pport;
18028 /* Handle MDS Loopback frames */
18029 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18030 return;
18031 }
18032
4d9ab994
JS
18033 /* check to see if this a valid type of frame */
18034 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18035 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18036 return;
18037 }
2ea259ee 18038
7851fe2c
JS
18039 if ((bf_get(lpfc_cqe_code,
18040 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18041 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18042 &dmabuf->cq_event.cqe.rcqe_cmpl);
18043 else
18044 fcfi = bf_get(lpfc_rcqe_fcf_id,
18045 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 18046
895427bd
JS
18047 /* d_id this frame is directed to */
18048 did = sli4_did_from_fc_hdr(fc_hdr);
18049
18050 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
939723a4 18051 if (!vport) {
4d9ab994
JS
18052 /* throw out the frame */
18053 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18054 return;
18055 }
939723a4 18056
939723a4
JS
18057 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18058 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18059 (did != Fabric_DID)) {
18060 /*
18061 * Throw out the frame if we are not pt2pt.
18062 * The pt2pt protocol allows for discovery frames
18063 * to be received without a registered VPI.
18064 */
18065 if (!(vport->fc_flag & FC_PT2PT) ||
18066 (phba->link_state == LPFC_HBA_READY)) {
18067 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18068 return;
18069 }
18070 }
18071
6669f9bb
JS
18072 /* Handle the basic abort sequence (BA_ABTS) event */
18073 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18074 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18075 return;
18076 }
18077
4d9ab994
JS
18078 /* Link this frame */
18079 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18080 if (!seq_dmabuf) {
18081 /* unable to add frame to vport - throw it out */
18082 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18083 return;
18084 }
18085 /* If not last frame in sequence continue processing frames. */
def9c7a9 18086 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 18087 return;
def9c7a9 18088
6669f9bb
JS
18089 /* Send the complete sequence to the upper layer protocol */
18090 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 18091}
6fb120a7
JS
18092
18093/**
18094 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18095 * @phba: pointer to lpfc hba data structure.
18096 *
18097 * This routine is invoked to post rpi header templates to the
18098 * HBA consistent with the SLI-4 interface spec. This routine
49198b37
JS
18099 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18100 * 64 rpi context headers.
6fb120a7
JS
18101 *
18102 * This routine does not require any locks. Its usage is expected
18103 * to be at driver load or during reset recovery, when driver
18104 * execution is sequential.
18105 *
18106 * Return codes
af901ca1 18107 * 0 - successful
d439d286 18108 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18109 * When this error occurs, the driver is not guaranteed
18110 * to have any rpi regions posted to the device and
18111 * must either attempt to repost the regions or take a
18112 * fatal error.
18113 **/
18114int
18115lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18116{
18117 struct lpfc_rpi_hdr *rpi_page;
18118 uint32_t rc = 0;
6d368e53
JS
18119 uint16_t lrpi = 0;
18120
18121 /* SLI4 ports that support extents do not require RPI headers. */
18122 if (!phba->sli4_hba.rpi_hdrs_in_use)
18123 goto exit;
18124 if (phba->sli4_hba.extents_in_use)
18125 return -EIO;
6fb120a7 18126
6fb120a7 18127 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6d368e53
JS
18128 /*
18129 * Assign the rpi headers a physical rpi only if the driver
18130 * has not initialized those resources. A port reset only
18131 * needs the headers posted.
18132 */
18133 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18134 LPFC_RPI_RSRC_RDY)
18135 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18136
6fb120a7
JS
18137 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18138 if (rc != MBX_SUCCESS) {
18139 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18140 "2008 Error %d posting all rpi "
18141 "headers\n", rc);
18142 rc = -EIO;
18143 break;
18144 }
18145 }
18146
6d368e53
JS
18147 exit:
18148 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18149 LPFC_RPI_RSRC_RDY);
6fb120a7
JS
18150 return rc;
18151}
18152
18153/**
18154 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18155 * @phba: pointer to lpfc hba data structure.
18156 * @rpi_page: pointer to the rpi memory region.
18157 *
18158 * This routine is invoked to post a single rpi header to the
18159 * HBA consistent with the SLI-4 interface spec. This memory region
18160 * maps up to 64 rpi context regions.
18161 *
18162 * Return codes
af901ca1 18163 * 0 - successful
d439d286
JS
18164 * -ENOMEM - No available memory
18165 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18166 **/
18167int
18168lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18169{
18170 LPFC_MBOXQ_t *mboxq;
18171 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18172 uint32_t rc = 0;
6fb120a7
JS
18173 uint32_t shdr_status, shdr_add_status;
18174 union lpfc_sli4_cfg_shdr *shdr;
18175
6d368e53
JS
18176 /* SLI4 ports that support extents do not require RPI headers. */
18177 if (!phba->sli4_hba.rpi_hdrs_in_use)
18178 return rc;
18179 if (phba->sli4_hba.extents_in_use)
18180 return -EIO;
18181
6fb120a7
JS
18182 /* The port is notified of the header region via a mailbox command. */
18183 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18184 if (!mboxq) {
18185 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18186 "2001 Unable to allocate memory for issuing "
18187 "SLI_CONFIG_SPECIAL mailbox command\n");
18188 return -ENOMEM;
18189 }
18190
18191 /* Post all rpi memory regions to the port. */
18192 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
6fb120a7
JS
18193 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18194 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18195 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
fedd3b7b
JS
18196 sizeof(struct lpfc_sli4_cfg_mhdr),
18197 LPFC_SLI4_MBX_EMBED);
6d368e53
JS
18198
18199
18200 /* Post the physical rpi to the port for this rpi header. */
6fb120a7
JS
18201 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18202 rpi_page->start_rpi);
6d368e53
JS
18203 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18204 hdr_tmpl, rpi_page->page_count);
18205
6fb120a7
JS
18206 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18207 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 18208 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6fb120a7
JS
18209 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18210 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18211 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18212 if (rc != MBX_TIMEOUT)
18213 mempool_free(mboxq, phba->mbox_mem_pool);
18214 if (shdr_status || shdr_add_status || rc) {
18215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18216 "2514 POST_RPI_HDR mailbox failed with "
18217 "status x%x add_status x%x, mbx status x%x\n",
18218 shdr_status, shdr_add_status, rc);
18219 rc = -ENXIO;
845d9e8d
JS
18220 } else {
18221 /*
18222 * The next_rpi stores the next logical module-64 rpi value used
18223 * to post physical rpis in subsequent rpi postings.
18224 */
18225 spin_lock_irq(&phba->hbalock);
18226 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18227 spin_unlock_irq(&phba->hbalock);
6fb120a7
JS
18228 }
18229 return rc;
18230}
18231
18232/**
18233 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18234 * @phba: pointer to lpfc hba data structure.
18235 *
18236 * This routine is invoked to allocate an available rpi from the driver's
18237 * rpi bitmask, consistent with the SLI-4 interface spec. If the number of
49198b37
JS
18238 * remaining rpis falls below the low water mark, it also posts another
18239 * rpi header region to the port.
6fb120a7
JS
18240 *
18241 * Returns
af901ca1 18242 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
6fb120a7
JS
18243 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18244 **/
18245int
18246lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18247{
6d368e53
JS
18248 unsigned long rpi;
18249 uint16_t max_rpi, rpi_limit;
18250 uint16_t rpi_remaining, lrpi = 0;
6fb120a7 18251 struct lpfc_rpi_hdr *rpi_hdr;
4902b381 18252 unsigned long iflag;
6fb120a7 18253
6fb120a7 18254 /*
6d368e53
JS
18255 * Fetch the next logical rpi. Because this index is logical,
18256 * the driver starts at 0 each time.
6fb120a7 18257 */
4902b381 18258 spin_lock_irqsave(&phba->hbalock, iflag);
be6bb941
JS
18259 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18260 rpi_limit = phba->sli4_hba.next_rpi;
18261
6d368e53
JS
18262 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18263 if (rpi >= rpi_limit)
6fb120a7
JS
18264 rpi = LPFC_RPI_ALLOC_ERROR;
18265 else {
18266 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18267 phba->sli4_hba.max_cfg_param.rpi_used++;
18268 phba->sli4_hba.rpi_count++;
18269 }
be6bb941
JS
18270 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18271 "0001 rpi:%x max:%x lim:%x\n",
18272 (int) rpi, max_rpi, rpi_limit);
6fb120a7
JS
18273
18274 /*
18275 * Don't try to allocate more rpi header regions if the device limit
6d368e53 18276 * has been exhausted.
6fb120a7
JS
18277 */
18278 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18279 (phba->sli4_hba.rpi_count >= max_rpi)) {
4902b381 18280 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18281 return rpi;
18282 }
18283
6d368e53
JS
18284 /*
18285 * RPI header postings are not required for SLI4 ports capable of
18286 * extents.
18287 */
18288 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4902b381 18289 spin_unlock_irqrestore(&phba->hbalock, iflag);
6d368e53
JS
18290 return rpi;
18291 }
18292
6fb120a7
JS
18293 /*
18294 * If the driver is running low on rpi resources, allocate another
18295 * page now. Note that the next_rpi value is used because
18296 * it represents how many are actually in use whereas max_rpi notes
 18297	 * the maximum number supported by the device.
18298 */
6d368e53 18299 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
4902b381 18300 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18301 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18302 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18303 if (!rpi_hdr) {
18304 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18305 "2002 Error Could not grow rpi "
18306 "count\n");
18307 } else {
6d368e53
JS
18308 lrpi = rpi_hdr->start_rpi;
18309 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
6fb120a7
JS
18310 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18311 }
18312 }
18313
18314 return rpi;
18315}
18316
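/*
 * Illustrative sketch, not part of the driver: a typical caller pairs
 * lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi() around a registration
 * attempt and treats LPFC_RPI_ALLOC_ERROR as resource exhaustion. The
 * helper name and the -ENOSPC error choice below are hypothetical.
 *
 *	static int example_rpi_alloc(struct lpfc_hba *phba)
 *	{
 *		int rpi = lpfc_sli4_alloc_rpi(phba);
 *
 *		if (rpi == LPFC_RPI_ALLOC_ERROR)
 *			return -ENOSPC;
 *		// ... use rpi, e.g. as part of a REG_LOGIN request ...
 *		lpfc_sli4_free_rpi(phba, rpi);
 *		return 0;
 *	}
 */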
d7c47992
JS
18317/**
 18318 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18319 * @phba: pointer to lpfc hba data structure.
18320 *
18321 * This routine is invoked to release an rpi to the pool of
18322 * available rpis maintained by the driver.
18323 **/
5d8b8167 18324static void
d7c47992
JS
18325__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18326{
18327 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18328 phba->sli4_hba.rpi_count--;
18329 phba->sli4_hba.max_cfg_param.rpi_used--;
18330 }
18331}
18332
6fb120a7
JS
18333/**
18334 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18335 * @phba: pointer to lpfc hba data structure.
18336 *
18337 * This routine is invoked to release an rpi to the pool of
18338 * available rpis maintained by the driver.
18339 **/
18340void
18341lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18342{
18343 spin_lock_irq(&phba->hbalock);
d7c47992 18344 __lpfc_sli4_free_rpi(phba, rpi);
6fb120a7
JS
18345 spin_unlock_irq(&phba->hbalock);
18346}
18347
18348/**
18349 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18350 * @phba: pointer to lpfc hba data structure.
18351 *
 18352 * This routine is invoked to free the rpi bitmask and rpi id array
 18353 * used by the driver for rpi management.
18354 **/
18355void
18356lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18357{
18358 kfree(phba->sli4_hba.rpi_bmask);
6d368e53
JS
18359 kfree(phba->sli4_hba.rpi_ids);
18360 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6fb120a7
JS
18361}
18362
18363/**
 18364 * lpfc_sli4_resume_rpi - Resume an rpi for a remote node
 18365 * @ndlp: pointer to the node whose rpi is to be resumed.
 18366 * @cmpl: optional mailbox completion handler; @arg is passed as its context.
 18367 *
 18368 * This routine issues a RESUME_RPI mailbox command to the port for @ndlp.
18369 **/
18370int
6b5151fd
JS
18371lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18372 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
6fb120a7
JS
18373{
18374 LPFC_MBOXQ_t *mboxq;
18375 struct lpfc_hba *phba = ndlp->phba;
18376 int rc;
18377
18378 /* The port is notified of the header region via a mailbox command. */
18379 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18380 if (!mboxq)
18381 return -ENOMEM;
18382
18383 /* Post all rpi memory regions to the port. */
18384 lpfc_resume_rpi(mboxq, ndlp);
6b5151fd
JS
18385 if (cmpl) {
18386 mboxq->mbox_cmpl = cmpl;
3e1f0718
JS
18387 mboxq->ctx_buf = arg;
18388 mboxq->ctx_ndlp = ndlp;
72859909
JS
18389 } else
18390 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 18391 mboxq->vport = ndlp->vport;
6fb120a7
JS
18392 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18393 if (rc == MBX_NOT_FINISHED) {
18394 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18395 "2010 Resume RPI Mailbox failed "
18396 "status %d, mbxStatus x%x\n", rc,
18397 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18398 mempool_free(mboxq, phba->mbox_mem_pool);
18399 return -EIO;
18400 }
18401 return 0;
18402}
18403
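/*
 * Illustrative sketch, not part of the driver: a minimal completion
 * handler of the shape expected by the @cmpl argument above. The handler
 * name and log number are hypothetical; real handlers typically also act
 * on the node stashed in mboxq->ctx_ndlp before releasing the mailbox.
 *
 *	static void example_resume_rpi_cmpl(struct lpfc_hba *phba,
 *					    LPFC_MBOXQ_t *mboxq)
 *	{
 *		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 *				"xxxx RESUME_RPI completed, status x%x\n",
 *				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}
 */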
18404/**
18405 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 18406 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 18407 *
76a95d75 18408 * This routine is invoked to activate a vpi with the port.
6fb120a7
JS
18409 *
18410 * Returns:
18411 * 0 success
18412 * -Evalue otherwise
18413 **/
18414int
76a95d75 18415lpfc_sli4_init_vpi(struct lpfc_vport *vport)
6fb120a7
JS
18416{
18417 LPFC_MBOXQ_t *mboxq;
18418 int rc = 0;
6a9c52cf 18419 int retval = MBX_SUCCESS;
6fb120a7 18420 uint32_t mbox_tmo;
76a95d75 18421 struct lpfc_hba *phba = vport->phba;
6fb120a7
JS
18422 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18423 if (!mboxq)
18424 return -ENOMEM;
76a95d75 18425 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 18426 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 18427 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 18428 if (rc != MBX_SUCCESS) {
76a95d75 18429 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18430 "2022 INIT VPI Mailbox failed "
18431 "status %d, mbxStatus x%x\n", rc,
18432 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 18433 retval = -EIO;
6fb120a7 18434 }
6a9c52cf 18435 if (rc != MBX_TIMEOUT)
76a95d75 18436 mempool_free(mboxq, vport->phba->mbox_mem_pool);
6a9c52cf
JS
18437
18438 return retval;
6fb120a7
JS
18439}
18440
18441/**
18442 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18443 * @phba: pointer to lpfc hba data structure.
18444 * @mboxq: Pointer to mailbox object.
18445 *
 18446 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 18447 * command issued by lpfc_sli4_add_fcf_record(). It checks the completion
 18448 * status in the mailbox subheader and frees the nonembedded mailbox resources.
18449 **/
18450static void
18451lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18452{
18453 void *virt_addr;
18454 union lpfc_sli4_cfg_shdr *shdr;
18455 uint32_t shdr_status, shdr_add_status;
18456
18457 virt_addr = mboxq->sge_array->addr[0];
18458 /* The IOCTL status is embedded in the mailbox subheader. */
18459 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18460 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18461 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18462
18463 if ((shdr_status || shdr_add_status) &&
18464 (shdr_status != STATUS_FCF_IN_USE))
18465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18466 "2558 ADD_FCF_RECORD mailbox failed with "
18467 "status x%x add_status x%x\n",
18468 shdr_status, shdr_add_status);
18469
18470 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18471}
18472
18473/**
18474 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18475 * @phba: pointer to lpfc hba data structure.
18476 * @fcf_record: pointer to the initialized fcf record to add.
18477 *
18478 * This routine is invoked to manually add a single FCF record. The caller
18479 * must pass a completely initialized FCF_Record. This routine takes
18480 * care of the nonembedded mailbox operations.
18481 **/
18482int
18483lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18484{
18485 int rc = 0;
18486 LPFC_MBOXQ_t *mboxq;
18487 uint8_t *bytep;
18488 void *virt_addr;
6fb120a7
JS
18489 struct lpfc_mbx_sge sge;
18490 uint32_t alloc_len, req_len;
18491 uint32_t fcfindex;
18492
18493 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18494 if (!mboxq) {
18495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18496 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18497 return -ENOMEM;
18498 }
18499
18500 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18501 sizeof(uint32_t);
18502
18503 /* Allocate DMA memory and set up the non-embedded mailbox command */
18504 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18505 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18506 req_len, LPFC_SLI4_MBX_NEMBED);
18507 if (alloc_len < req_len) {
18508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18509 "2523 Allocated DMA memory size (x%x) is "
18510 "less than the requested DMA memory "
18511 "size (x%x)\n", alloc_len, req_len);
18512 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18513 return -ENOMEM;
18514 }
18515
18516 /*
18517 * Get the first SGE entry from the non-embedded DMA memory. This
18518 * routine only uses a single SGE.
18519 */
18520 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
6fb120a7
JS
18521 virt_addr = mboxq->sge_array->addr[0];
18522 /*
18523 * Configure the FCF record for FCFI 0. This is the driver's
18524 * hardcoded default and gets used in nonFIP mode.
18525 */
18526 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18527 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18528 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18529
18530 /*
18531 * Copy the fcf_index and the FCF Record Data. The data starts after
18532 * the FCoE header plus word10. The data copy needs to be endian
18533 * correct.
18534 */
18535 bytep += sizeof(uint32_t);
18536 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18537 mboxq->vport = phba->pport;
18538 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18539 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18540 if (rc == MBX_NOT_FINISHED) {
18541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18542 "2515 ADD_FCF_RECORD mailbox failed with "
18543 "status 0x%x\n", rc);
18544 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18545 rc = -EIO;
18546 } else
18547 rc = 0;
18548
18549 return rc;
18550}
18551
18552/**
18553 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18554 * @phba: pointer to lpfc hba data structure.
18555 * @fcf_record: pointer to the fcf record to write the default data.
18556 * @fcf_index: FCF table entry index.
18557 *
18558 * This routine is invoked to build the driver's default FCF record. The
18559 * values used are hardcoded. This routine handles memory initialization.
18560 *
18561 **/
18562void
18563lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18564 struct fcf_record *fcf_record,
18565 uint16_t fcf_index)
18566{
18567 memset(fcf_record, 0, sizeof(struct fcf_record));
18568 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18569 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18570 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18571 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18572 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18573 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18574 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18575 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18576 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18577 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18578 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18579 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18580 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
0c287589 18581 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
6fb120a7
JS
18582 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18583 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18584 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18585 /* Set the VLAN bit map */
18586 if (phba->valid_vlan) {
18587 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18588 = 1 << (phba->vlan_id % 8);
18589 }
18590}
18591
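/*
 * Illustrative sketch, not part of the driver: building the hardcoded
 * default FCF record (index 0, used in non-FIP mode) and handing it to
 * lpfc_sli4_add_fcf_record(). The wrapper name is hypothetical.
 *
 *	static int example_add_default_fcf(struct lpfc_hba *phba)
 *	{
 *		struct fcf_record fcf_record;
 *
 *		lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *		return lpfc_sli4_add_fcf_record(phba, &fcf_record);
 *	}
 */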
18592/**
0c9ab6f5 18593 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
6fb120a7
JS
18594 * @phba: pointer to lpfc hba data structure.
18595 * @fcf_index: FCF table entry offset.
18596 *
0c9ab6f5
JS
18597 * This routine is invoked to scan the entire FCF table by reading FCF
 18598 * records and processing them one at a time starting from the @fcf_index
18599 * for initial FCF discovery or fast FCF failover rediscovery.
18600 *
 18601 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5 18602 * otherwise.
6fb120a7
JS
18603 **/
18604int
0c9ab6f5 18605lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
6fb120a7
JS
18606{
18607 int rc = 0, error;
18608 LPFC_MBOXQ_t *mboxq;
6fb120a7 18609
32b9793f 18610 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
80c17849 18611 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
6fb120a7
JS
18612 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18613 if (!mboxq) {
18614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18615 "2000 Failed to allocate mbox for "
18616 "READ_FCF cmd\n");
4d9ab994 18617 error = -ENOMEM;
0c9ab6f5 18618 goto fail_fcf_scan;
6fb120a7 18619 }
ecfd03c6 18620 /* Construct the read FCF record mailbox command */
0c9ab6f5 18621 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
ecfd03c6
JS
18622 if (rc) {
18623 error = -EINVAL;
0c9ab6f5 18624 goto fail_fcf_scan;
6fb120a7 18625 }
ecfd03c6 18626 /* Issue the mailbox command asynchronously */
6fb120a7 18627 mboxq->vport = phba->pport;
0c9ab6f5 18628 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
a93ff37a
JS
18629
18630 spin_lock_irq(&phba->hbalock);
18631 phba->hba_flag |= FCF_TS_INPROG;
18632 spin_unlock_irq(&phba->hbalock);
18633
6fb120a7 18634 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
ecfd03c6 18635 if (rc == MBX_NOT_FINISHED)
6fb120a7 18636 error = -EIO;
ecfd03c6 18637 else {
38b92ef8
JS
18638 /* Reset eligible FCF count for new scan */
18639 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
999d813f 18640 phba->fcf.eligible_fcf_cnt = 0;
6fb120a7 18641 error = 0;
32b9793f 18642 }
0c9ab6f5 18643fail_fcf_scan:
4d9ab994
JS
18644 if (error) {
18645 if (mboxq)
18646 lpfc_sli4_mbox_cmd_free(phba, mboxq);
a93ff37a 18647 /* FCF scan failed, clear FCF_TS_INPROG flag */
4d9ab994 18648 spin_lock_irq(&phba->hbalock);
a93ff37a 18649 phba->hba_flag &= ~FCF_TS_INPROG;
4d9ab994
JS
18650 spin_unlock_irq(&phba->hbalock);
18651 }
6fb120a7
JS
18652 return error;
18653}
a0c87cbd 18654
0c9ab6f5 18655/**
a93ff37a 18656 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
0c9ab6f5
JS
18657 * @phba: pointer to lpfc hba data structure.
18658 * @fcf_index: FCF table entry offset.
18659 *
18660 * This routine is invoked to read an FCF record indicated by @fcf_index
a93ff37a 18661 * and to use it for FLOGI roundrobin FCF failover.
0c9ab6f5 18662 *
 18663 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5
JS
18664 * otherwise.
18665 **/
18666int
18667lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18668{
18669 int rc = 0, error;
18670 LPFC_MBOXQ_t *mboxq;
18671
18672 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18673 if (!mboxq) {
18674 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18675 "2763 Failed to allocate mbox for "
18676 "READ_FCF cmd\n");
18677 error = -ENOMEM;
18678 goto fail_fcf_read;
18679 }
18680 /* Construct the read FCF record mailbox command */
18681 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18682 if (rc) {
18683 error = -EINVAL;
18684 goto fail_fcf_read;
18685 }
18686 /* Issue the mailbox command asynchronously */
18687 mboxq->vport = phba->pport;
18688 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18689 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18690 if (rc == MBX_NOT_FINISHED)
18691 error = -EIO;
18692 else
18693 error = 0;
18694
18695fail_fcf_read:
18696 if (error && mboxq)
18697 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18698 return error;
18699}
18700
18701/**
18702 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18703 * @phba: pointer to lpfc hba data structure.
18704 * @fcf_index: FCF table entry offset.
18705 *
18706 * This routine is invoked to read an FCF record indicated by @fcf_index to
 18707 * determine whether it's eligible for the FLOGI roundrobin failover list.
0c9ab6f5 18708 *
 18709 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5
JS
18710 * otherwise.
18711 **/
18712int
18713lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18714{
18715 int rc = 0, error;
18716 LPFC_MBOXQ_t *mboxq;
18717
18718 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18719 if (!mboxq) {
18720 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18721 "2758 Failed to allocate mbox for "
18722 "READ_FCF cmd\n");
18723 error = -ENOMEM;
18724 goto fail_fcf_read;
18725 }
18726 /* Construct the read FCF record mailbox command */
18727 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18728 if (rc) {
18729 error = -EINVAL;
18730 goto fail_fcf_read;
18731 }
18732 /* Issue the mailbox command asynchronously */
18733 mboxq->vport = phba->pport;
18734 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18735 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18736 if (rc == MBX_NOT_FINISHED)
18737 error = -EIO;
18738 else
18739 error = 0;
18740
18741fail_fcf_read:
18742 if (error && mboxq)
18743 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18744 return error;
18745}
18746
7d791df7 18747/**
f5cb5304 18748 * lpfc_check_next_fcf_pri_level
7d791df7
JS
 18749 * @phba: pointer to the lpfc_hba struct for this port.
 18750 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 18751 * routine when the rr_bmask is empty. The FCF indices are put into the
 18752 * rr_bmask based on their priority level, from the highest priority
 18753 * to the lowest, so the most likely FCF candidate will be in the highest
 18754 * priority group. When this routine is called it searches the fcf_pri list
 18755 * for the next lowest priority group and repopulates the rr_bmask with only
 18756 * those fcf indexes.
18757 * returns:
18758 * 1=success 0=failure
18759 **/
5d8b8167 18760static int
7d791df7
JS
18761lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18762{
18763 uint16_t next_fcf_pri;
18764 uint16_t last_index;
18765 struct lpfc_fcf_pri *fcf_pri;
18766 int rc;
18767 int ret = 0;
18768
18769 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18770 LPFC_SLI4_FCF_TBL_INDX_MAX);
18771 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18772 "3060 Last IDX %d\n", last_index);
2562669c
JS
18773
18774 /* Verify the priority list has 2 or more entries */
18775 spin_lock_irq(&phba->hbalock);
18776 if (list_empty(&phba->fcf.fcf_pri_list) ||
18777 list_is_singular(&phba->fcf.fcf_pri_list)) {
18778 spin_unlock_irq(&phba->hbalock);
7d791df7
JS
18779 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18780 "3061 Last IDX %d\n", last_index);
18781 return 0; /* Empty rr list */
18782 }
2562669c
JS
18783 spin_unlock_irq(&phba->hbalock);
18784
7d791df7
JS
18785 next_fcf_pri = 0;
18786 /*
18787 * Clear the rr_bmask and set all of the bits that are at this
18788 * priority.
18789 */
18790 memset(phba->fcf.fcf_rr_bmask, 0,
18791 sizeof(*phba->fcf.fcf_rr_bmask));
18792 spin_lock_irq(&phba->hbalock);
18793 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18794 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18795 continue;
18796 /*
 18797		 * The first priority level that has not failed FLOGI
 18798		 * will be the highest.
18799 */
18800 if (!next_fcf_pri)
18801 next_fcf_pri = fcf_pri->fcf_rec.priority;
18802 spin_unlock_irq(&phba->hbalock);
18803 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18804 rc = lpfc_sli4_fcf_rr_index_set(phba,
18805 fcf_pri->fcf_rec.fcf_index);
18806 if (rc)
18807 return 0;
18808 }
18809 spin_lock_irq(&phba->hbalock);
18810 }
18811 /*
 18812	 * If next_fcf_pri was not set above and the list is not empty, then
 18813	 * FLOGI has failed on all of the entries. Reset the FLOGI-failed
 18814	 * flags and start again at the beginning.
7d791df7
JS
18815 */
18816 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18817 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18818 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18819 /*
 18820			 * The first priority level that has not failed FLOGI
 18821			 * will be the highest.
18822 */
18823 if (!next_fcf_pri)
18824 next_fcf_pri = fcf_pri->fcf_rec.priority;
18825 spin_unlock_irq(&phba->hbalock);
18826 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18827 rc = lpfc_sli4_fcf_rr_index_set(phba,
18828 fcf_pri->fcf_rec.fcf_index);
18829 if (rc)
18830 return 0;
18831 }
18832 spin_lock_irq(&phba->hbalock);
18833 }
18834 } else
18835 ret = 1;
18836 spin_unlock_irq(&phba->hbalock);
18837
18838 return ret;
18839}
0c9ab6f5
JS
18840/**
18841 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18842 * @phba: pointer to lpfc hba data structure.
18843 *
18844 * This routine is to get the next eligible FCF record index in a round
18845 * robin fashion. If the next eligible FCF record index equals to the
a93ff37a 18846 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
0c9ab6f5
JS
18847 * shall be returned, otherwise, the next eligible FCF record's index
18848 * shall be returned.
18849 **/
18850uint16_t
18851lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18852{
18853 uint16_t next_fcf_index;
18854
421c6622 18855initial_priority:
3804dc84 18856 /* Search start from next bit of currently registered FCF index */
421c6622
JS
18857 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18858
7d791df7 18859next_priority:
421c6622
JS
18860 /* Determine the next fcf index to check */
18861 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
0c9ab6f5
JS
18862 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18863 LPFC_SLI4_FCF_TBL_INDX_MAX,
18864 next_fcf_index);
18865
0c9ab6f5 18866 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
7d791df7
JS
18867 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18868 /*
18869 * If we have wrapped then we need to clear the bits that
18870 * have been tested so that we can detect when we should
18871 * change the priority level.
18872 */
0c9ab6f5
JS
18873 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18874 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
7d791df7
JS
18875 }
18876
3804dc84
JS
18877
18878 /* Check roundrobin failover list empty condition */
7d791df7
JS
18879 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18880 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18881 /*
 18882		 * If the next fcf index is not found, check if there are lower
 18883		 * priority level fcf's in the fcf_priority list.
 18884		 * Set up the rr_bmask with all of the available fcf bits
18885 * at that level and continue the selection process.
18886 */
18887 if (lpfc_check_next_fcf_pri_level(phba))
421c6622 18888 goto initial_priority;
3804dc84
JS
18889 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18890 "2844 No roundrobin failover FCF available\n");
036cad1f
JS
18891
18892 return LPFC_FCOE_FCF_NEXT_NONE;
3804dc84
JS
18893 }
18894
7d791df7
JS
18895 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18896 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18897 LPFC_FCF_FLOGI_FAILED) {
18898 if (list_is_singular(&phba->fcf.fcf_pri_list))
18899 return LPFC_FCOE_FCF_NEXT_NONE;
18900
7d791df7 18901 goto next_priority;
f5cb5304 18902 }
7d791df7 18903
3804dc84 18904 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18905 "2845 Get next roundrobin failover FCF (x%x)\n",
18906 next_fcf_index);
18907
0c9ab6f5
JS
18908 return next_fcf_index;
18909}
18910
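/*
 * Illustrative sketch, not part of the driver: how a caller typically
 * consumes the roundrobin index getter. LPFC_FCOE_FCF_NEXT_NONE means
 * the eligible list is exhausted; otherwise the returned index is fed
 * into a READ_FCF mailbox request such as the one below.
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return;
 *	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */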
18911/**
18912 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18913 * @phba: pointer to lpfc hba data structure.
18914 *
18915 * This routine sets the FCF record index in to the eligible bmask for
a93ff37a 18916 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
18917 * does not go beyond the range of the driver allocated bmask dimension
18918 * before setting the bit.
18919 *
18920 * Returns 0 if the index bit successfully set, otherwise, it returns
18921 * -EINVAL.
18922 **/
18923int
18924lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18925{
18926 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18927 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18928 "2610 FCF (x%x) reached driver's book "
18929 "keeping dimension:x%x\n",
18930 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18931 return -EINVAL;
18932 }
18933 /* Set the eligible FCF record index bmask */
18934 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18935
3804dc84 18936 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18937 "2790 Set FCF (x%x) to roundrobin FCF failover "
3804dc84
JS
18938 "bmask\n", fcf_index);
18939
0c9ab6f5
JS
18940 return 0;
18941}
18942
18943/**
3804dc84 18944 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
0c9ab6f5
JS
18945 * @phba: pointer to lpfc hba data structure.
18946 *
18947 * This routine clears the FCF record index from the eligible bmask for
a93ff37a 18948 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
18949 * does not go beyond the range of the driver allocated bmask dimension
18950 * before clearing the bit.
18951 **/
18952void
18953lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18954{
9a803a74 18955 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
0c9ab6f5
JS
18956 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18957 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18958 "2762 FCF (x%x) reached driver's book "
18959 "keeping dimension:x%x\n",
18960 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18961 return;
18962 }
18963 /* Clear the eligible FCF record index bmask */
7d791df7 18964 spin_lock_irq(&phba->hbalock);
9a803a74
JS
18965 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18966 list) {
7d791df7
JS
18967 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18968 list_del_init(&fcf_pri->list);
18969 break;
18970 }
18971 }
18972 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 18973 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
3804dc84
JS
18974
18975 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18976 "2791 Clear FCF (x%x) from roundrobin failover "
3804dc84 18977 "bmask\n", fcf_index);
0c9ab6f5
JS
18978}
18979
ecfd03c6
JS
18980/**
18981 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18982 * @phba: pointer to lpfc hba data structure.
18983 *
18984 * This routine is the completion routine for the rediscover FCF table mailbox
18985 * command. If the mailbox command returned failure, it will try to stop the
18986 * FCF rediscover wait timer.
18987 **/
5d8b8167 18988static void
ecfd03c6
JS
18989lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18990{
18991 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18992 uint32_t shdr_status, shdr_add_status;
18993
18994 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18995
18996 shdr_status = bf_get(lpfc_mbox_hdr_status,
18997 &redisc_fcf->header.cfg_shdr.response);
18998 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18999 &redisc_fcf->header.cfg_shdr.response);
19000 if (shdr_status || shdr_add_status) {
0c9ab6f5 19001 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
ecfd03c6
JS
19002 "2746 Requesting for FCF rediscovery failed "
19003 "status x%x add_status x%x\n",
19004 shdr_status, shdr_add_status);
0c9ab6f5 19005 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
fc2b989b 19006 spin_lock_irq(&phba->hbalock);
0c9ab6f5 19007 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b
JS
19008 spin_unlock_irq(&phba->hbalock);
19009 /*
19010 * CVL event triggered FCF rediscover request failed,
19011 * last resort to re-try current registered FCF entry.
19012 */
19013 lpfc_retry_pport_discovery(phba);
19014 } else {
19015 spin_lock_irq(&phba->hbalock);
0c9ab6f5 19016 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
fc2b989b
JS
19017 spin_unlock_irq(&phba->hbalock);
19018 /*
19019 * DEAD FCF event triggered FCF rediscover request
19020 * failed, last resort to fail over as a link down
19021 * to FCF registration.
19022 */
19023 lpfc_sli4_fcf_dead_failthrough(phba);
19024 }
0c9ab6f5
JS
19025 } else {
19026 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 19027 "2775 Start FCF rediscover quiescent timer\n");
ecfd03c6
JS
19028 /*
19029 * Start FCF rediscovery wait timer for pending FCF
19030 * before rescan FCF record table.
19031 */
19032 lpfc_fcf_redisc_wait_start_timer(phba);
0c9ab6f5 19033 }
ecfd03c6
JS
19034
19035 mempool_free(mbox, phba->mbox_mem_pool);
19036}
19037
19038/**
3804dc84 19039 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
ecfd03c6
JS
19040 * @phba: pointer to lpfc hba data structure.
19041 *
19042 * This routine is invoked to request for rediscovery of the entire FCF table
19043 * by the port.
19044 **/
19045int
19046lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19047{
19048 LPFC_MBOXQ_t *mbox;
19049 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19050 int rc, length;
19051
0c9ab6f5
JS
19052 /* Cancel retry delay timers to all vports before FCF rediscover */
19053 lpfc_cancel_all_vport_retry_delay_timer(phba);
19054
ecfd03c6
JS
19055 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19056 if (!mbox) {
19057 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19058 "2745 Failed to allocate mbox for "
19059 "requesting FCF rediscover.\n");
19060 return -ENOMEM;
19061 }
19062
19063 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19064 sizeof(struct lpfc_sli4_cfg_mhdr));
19065 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19066 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19067 length, LPFC_SLI4_MBX_EMBED);
19068
19069 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19070 /* Set count to 0 for invalidating the entire FCF database */
19071 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19072
19073 /* Issue the mailbox command asynchronously */
19074 mbox->vport = phba->pport;
19075 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19076 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19077
19078 if (rc == MBX_NOT_FINISHED) {
19079 mempool_free(mbox, phba->mbox_mem_pool);
19080 return -EIO;
19081 }
19082 return 0;
19083}
19084
fc2b989b
JS
19085/**
19086 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19087 * @phba: pointer to lpfc hba data structure.
19088 *
19089 * This function is the failover routine as a last resort to the FCF DEAD
19090 * event when driver failed to perform fast FCF failover.
19091 **/
19092void
19093lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19094{
19095 uint32_t link_state;
19096
19097 /*
19098 * Last resort as FCF DEAD event failover will treat this as
19099 * a link down, but save the link state because we don't want
19100 * it to be changed to Link Down unless it is already down.
19101 */
19102 link_state = phba->link_state;
19103 lpfc_linkdown(phba);
19104 phba->link_state = link_state;
19105
19106 /* Unregister FCF if no devices connected to it */
19107 lpfc_unregister_unused_fcf(phba);
19108}
19109
a0c87cbd 19110/**
026abb87 19111 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
a0c87cbd 19112 * @phba: pointer to lpfc hba data structure.
026abb87 19113 * @rgn23_data: pointer to configure region 23 data.
a0c87cbd 19114 *
026abb87
JS
19115 * This function gets SLI3 port configure region 23 data through memory dump
19116 * mailbox command. When it successfully retrieves data, the size of the data
19117 * will be returned, otherwise, 0 will be returned.
a0c87cbd 19118 **/
026abb87
JS
19119static uint32_t
19120lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
a0c87cbd
JS
19121{
19122 LPFC_MBOXQ_t *pmb = NULL;
19123 MAILBOX_t *mb;
026abb87 19124 uint32_t offset = 0;
a0c87cbd
JS
19125 int rc;
19126
026abb87
JS
19127 if (!rgn23_data)
19128 return 0;
19129
a0c87cbd
JS
19130 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19131 if (!pmb) {
19132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
026abb87
JS
19133 "2600 failed to allocate mailbox memory\n");
19134 return 0;
a0c87cbd
JS
19135 }
19136 mb = &pmb->u.mb;
19137
a0c87cbd
JS
19138 do {
19139 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19140 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19141
19142 if (rc != MBX_SUCCESS) {
19143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
026abb87
JS
19144 "2601 failed to read config "
19145 "region 23, rc 0x%x Status 0x%x\n",
19146 rc, mb->mbxStatus);
a0c87cbd
JS
19147 mb->un.varDmp.word_cnt = 0;
19148 }
19149 /*
19150 * dump mem may return a zero when finished or we got a
19151 * mailbox error, either way we are done.
19152 */
19153 if (mb->un.varDmp.word_cnt == 0)
19154 break;
19155 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19156 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19157
19158 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
026abb87
JS
19159 rgn23_data + offset,
19160 mb->un.varDmp.word_cnt);
a0c87cbd
JS
19161 offset += mb->un.varDmp.word_cnt;
19162 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19163
026abb87
JS
19164 mempool_free(pmb, phba->mbox_mem_pool);
19165 return offset;
19166}
19167
19168/**
19169 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19170 * @phba: pointer to lpfc hba data structure.
19171 * @rgn23_data: pointer to configure region 23 data.
19172 *
19173 * This function gets SLI4 port configure region 23 data through memory dump
19174 * mailbox command. When it successfully retrieves data, the size of the data
19175 * will be returned, otherwise, 0 will be returned.
19176 **/
19177static uint32_t
19178lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19179{
19180 LPFC_MBOXQ_t *mboxq = NULL;
19181 struct lpfc_dmabuf *mp = NULL;
19182 struct lpfc_mqe *mqe;
19183 uint32_t data_length = 0;
19184 int rc;
19185
19186 if (!rgn23_data)
19187 return 0;
19188
19189 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19190 if (!mboxq) {
19191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19192 "3105 failed to allocate mailbox memory\n");
19193 return 0;
19194 }
19195
19196 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19197 goto out;
19198 mqe = &mboxq->u.mqe;
3e1f0718 19199 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
026abb87
JS
19200 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19201 if (rc)
19202 goto out;
19203 data_length = mqe->un.mb_words[5];
19204 if (data_length == 0)
19205 goto out;
19206 if (data_length > DMP_RGN23_SIZE) {
19207 data_length = 0;
19208 goto out;
19209 }
19210 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19211out:
19212 mempool_free(mboxq, phba->mbox_mem_pool);
19213 if (mp) {
19214 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19215 kfree(mp);
19216 }
19217 return data_length;
19218}
19219
19220/**
19221 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19222 * @phba: pointer to lpfc hba data structure.
19223 *
 19224 * This function reads region 23 and parses the TLVs for port status to
 19225 * decide if the user disabled the port. If the TLV indicates the
19226 * port is disabled, the hba_flag is set accordingly.
19227 **/
19228void
19229lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19230{
19231 uint8_t *rgn23_data = NULL;
19232 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19233 uint32_t offset = 0;
19234
19235 /* Get adapter Region 23 data */
19236 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19237 if (!rgn23_data)
19238 goto out;
19239
19240 if (phba->sli_rev < LPFC_SLI_REV4)
19241 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19242 else {
19243 if_type = bf_get(lpfc_sli_intf_if_type,
19244 &phba->sli4_hba.sli_intf);
19245 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19246 goto out;
19247 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19248 }
a0c87cbd
JS
19249
19250 if (!data_size)
19251 goto out;
19252
19253 /* Check the region signature first */
19254 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19256 "2619 Config region 23 has bad signature\n");
19257 goto out;
19258 }
19259 offset += 4;
19260
19261 /* Check the data structure version */
19262 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19264 "2620 Config region 23 has bad version\n");
19265 goto out;
19266 }
19267 offset += 4;
19268
19269 /* Parse TLV entries in the region */
19270 while (offset < data_size) {
19271 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19272 break;
19273 /*
 19274		 * If the TLV is not a driver-specific TLV or the driver id is
 19275		 * not the linux driver id, skip the record.
19276 */
19277 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19278 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19279 (rgn23_data[offset + 3] != 0)) {
19280 offset += rgn23_data[offset + 1] * 4 + 4;
19281 continue;
19282 }
19283
19284 /* Driver found a driver specific TLV in the config region */
19285 sub_tlv_len = rgn23_data[offset + 1] * 4;
19286 offset += 4;
19287 tlv_offset = 0;
19288
19289 /*
19290 * Search for configured port state sub-TLV.
19291 */
19292 while ((offset < data_size) &&
19293 (tlv_offset < sub_tlv_len)) {
19294 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19295 offset += 4;
19296 tlv_offset += 4;
19297 break;
19298 }
19299 if (rgn23_data[offset] != PORT_STE_TYPE) {
19300 offset += rgn23_data[offset + 1] * 4 + 4;
19301 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19302 continue;
19303 }
19304
19305 /* This HBA contains PORT_STE configured */
19306 if (!rgn23_data[offset + 2])
19307 phba->hba_flag |= LINK_DISABLED;
19308
19309 goto out;
19310 }
19311 }
026abb87 19312
a0c87cbd 19313out:
a0c87cbd
JS
19314 kfree(rgn23_data);
19315 return;
19316}
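/*
 * Informal layout of region 23 as implied by the parsing above (a sketch
 * reconstructed from this routine, not taken from a specification):
 *
 *	bytes 0-3 : signature, must match LPFC_REGION23_SIGNATURE
 *	bytes 4-7 : version, must match LPFC_REGION23_VERSION
 *	bytes 8.. : TLV records of the form [type][len in words][data ...]
 *	            terminated by LPFC_REGION23_LAST_REC. A record of
 *	            DRIVER_SPECIFIC_TYPE with LINUX_DRIVER_ID carries
 *	            sub-TLVs; a PORT_STE_TYPE sub-TLV whose first data
 *	            byte is zero marks the port as user-disabled
 *	            (LINK_DISABLED).
 */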
695a814e 19317
52d52440
JS
19318/**
19319 * lpfc_wr_object - write an object to the firmware
19320 * @phba: HBA structure that indicates port to create a queue on.
19321 * @dmabuf_list: list of dmabufs to write to the port.
19322 * @size: the total byte value of the objects to write to the port.
19323 * @offset: the current offset to be used to start the transfer.
19324 *
19325 * This routine will create a wr_object mailbox command to send to the port.
 19326 * The mailbox command will be constructed using the dma buffers described in
19327 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 19328 * BDEs as the embedded mailbox can support. The @offset variable will be
19329 * used to indicate the starting offset of the transfer and will also return
19330 * the offset after the write object mailbox has completed. @size is used to
19331 * determine the end of the object and whether the eof bit should be set.
19332 *
 19333 * Return 0 if successful and offset will contain the new offset to use
19334 * for the next write.
19335 * Return negative value for error cases.
19336 **/
19337int
19338lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19339 uint32_t size, uint32_t *offset)
19340{
19341 struct lpfc_mbx_wr_object *wr_object;
19342 LPFC_MBOXQ_t *mbox;
19343 int rc = 0, i = 0;
5021267a 19344 uint32_t shdr_status, shdr_add_status, shdr_change_status;
52d52440 19345 uint32_t mbox_tmo;
52d52440
JS
19346 struct lpfc_dmabuf *dmabuf;
19347 uint32_t written = 0;
5021267a 19348 bool check_change_status = false;
52d52440
JS
19349
19350 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19351 if (!mbox)
19352 return -ENOMEM;
19353
19354 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19355 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19356 sizeof(struct lpfc_mbx_wr_object) -
19357 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19358
19359 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19360 wr_object->u.request.write_offset = *offset;
19361 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19362 wr_object->u.request.object_name[0] =
19363 cpu_to_le32(wr_object->u.request.object_name[0]);
19364 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19365 list_for_each_entry(dmabuf, dmabuf_list, list) {
19366 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19367 break;
19368 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19369 wr_object->u.request.bde[i].addrHigh =
19370 putPaddrHigh(dmabuf->phys);
19371 if (written + SLI4_PAGE_SIZE >= size) {
19372 wr_object->u.request.bde[i].tus.f.bdeSize =
19373 (size - written);
19374 written += (size - written);
19375 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
5021267a
JS
19376 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19377 check_change_status = true;
52d52440
JS
19378 } else {
19379 wr_object->u.request.bde[i].tus.f.bdeSize =
19380 SLI4_PAGE_SIZE;
19381 written += SLI4_PAGE_SIZE;
19382 }
19383 i++;
19384 }
19385 wr_object->u.request.bde_count = i;
19386 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19387 if (!phba->sli4_hba.intr_enable)
19388 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19389 else {
a183a15f 19390 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
52d52440
JS
19391 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19392 }
19393 /* The IOCTL status is embedded in the mailbox subheader. */
5021267a
JS
19394 shdr_status = bf_get(lpfc_mbox_hdr_status,
19395 &wr_object->header.cfg_shdr.response);
19396 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19397 &wr_object->header.cfg_shdr.response);
19398 if (check_change_status) {
19399 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19400 &wr_object->u.response);
19401 switch (shdr_change_status) {
19402 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19403 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19404 "3198 Firmware write complete: System "
19405 "reboot required to instantiate\n");
19406 break;
19407 case (LPFC_CHANGE_STATUS_FW_RESET):
19408 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19409 "3199 Firmware write complete: Firmware"
19410 " reset required to instantiate\n");
19411 break;
19412 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19413 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19414 "3200 Firmware write complete: Port "
19415 "Migration or PCI Reset required to "
19416 "instantiate\n");
19417 break;
19418 case (LPFC_CHANGE_STATUS_PCI_RESET):
19419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19420 "3201 Firmware write complete: PCI "
19421 "Reset required to instantiate\n");
19422 break;
19423 default:
19424 break;
19425 }
19426 }
52d52440
JS
19427 if (rc != MBX_TIMEOUT)
19428 mempool_free(mbox, phba->mbox_mem_pool);
19429 if (shdr_status || shdr_add_status || rc) {
19430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19431 "3025 Write Object mailbox failed with "
19432 "status x%x add_status x%x, mbx status x%x\n",
19433 shdr_status, shdr_add_status, rc);
19434 rc = -ENXIO;
1feb8204 19435 *offset = shdr_add_status;
52d52440
JS
19436 } else
19437 *offset += wr_object->u.response.actual_write_length;
19438 return rc;
19439}
19440
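/*
 * Illustrative sketch, not part of the driver: a caller such as the
 * firmware download path fills a dmabuf list with the next portion of
 * the image and calls lpfc_wr_object() in a loop, letting the routine
 * advance @offset. The names image_size and dma_buffer_list below are
 * placeholders; the real download loop differs in detail.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < image_size && !rc)
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    image_size - offset, &offset);
 */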
695a814e
JS
19441/**
19442 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19443 * @vport: pointer to vport data structure.
19444 *
 19445 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 19446 * and REG_VPI mailbox commands associated with the vport. This function
 19447 * is called when the driver wants to restart discovery of the vport due to
19448 * a Clear Virtual Link event.
19449 **/
19450void
19451lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19452{
19453 struct lpfc_hba *phba = vport->phba;
19454 LPFC_MBOXQ_t *mb, *nextmb;
19455 struct lpfc_dmabuf *mp;
78730cfe 19456 struct lpfc_nodelist *ndlp;
d439d286 19457 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 19458 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 19459 LIST_HEAD(mbox_cmd_list);
63e801ce 19460 uint8_t restart_loop;
695a814e 19461
d439d286 19462 /* Clean up internally queued mailbox commands with the vport */
695a814e
JS
19463 spin_lock_irq(&phba->hbalock);
19464 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19465 if (mb->vport != vport)
19466 continue;
19467
19468 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19469 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19470 continue;
19471
d439d286
JS
19472 list_del(&mb->list);
19473 list_add_tail(&mb->list, &mbox_cmd_list);
19474 }
19475 /* Clean up active mailbox command with the vport */
19476 mb = phba->sli.mbox_active;
19477 if (mb && (mb->vport == vport)) {
19478 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19479 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19480 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19481 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19482 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
d439d286
JS
19483 /* Put reference count for delayed processing */
19484 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19485 /* Unregister the RPI when mailbox complete */
19486 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19487 }
19488 }
63e801ce
JS
19489 /* Cleanup any mailbox completions which are not yet processed */
19490 do {
19491 restart_loop = 0;
19492 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19493 /*
 19494			 * If this mailbox has already been processed or it is
 19495			 * for another vport, ignore it.
19496 */
19497 if ((mb->vport != vport) ||
19498 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19499 continue;
19500
19501 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19502 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19503 continue;
19504
19505 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19506 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19507 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
63e801ce
JS
19508 /* Unregister the RPI when mailbox complete */
19509 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19510 restart_loop = 1;
19511 spin_unlock_irq(&phba->hbalock);
19512 spin_lock(shost->host_lock);
19513 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19514 spin_unlock(shost->host_lock);
19515 spin_lock_irq(&phba->hbalock);
19516 break;
19517 }
19518 }
19519 } while (restart_loop);
19520
d439d286
JS
19521 spin_unlock_irq(&phba->hbalock);
19522
19523 /* Release the cleaned-up mailbox commands */
19524 while (!list_empty(&mbox_cmd_list)) {
19525 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
695a814e 19526 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19527 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
695a814e
JS
19528 if (mp) {
19529 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19530 kfree(mp);
19531 }
3e1f0718
JS
19532 mb->ctx_buf = NULL;
19533 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19534 mb->ctx_ndlp = NULL;
78730cfe 19535 if (ndlp) {
ec21b3b0 19536 spin_lock(shost->host_lock);
589a52d6 19537 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 19538 spin_unlock(shost->host_lock);
78730cfe 19539 lpfc_nlp_put(ndlp);
78730cfe 19540 }
695a814e 19541 }
695a814e
JS
19542 mempool_free(mb, phba->mbox_mem_pool);
19543 }
d439d286
JS
19544
19545 /* Release the ndlp with the cleaned-up active mailbox command */
19546 if (act_mbx_ndlp) {
19547 spin_lock(shost->host_lock);
19548 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19549 spin_unlock(shost->host_lock);
19550 lpfc_nlp_put(act_mbx_ndlp);
695a814e 19551 }
695a814e
JS
19552}
19553
2a9bf3d0
JS
19554/**
19555 * lpfc_drain_txq - Drain the txq
19556 * @phba: Pointer to HBA context object.
19557 *
 19558 * This function attempts to submit IOCBs on the txq
 19559 * to the adapter. For SLI4 adapters, the txq contains
 19560 * ELS IOCBs that have been deferred because there
19561 * are no SGLs. This congestion can occur with large
19562 * vport counts during node discovery.
19563 **/
19564
19565uint32_t
19566lpfc_drain_txq(struct lpfc_hba *phba)
19567{
19568 LIST_HEAD(completions);
895427bd 19569 struct lpfc_sli_ring *pring;
2e706377 19570 struct lpfc_iocbq *piocbq = NULL;
2a9bf3d0
JS
19571 unsigned long iflags = 0;
19572 char *fail_msg = NULL;
19573 struct lpfc_sglq *sglq;
205e8240 19574 union lpfc_wqe128 wqe;
a2fc4aef 19575 uint32_t txq_cnt = 0;
dc19e3b4 19576 struct lpfc_queue *wq;
2a9bf3d0 19577
dc19e3b4
JS
19578 if (phba->link_flag & LS_MDS_LOOPBACK) {
19579 /* MDS WQE are posted only to first WQ*/
cdb42bec 19580 wq = phba->sli4_hba.hdwq[0].fcp_wq;
dc19e3b4
JS
19581 if (unlikely(!wq))
19582 return 0;
19583 pring = wq->pring;
19584 } else {
19585 wq = phba->sli4_hba.els_wq;
19586 if (unlikely(!wq))
19587 return 0;
19588 pring = lpfc_phba_elsring(phba);
19589 }
19590
19591 if (unlikely(!pring) || list_empty(&pring->txq))
1234a6d5 19592 return 0;
895427bd 19593
398d81c9 19594 spin_lock_irqsave(&pring->ring_lock, iflags);
0e9bb8d7
JS
19595 list_for_each_entry(piocbq, &pring->txq, list) {
19596 txq_cnt++;
19597 }
19598
19599 if (txq_cnt > pring->txq_max)
19600 pring->txq_max = txq_cnt;
2a9bf3d0 19601
398d81c9 19602 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19603
0e9bb8d7 19604 while (!list_empty(&pring->txq)) {
398d81c9 19605 spin_lock_irqsave(&pring->ring_lock, iflags);
2a9bf3d0 19606
19ca7609 19607 piocbq = lpfc_sli_ringtx_get(phba, pring);
a629852a 19608 if (!piocbq) {
398d81c9 19609 spin_unlock_irqrestore(&pring->ring_lock, iflags);
a629852a
JS
19610 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19611 "2823 txq empty and txq_cnt is %d\n ",
0e9bb8d7 19612 txq_cnt);
a629852a
JS
19613 break;
19614 }
895427bd 19615 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
2a9bf3d0 19616 if (!sglq) {
19ca7609 19617 __lpfc_sli_ringtx_put(phba, pring, piocbq);
398d81c9 19618 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19619 break;
2a9bf3d0 19620 }
0e9bb8d7 19621 txq_cnt--;
2a9bf3d0
JS
19622
19623 /* The xri and iocb resources secured,
19624 * attempt to issue request
19625 */
6d368e53 19626 piocbq->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0
JS
19627 piocbq->sli4_xritag = sglq->sli4_xritag;
19628 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19629 fail_msg = "to convert bpl to sgl";
205e8240 19630 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
2a9bf3d0 19631 fail_msg = "to convert iocb to wqe";
dc19e3b4 19632 else if (lpfc_sli4_wq_put(wq, &wqe))
2a9bf3d0
JS
19633 fail_msg = " - Wq is full";
19634 else
19635 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19636
19637 if (fail_msg) {
19638 /* Failed means we can't issue and need to cancel */
19639 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19640 "2822 IOCB failed %s iotag 0x%x "
19641 "xri 0x%x\n",
19642 fail_msg,
19643 piocbq->iotag, piocbq->sli4_xritag);
19644 list_add_tail(&piocbq->list, &completions);
19645 }
398d81c9 19646 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0
JS
19647 }
19648
2a9bf3d0
JS
19649 /* Cancel all the IOCBs that cannot be issued */
19650 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19651 IOERR_SLI_ABORTED);
19652
0e9bb8d7 19653 return txq_cnt;
2a9bf3d0 19654}
895427bd
JS
19655
19656/**
19657 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19658 * @phba: Pointer to HBA context object.
 19659 * @pwqeq: Pointer to command WQE.
19660 * @sglq: Pointer to the scatter gather queue object.
19661 *
19662 * This routine converts the bpl or bde that is in the WQE
19663 * to a sgl list for the sli4 hardware. The physical address
19664 * of the bpl/bde is converted back to a virtual address.
19665 * If the WQE contains a BPL then the list of BDE's is
19666 * converted to sli4_sge's. If the WQE contains a single
19667 * BDE then it is converted to a single sli_sge.
19668 * The WQE is still in cpu endianness so the contents of
19669 * the bpl can be used without byte swapping.
19670 *
19671 * Returns valid XRI = Success, NO_XRI = Failure.
19672 */
19673static uint16_t
19674lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19675 struct lpfc_sglq *sglq)
19676{
19677 uint16_t xritag = NO_XRI;
19678 struct ulp_bde64 *bpl = NULL;
19679 struct ulp_bde64 bde;
19680 struct sli4_sge *sgl = NULL;
19681 struct lpfc_dmabuf *dmabuf;
205e8240 19682 union lpfc_wqe128 *wqe;
895427bd
JS
19683 int numBdes = 0;
19684 int i = 0;
19685 uint32_t offset = 0; /* accumulated offset in the sg request list */
19686 int inbound = 0; /* number of sg reply entries inbound from firmware */
19687 uint32_t cmd;
19688
19689 if (!pwqeq || !sglq)
19690 return xritag;
19691
19692 sgl = (struct sli4_sge *)sglq->sgl;
19693 wqe = &pwqeq->wqe;
19694 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19695
19696 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19697 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19698 return sglq->sli4_xritag;
19699 numBdes = pwqeq->rsvd2;
19700 if (numBdes) {
19701 /* The addrHigh and addrLow fields within the WQE
19702 * have not been byteswapped yet so there is no
19703 * need to swap them back.
19704 */
19705 if (pwqeq->context3)
19706 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19707 else
19708 return xritag;
19709
19710 bpl = (struct ulp_bde64 *)dmabuf->virt;
19711 if (!bpl)
19712 return xritag;
19713
19714 for (i = 0; i < numBdes; i++) {
19715 /* Should already be byte swapped. */
19716 sgl->addr_hi = bpl->addrHigh;
19717 sgl->addr_lo = bpl->addrLow;
19718
19719 sgl->word2 = le32_to_cpu(sgl->word2);
19720 if ((i+1) == numBdes)
19721 bf_set(lpfc_sli4_sge_last, sgl, 1);
19722 else
19723 bf_set(lpfc_sli4_sge_last, sgl, 0);
19724 /* swap the size field back to the cpu so we
19725 * can assign it to the sgl.
19726 */
19727 bde.tus.w = le32_to_cpu(bpl->tus.w);
19728 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19729 /* The offsets in the sgl need to be accumulated
19730 * separately for the request and reply lists.
19731 * The request is always first, the reply follows.
19732 */
19733 switch (cmd) {
19734 case CMD_GEN_REQUEST64_WQE:
19735 /* add up the reply sg entries */
19736 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19737 inbound++;
19738 /* first inbound? reset the offset */
19739 if (inbound == 1)
19740 offset = 0;
19741 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19742 bf_set(lpfc_sli4_sge_type, sgl,
19743 LPFC_SGE_TYPE_DATA);
19744 offset += bde.tus.f.bdeSize;
19745 break;
19746 case CMD_FCP_TRSP64_WQE:
19747 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19748 bf_set(lpfc_sli4_sge_type, sgl,
19749 LPFC_SGE_TYPE_DATA);
19750 break;
19751 case CMD_FCP_TSEND64_WQE:
19752 case CMD_FCP_TRECEIVE64_WQE:
19753 bf_set(lpfc_sli4_sge_type, sgl,
19754 bpl->tus.f.bdeFlags);
19755 if (i < 3)
19756 offset = 0;
19757 else
19758 offset += bde.tus.f.bdeSize;
19759 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19760 break;
19761 }
19762 sgl->word2 = cpu_to_le32(sgl->word2);
19763 bpl++;
19764 sgl++;
19765 }
19766 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19767 /* The addrHigh and addrLow fields of the BDE have not
19768 * been byteswapped yet so they need to be swapped
19769 * before putting them in the sgl.
19770 */
19771 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19772 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19773 sgl->word2 = le32_to_cpu(sgl->word2);
19774 bf_set(lpfc_sli4_sge_last, sgl, 1);
19775 sgl->word2 = cpu_to_le32(sgl->word2);
19776 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19777 }
19778 return sglq->sli4_xritag;
19779}
19780
19781/**
19782 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19783 * @phba: Pointer to HBA context object.
 19784 * @qp: Pointer to the hardware queue (lpfc_sli4_hdw_queue) to post to.
19785 * @pwqe: Pointer to command WQE.
19786 **/
19787int
1fbf9742 19788lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
895427bd
JS
19789 struct lpfc_iocbq *pwqe)
19790{
205e8240 19791 union lpfc_wqe128 *wqe = &pwqe->wqe;
f358dd0c 19792 struct lpfc_nvmet_rcv_ctx *ctxp;
895427bd
JS
19793 struct lpfc_queue *wq;
19794 struct lpfc_sglq *sglq;
19795 struct lpfc_sli_ring *pring;
19796 unsigned long iflags;
cd22d605 19797 uint32_t ret = 0;
895427bd
JS
19798
19799 /* NVME_LS and NVME_LS ABTS requests. */
19800 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19801 pring = phba->sli4_hba.nvmels_wq->pring;
6a828b0f
JS
19802 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19803 qp, wq_access);
895427bd
JS
19804 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19805 if (!sglq) {
19806 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19807 return WQE_BUSY;
19808 }
19809 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19810 pwqe->sli4_xritag = sglq->sli4_xritag;
19811 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19812 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19813 return WQE_ERROR;
19814 }
19815 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19816 pwqe->sli4_xritag);
cd22d605
DK
19817 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19818 if (ret) {
895427bd 19819 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19820 return ret;
895427bd 19821 }
cd22d605 19822
895427bd
JS
19823 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19824 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19825 return 0;
19826 }
19827
19828 /* NVME_FCREQ and NVME_ABTS requests */
19829 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19830 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
1fbf9742
JS
19831 wq = qp->nvme_wq;
19832 pring = wq->pring;
895427bd 19833
1fbf9742 19834 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
895427bd 19835
6a828b0f
JS
19836 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19837 qp, wq_access);
cd22d605
DK
19838 ret = lpfc_sli4_wq_put(wq, wqe);
19839 if (ret) {
895427bd 19840 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19841 return ret;
895427bd
JS
19842 }
19843 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19844 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19845 return 0;
19846 }
19847
f358dd0c
JS
19848 /* NVMET requests */
19849 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19850 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
1fbf9742
JS
19851 wq = qp->nvme_wq;
19852 pring = wq->pring;
f358dd0c 19853
f358dd0c 19854 ctxp = pwqe->context2;
6c621a22 19855 sglq = ctxp->ctxbuf->sglq;
f358dd0c
JS
19856 if (pwqe->sli4_xritag == NO_XRI) {
19857 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19858 pwqe->sli4_xritag = sglq->sli4_xritag;
19859 }
19860 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19861 pwqe->sli4_xritag);
1fbf9742
JS
19862 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19863
6a828b0f
JS
19864 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19865 qp, wq_access);
cd22d605
DK
19866 ret = lpfc_sli4_wq_put(wq, wqe);
19867 if (ret) {
f358dd0c 19868 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19869 return ret;
f358dd0c
JS
19870 }
19871 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19872 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19873 return 0;
19874 }
895427bd
JS
19875 return WQE_ERROR;
19876}
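
/*
 * Editor's note: a minimal, hypothetical sketch of how a caller might hand a
 * prepared NVME command to lpfc_sli4_issue_wqe().  The helper name is made up
 * and the sketch assumes @pwqe has already been fully built (WQE fields,
 * sgl/xri setup) by the normal NVME path; it is not wired into the driver.
 */
static int lpfc_example_issue_nvme_wqe(struct lpfc_hba *phba,
				       struct lpfc_iocbq *pwqe, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[hwqid];
	int rc;

	/* Mark the request as NVME fast-path IO so lpfc_sli4_issue_wqe()
	 * routes it to this hardware queue's NVME work queue.
	 */
	pwqe->iocb_flag |= LPFC_IO_NVME;

	rc = lpfc_sli4_issue_wqe(phba, qp, pwqe);
	if (rc == WQE_BUSY)
		return -EBUSY;	/* WQ full or no sglq; the caller may retry */

	return rc ? -EIO : 0;
}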
c490850a
JS
19877
19878#ifdef LPFC_MXP_STAT
19879/**
19880 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19881 * @phba: pointer to lpfc hba data structure.
19882 * @hwqid: index of the HWQ whose pools are snapshotted.
19883 *
19884 * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
19885 * counts about 15 seconds after a test case has started running.
19886 *
19887 * The user should call lpfc_debugfs_multixripools_write before running a test
19888 * case to clear stat_snapshot_taken, then start the test case. While the test
19889 * case is running, stat_snapshot_taken is incremented by 1 each time this
19890 * routine is called from the heartbeat timer. When stat_snapshot_taken equals
19891 * LPFC_MXP_SNAPSHOT_TAKEN, the snapshot is taken.
19892 **/
19893void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19894{
19895 struct lpfc_sli4_hdw_queue *qp;
19896 struct lpfc_multixri_pool *multixri_pool;
19897 struct lpfc_pvt_pool *pvt_pool;
19898 struct lpfc_pbl_pool *pbl_pool;
19899 u32 txcmplq_cnt;
19900
19901 qp = &phba->sli4_hba.hdwq[hwqid];
19902 multixri_pool = qp->p_multixri_pool;
19903 if (!multixri_pool)
19904 return;
19905
19906 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19907 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19908 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19909 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19910 if (qp->nvme_wq)
19911 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19912
19913 multixri_pool->stat_pbl_count = pbl_pool->count;
19914 multixri_pool->stat_pvt_count = pvt_pool->count;
19915 multixri_pool->stat_busy_count = txcmplq_cnt;
19916 }
19917
19918 multixri_pool->stat_snapshot_taken++;
19919}
19920#endif
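
/*
 * Editor's note: an illustrative outline of how the snapshot above is meant
 * to be driven (commentary only, not driver code), assuming the heartbeat
 * interval and LPFC_MXP_SNAPSHOT_TAKEN are tuned so the snapshot lands about
 * 15 seconds into a run, as the kernel-doc states:
 *
 *   1. clear stat_snapshot_taken through lpfc_debugfs_multixripools_write()
 *   2. start the I/O test case
 *   3. each heartbeat call bumps stat_snapshot_taken; when it reaches
 *      LPFC_MXP_SNAPSHOT_TAKEN, the pbl/pvt/busy counts are latched once
 *      into stat_pbl_count, stat_pvt_count and stat_busy_count
 */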
19921
19922/**
19923 * lpfc_adjust_pvt_pool_count - Adjust private pool count
19924 * @phba: pointer to lpfc hba data structure.
19925 * @hwqid: index of the HWQ whose private pool is adjusted.
19926 *
19927 * This routine moves some XRIs from private to public pool when private pool
19928 * is not busy.
19929 **/
19930void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19931{
19932 struct lpfc_multixri_pool *multixri_pool;
19933 u32 io_req_count;
19934 u32 prev_io_req_count;
19935
19936 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19937 if (!multixri_pool)
19938 return;
19939 io_req_count = multixri_pool->io_req_count;
19940 prev_io_req_count = multixri_pool->prev_io_req_count;
19941
19942 if (prev_io_req_count != io_req_count) {
19943 /* Private pool is busy */
19944 multixri_pool->prev_io_req_count = io_req_count;
19945 } else {
19946 /* Private pool is not busy.
19947 * Move XRIs from private to public pool.
19948 */
19949 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19950 }
19951}
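
/*
 * Editor's note: an illustrative timeline for the idle check above
 * (commentary only, not driver code).  io_req_count is bumped each time an
 * IO buf is requested on this HWQ (see lpfc_get_io_buf_from_multixri_pools
 * below), so across two heartbeat ticks:
 *
 *   tick N:   io_req_count = 840, prev_io_req_count = 700 -> busy;
 *             prev_io_req_count is updated to 840
 *   tick N+1: io_req_count = 840, prev_io_req_count = 840 -> idle;
 *             lpfc_move_xri_pvt_to_pbl() returns the private XRIs
 */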
19952
19953/**
19954 * lpfc_adjust_high_watermark - Adjust high watermark
19955 * @phba: pointer to lpfc hba data structure.
19956 * @hwqid: index of the HWQ whose high watermark is adjusted.
19957 *
19958 * This routine sets the high watermark to the number of outstanding XRIs,
19959 * but makes sure the new value stays between xri_limit/2 and xri_limit.
19960 **/
19961void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19962{
19963 u32 new_watermark;
19964 u32 watermark_max;
19965 u32 watermark_min;
19966 u32 xri_limit;
19967 u32 txcmplq_cnt;
19968 u32 abts_io_bufs;
19969 struct lpfc_multixri_pool *multixri_pool;
19970 struct lpfc_sli4_hdw_queue *qp;
19971
19972 qp = &phba->sli4_hba.hdwq[hwqid];
19973 multixri_pool = qp->p_multixri_pool;
19974 if (!multixri_pool)
19975 return;
19976 xri_limit = multixri_pool->xri_limit;
19977
19978 watermark_max = xri_limit;
19979 watermark_min = xri_limit / 2;
19980
19981 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19982 abts_io_bufs = qp->abts_scsi_io_bufs;
19983 if (qp->nvme_wq) {
19984 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19985 abts_io_bufs += qp->abts_nvme_io_bufs;
19986 }
19987
19988 new_watermark = txcmplq_cnt + abts_io_bufs;
19989 new_watermark = min(watermark_max, new_watermark);
19990 new_watermark = max(watermark_min, new_watermark);
19991 multixri_pool->pvt_pool.high_watermark = new_watermark;
19992
19993#ifdef LPFC_MXP_STAT
19994 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19995 new_watermark);
19996#endif
19997}
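
/*
 * Editor's note: a worked example of the clamp above (commentary only, not
 * driver code).  With xri_limit = 512 the high watermark is kept in the
 * range [xri_limit/2, xri_limit] = [256, 512]:
 *
 *   outstanding (txcmplq_cnt + abts_io_bufs) = 100 -> high_watermark = 256
 *   outstanding = 400                              -> high_watermark = 400
 *   outstanding = 700                              -> high_watermark = 512
 */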
19998
19999/**
20000 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20001 * @phba: pointer to lpfc hba data structure.
20002 * @hwqid: index of the HWQ whose pools are adjusted.
20003 *
20004 * This routine is called from the heartbeat timer when pvt_pool is idle.
20005 * All free XRIs are moved from the private to the public pool on hwqid in
20006 * two steps. The first step moves (all - low_watermark) XRIs.
20007 * The second step moves the rest of the XRIs.
20008 **/
20009void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20010{
20011 struct lpfc_pbl_pool *pbl_pool;
20012 struct lpfc_pvt_pool *pvt_pool;
6a828b0f 20013 struct lpfc_sli4_hdw_queue *qp;
c490850a
JS
20014 struct lpfc_io_buf *lpfc_ncmd;
20015 struct lpfc_io_buf *lpfc_ncmd_next;
20016 unsigned long iflag;
20017 struct list_head tmp_list;
20018 u32 tmp_count;
20019
6a828b0f
JS
20020 qp = &phba->sli4_hba.hdwq[hwqid];
20021 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20022 pvt_pool = &qp->p_multixri_pool->pvt_pool;
c490850a
JS
20023 tmp_count = 0;
20024
6a828b0f
JS
20025 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20026 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
c490850a
JS
20027
20028 if (pvt_pool->count > pvt_pool->low_watermark) {
20029 /* Step 1: move (all - low_watermark) from pvt_pool
20030 * to pbl_pool
20031 */
20032
20033 /* Move low_watermark worth of bufs from pvt_pool to tmp_list */
20034 INIT_LIST_HEAD(&tmp_list);
20035 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20036 &pvt_pool->list, list) {
20037 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20038 tmp_count++;
20039 if (tmp_count >= pvt_pool->low_watermark)
20040 break;
20041 }
20042
20043 /* Move all bufs from pvt_pool to pbl_pool */
20044 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20045
20046 /* Move all bufs from tmp_list to pvt_pool */
20047 list_splice(&tmp_list, &pvt_pool->list);
20048
20049 pbl_pool->count += (pvt_pool->count - tmp_count);
20050 pvt_pool->count = tmp_count;
20051 } else {
20052 /* Step 2: move the rest from pvt_pool to pbl_pool */
20053 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20054 pbl_pool->count += pvt_pool->count;
20055 pvt_pool->count = 0;
20056 }
20057
20058 spin_unlock(&pvt_pool->lock);
20059 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20060}
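
/*
 * Editor's note: illustrative numbers for the two-step move above (commentary
 * only, not driver code).  With pvt_pool->count = 100 and low_watermark = 20:
 *
 *   step 1 (count > low_watermark): 20 bufs are parked on tmp_list, the
 *       remaining 80 are spliced onto pbl_pool, and the 20 are put back,
 *       leaving pvt_pool->count = 20 and pbl_pool->count increased by 80.
 *   step 2 (a later idle heartbeat, count <= low_watermark): the remaining
 *       20 are spliced onto pbl_pool and pvt_pool->count drops to 0.
 */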
20061
20062/**
20063 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20064 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to the HDW queue that owns @pvt_pool
20065 * @pbl_pool: specified public free XRI pool
20066 * @pvt_pool: specified private free XRI pool
20067 * @count: number of XRIs to move
20068 *
20069 * This routine tries to move some free common bufs from the specified pbl_pool
20070 * to the specified pvt_pool. It might move fewer than count XRIs if there are
20071 * not enough in the public pool.
20072 *
20073 * Return:
20074 * true - if XRIs are successfully moved from the specified pbl_pool to the
20075 * specified pvt_pool
20076 * false - if the specified pbl_pool is empty or locked by someone else
20077 **/
20078static bool
6a828b0f
JS
20079_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20080 struct lpfc_pbl_pool *pbl_pool,
c490850a
JS
20081 struct lpfc_pvt_pool *pvt_pool, u32 count)
20082{
20083 struct lpfc_io_buf *lpfc_ncmd;
20084 struct lpfc_io_buf *lpfc_ncmd_next;
20085 unsigned long iflag;
20086 int ret;
20087
20088 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20089 if (ret) {
20090 if (pbl_pool->count) {
20091 /* Move a batch of XRIs from public to private pool */
6a828b0f 20092 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
c490850a
JS
20093 list_for_each_entry_safe(lpfc_ncmd,
20094 lpfc_ncmd_next,
20095 &pbl_pool->list,
20096 list) {
20097 list_move_tail(&lpfc_ncmd->list,
20098 &pvt_pool->list);
20099 pvt_pool->count++;
20100 pbl_pool->count--;
20101 count--;
20102 if (count == 0)
20103 break;
20104 }
20105
20106 spin_unlock(&pvt_pool->lock);
20107 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20108 return true;
20109 }
20110 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20111 }
20112
20113 return false;
20114}
20115
20116/**
20117 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20118 * @phba: pointer to lpfc hba data structure.
20119 * @hwqid: index of the HWQ whose private pool is refilled.
20120 * @count: number of XRIs to move
20121 *
20122 * This routine tries to find a non-empty public pool using a round-robin
20123 * search. The search starts with the local hwqid, then continues from the
20124 * HWQ that was found last time (rrb_next_hwqid). Once a public pool is found,
20125 * a batch of free common bufs is moved to the private pool on hwqid.
20126 * It might move fewer than count XRIs if there are not enough in the public pool.
20127 **/
20128void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20129{
20130 struct lpfc_multixri_pool *multixri_pool;
20131 struct lpfc_multixri_pool *next_multixri_pool;
20132 struct lpfc_pvt_pool *pvt_pool;
20133 struct lpfc_pbl_pool *pbl_pool;
6a828b0f 20134 struct lpfc_sli4_hdw_queue *qp;
c490850a
JS
20135 u32 next_hwqid;
20136 u32 hwq_count;
20137 int ret;
20138
6a828b0f
JS
20139 qp = &phba->sli4_hba.hdwq[hwqid];
20140 multixri_pool = qp->p_multixri_pool;
c490850a
JS
20141 pvt_pool = &multixri_pool->pvt_pool;
20142 pbl_pool = &multixri_pool->pbl_pool;
20143
20144 /* Check if local pbl_pool is available */
6a828b0f 20145 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
c490850a
JS
20146 if (ret) {
20147#ifdef LPFC_MXP_STAT
20148 multixri_pool->local_pbl_hit_count++;
20149#endif
20150 return;
20151 }
20152
20153 hwq_count = phba->cfg_hdw_queue;
20154
20155 /* Get the next hwqid which was found last time */
20156 next_hwqid = multixri_pool->rrb_next_hwqid;
20157
20158 do {
20159 /* Go to next hwq */
20160 next_hwqid = (next_hwqid + 1) % hwq_count;
20161
20162 next_multixri_pool =
20163 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20164 pbl_pool = &next_multixri_pool->pbl_pool;
20165
20166 /* Check if the public free xri pool is available */
20167 ret = _lpfc_move_xri_pbl_to_pvt(
6a828b0f 20168 phba, qp, pbl_pool, pvt_pool, count);
c490850a
JS
20169
20170 /* Exit while-loop on success or when all hwqids have been checked */
20171 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20172
20173 /* Starting point for the next time */
20174 multixri_pool->rrb_next_hwqid = next_hwqid;
20175
20176 if (!ret) {
20177 /* stats: all public pools are empty */
20178 multixri_pool->pbl_empty_count++;
20179 }
20180
20181#ifdef LPFC_MXP_STAT
20182 if (ret) {
20183 if (next_hwqid == hwqid)
20184 multixri_pool->local_pbl_hit_count++;
20185 else
20186 multixri_pool->other_pbl_hit_count++;
20187 }
20188#endif
20189}
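
/*
 * Editor's note: an illustrative round-robin order for the search above
 * (commentary only, not driver code).  With cfg_hdw_queue = 4, hwqid = 2 and
 * rrb_next_hwqid = 0, the public pools are tried in the order:
 *
 *   HWQ 2 (local) -> HWQ 1 -> HWQ 2 -> HWQ 3 -> HWQ 0
 *
 * stopping as soon as _lpfc_move_xri_pbl_to_pvt() succeeds, or when the loop
 * wraps back around to the saved rrb_next_hwqid (0 in this example).
 */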
20190
20191/**
20192 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20193 * @phba: pointer to lpfc hba data structure.
20194 * @hwqid: index of the HWQ to check.
20195 *
20196 * This routine gets a batch of XRIs from pbl_pool when pvt_pool's count is
20197 * below the low watermark.
20198 **/
20199void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20200{
20201 struct lpfc_multixri_pool *multixri_pool;
20202 struct lpfc_pvt_pool *pvt_pool;
20203
20204 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20205 pvt_pool = &multixri_pool->pvt_pool;
20206
20207 if (pvt_pool->count < pvt_pool->low_watermark)
20208 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20209}
20210
20211/**
20212 * lpfc_release_io_buf - Return one IO buf back to free pool
20213 * @phba: pointer to lpfc hba data structure.
20214 * @lpfc_ncmd: IO buf to be returned.
20215 * @qp: pointer to the HDW queue the IO buf belongs to.
20216 *
20217 * This routine returns one IO buf back to the free pool. If
20218 * cfg_xri_rebalancing==1 and this is an urgent IO, the buf is returned to the
20219 * expedite pool; otherwise it is returned to pbl_pool or pvt_pool based on
20220 * the watermarks and xri_limit. If cfg_xri_rebalancing==0, the IO buf is
20221 * returned to lpfc_io_buf_list_put.
20222 **/
20223void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20224 struct lpfc_sli4_hdw_queue *qp)
20225{
20226 unsigned long iflag;
20227 struct lpfc_pbl_pool *pbl_pool;
20228 struct lpfc_pvt_pool *pvt_pool;
20229 struct lpfc_epd_pool *epd_pool;
20230 u32 txcmplq_cnt;
20231 u32 xri_owned;
20232 u32 xri_limit;
20233 u32 abts_io_bufs;
20234
20235 /* MUST zero fields if buffer is reused by another protocol */
20236 lpfc_ncmd->nvmeCmd = NULL;
20237 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20238 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20239
20240 if (phba->cfg_xri_rebalancing) {
20241 if (lpfc_ncmd->expedite) {
20242 /* Return to expedite pool */
20243 epd_pool = &phba->epd_pool;
20244 spin_lock_irqsave(&epd_pool->lock, iflag);
20245 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20246 epd_pool->count++;
20247 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20248 return;
20249 }
20250
20251 /* Avoid invalid access if an IO sneaks in and is being rejected
20252 * just _after_ xri pools are destroyed in lpfc_offline.
20253 * Nothing much can be done at this point.
20254 */
20255 if (!qp->p_multixri_pool)
20256 return;
20257
20258 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20259 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20260
20261 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
20262 abts_io_bufs = qp->abts_scsi_io_bufs;
20263 if (qp->nvme_wq) {
20264 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
20265 abts_io_bufs += qp->abts_nvme_io_bufs;
20266 }
20267
20268 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20269 xri_limit = qp->p_multixri_pool->xri_limit;
20270
20271#ifdef LPFC_MXP_STAT
20272 if (xri_owned <= xri_limit)
20273 qp->p_multixri_pool->below_limit_count++;
20274 else
20275 qp->p_multixri_pool->above_limit_count++;
20276#endif
20277
20278 /* XRI goes to either public or private free xri pool
20279 * based on watermark and xri_limit
20280 */
20281 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20282 (xri_owned < xri_limit &&
20283 pvt_pool->count < pvt_pool->high_watermark)) {
6a828b0f
JS
20284 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20285 qp, free_pvt_pool);
c490850a
JS
20286 list_add_tail(&lpfc_ncmd->list,
20287 &pvt_pool->list);
20288 pvt_pool->count++;
20289 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20290 } else {
6a828b0f
JS
20291 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20292 qp, free_pub_pool);
c490850a
JS
20293 list_add_tail(&lpfc_ncmd->list,
20294 &pbl_pool->list);
20295 pbl_pool->count++;
20296 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20297 }
20298 } else {
6a828b0f
JS
20299 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20300 qp, free_xri);
c490850a
JS
20301 list_add_tail(&lpfc_ncmd->list,
20302 &qp->lpfc_io_buf_list_put);
20303 qp->put_io_bufs++;
20304 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20305 iflag);
20306 }
20307}
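
/*
 * Editor's note: a worked example of the routing decision above (commentary
 * only, not driver code).  Assume cfg_xri_rebalancing is set, the buf is not
 * an expedite buf, xri_limit = 512, low_watermark = 16, high_watermark = 256:
 *
 *   pvt_pool->count = 10                          -> buf returns to pvt_pool
 *   pvt_pool->count = 100 and xri_owned = 300     -> buf returns to pvt_pool
 *   pvt_pool->count = 300 (above high watermark)  -> buf goes to pbl_pool
 *
 * With cfg_xri_rebalancing clear, the buf always lands on the per-HWQ
 * lpfc_io_buf_list_put list instead.
 */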
20308
20309/**
20310 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20311 * @phba: pointer to lpfc hba data structure.
20312 * @pvt_pool: pointer to private pool data structure.
20313 * @ndlp: pointer to lpfc nodelist data structure.
20314 *
20315 * This routine tries to get one free IO buf from private pool.
20316 *
20317 * Return:
20318 * pointer to one free IO buf - if private pool is not empty
20319 * NULL - if private pool is empty
20320 **/
20321static struct lpfc_io_buf *
20322lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
6a828b0f 20323 struct lpfc_sli4_hdw_queue *qp,
c490850a
JS
20324 struct lpfc_pvt_pool *pvt_pool,
20325 struct lpfc_nodelist *ndlp)
20326{
20327 struct lpfc_io_buf *lpfc_ncmd;
20328 struct lpfc_io_buf *lpfc_ncmd_next;
20329 unsigned long iflag;
20330
6a828b0f 20331 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
c490850a
JS
20332 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20333 &pvt_pool->list, list) {
20334 if (lpfc_test_rrq_active(
20335 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20336 continue;
20337 list_del(&lpfc_ncmd->list);
20338 pvt_pool->count--;
20339 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20340 return lpfc_ncmd;
20341 }
20342 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20343
20344 return NULL;
20345}
20346
20347/**
20348 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20349 * @phba: pointer to lpfc hba data structure.
20350 *
20351 * This routine tries to get one free IO buf from expedite pool.
20352 *
20353 * Return:
20354 * pointer to one free IO buf - if expedite pool is not empty
20355 * NULL - if expedite pool is empty
20356 **/
20357static struct lpfc_io_buf *
20358lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20359{
20360 struct lpfc_io_buf *lpfc_ncmd;
20361 struct lpfc_io_buf *lpfc_ncmd_next;
20362 unsigned long iflag;
20363 struct lpfc_epd_pool *epd_pool;
20364
20365 epd_pool = &phba->epd_pool;
20366 lpfc_ncmd = NULL;
20367
20368 spin_lock_irqsave(&epd_pool->lock, iflag);
20369 if (epd_pool->count > 0) {
20370 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20371 &epd_pool->list, list) {
20372 list_del(&lpfc_ncmd->list);
20373 epd_pool->count--;
20374 break;
20375 }
20376 }
20377 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20378
20379 return lpfc_ncmd;
20380}
20381
20382/**
20383 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
20384 * @phba: pointer to lpfc hba data structure.
20385 * @ndlp: pointer to lpfc nodelist data structure.
20386 * @hwqid: index of the HWQ to allocate from
20387 * @expedite: 1 means this request is urgent.
20388 *
20389 * This routine will do the following actions and then return a pointer to
20390 * one free IO buf.
20391 *
20392 * 1. If the private free xri pool is empty, move some XRIs from the public
20393 * to the private pool.
20394 * 2. Get one XRI from private free xri pool.
20395 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20396 * get one free xri from expedite pool.
20397 *
20398 * Note: ndlp is only used on SCSI side for RRQ testing.
20399 * The caller should pass NULL for ndlp on NVME side.
20400 *
20401 * Return:
20402 * pointer to one free IO buf - if one could be obtained
20403 * NULL - if no free IO buf is available
20404 **/
20405static struct lpfc_io_buf *
20406lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20407 struct lpfc_nodelist *ndlp,
20408 int hwqid, int expedite)
20409{
20410 struct lpfc_sli4_hdw_queue *qp;
20411 struct lpfc_multixri_pool *multixri_pool;
20412 struct lpfc_pvt_pool *pvt_pool;
20413 struct lpfc_io_buf *lpfc_ncmd;
20414
20415 qp = &phba->sli4_hba.hdwq[hwqid];
20416 lpfc_ncmd = NULL;
20417 multixri_pool = qp->p_multixri_pool;
20418 pvt_pool = &multixri_pool->pvt_pool;
20419 multixri_pool->io_req_count++;
20420
20421 /* If pvt_pool is empty, move some XRIs from public to private pool */
20422 if (pvt_pool->count == 0)
20423 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20424
20425 /* Get one XRI from private free xri pool */
6a828b0f 20426 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
c490850a
JS
20427
20428 if (lpfc_ncmd) {
20429 lpfc_ncmd->hdwq = qp;
20430 lpfc_ncmd->hdwq_no = hwqid;
20431 } else if (expedite) {
20432 /* If we fail to get one from pvt_pool and this is an expedite
20433 * request, get one free xri from expedite pool.
20434 */
20435 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20436 }
20437
20438 return lpfc_ncmd;
20439}
20440
20441static inline struct lpfc_io_buf *
20442lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20443{
20444 struct lpfc_sli4_hdw_queue *qp;
20445 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20446
20447 qp = &phba->sli4_hba.hdwq[idx];
20448 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20449 &qp->lpfc_io_buf_list_get, list) {
20450 if (lpfc_test_rrq_active(phba, ndlp,
20451 lpfc_cmd->cur_iocbq.sli4_lxritag))
20452 continue;
20453
20454 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20455 continue;
20456
20457 list_del_init(&lpfc_cmd->list);
20458 qp->get_io_bufs--;
20459 lpfc_cmd->hdwq = qp;
20460 lpfc_cmd->hdwq_no = idx;
20461 return lpfc_cmd;
20462 }
20463 return NULL;
20464}
20465
20466/**
20467 * lpfc_get_io_buf - Get one IO buffer from free pool
20468 * @phba: The HBA for which this call is being executed.
20469 * @ndlp: pointer to lpfc nodelist data structure.
20470 * @hwqid: index of the HWQ to allocate from
20471 * @expedite: 1 means this request is urgent.
20472 *
20473 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
20474 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
20475 * it removes an IO buffer from the head of the @hwqid io_buf_list and returns it.
20476 *
20477 * Note: ndlp is only used on SCSI side for RRQ testing.
20478 * The caller should pass NULL for ndlp on NVME side.
20479 *
20480 * Return codes:
20481 * NULL - Error
20482 * Pointer to lpfc_io_buf - Success
20483 **/
20484struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20485 struct lpfc_nodelist *ndlp,
20486 u32 hwqid, int expedite)
20487{
20488 struct lpfc_sli4_hdw_queue *qp;
20489 unsigned long iflag;
20490 struct lpfc_io_buf *lpfc_cmd;
20491
20492 qp = &phba->sli4_hba.hdwq[hwqid];
20493 lpfc_cmd = NULL;
20494
20495 if (phba->cfg_xri_rebalancing)
20496 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20497 phba, ndlp, hwqid, expedite);
20498 else {
6a828b0f
JS
20499 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20500 qp, alloc_xri_get);
c490850a
JS
20501 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20502 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20503 if (!lpfc_cmd) {
6a828b0f
JS
20504 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20505 qp, alloc_xri_put);
c490850a
JS
20506 list_splice(&qp->lpfc_io_buf_list_put,
20507 &qp->lpfc_io_buf_list_get);
20508 qp->get_io_bufs += qp->put_io_bufs;
20509 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20510 qp->put_io_bufs = 0;
20511 spin_unlock(&qp->io_buf_list_put_lock);
20512 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20513 expedite)
20514 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20515 }
20516 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20517 }
20518
20519 return lpfc_cmd;
20520}
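
/*
 * Editor's note: a minimal, hypothetical sketch of the allocate/release
 * pairing around lpfc_get_io_buf() and lpfc_release_io_buf().  The helper
 * name and the "build and issue" step are placeholders and not part of the
 * driver; only the two pool calls are real interfaces from this file.
 */
static int lpfc_example_start_io(struct lpfc_hba *phba,
				 struct lpfc_nodelist *ndlp, u32 hwqid)
{
	struct lpfc_io_buf *lpfc_cmd;

	/* ndlp is only needed for SCSI-side RRQ checks; NVME callers pass NULL */
	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
	if (!lpfc_cmd)
		return -EBUSY;		/* no free XRI available right now */

	/* ... build the WQE/IOCB for lpfc_cmd and issue it here (omitted) ... */

	/* On completion, or if the command could not be issued, the buf must
	 * be handed back through the hardware queue it was drawn from.
	 */
	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
	return 0;
}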