scsi: lpfc: update fault value on successful trunk events.
[linux-block.git] drivers/scsi/lpfc/lpfc_sli.c
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
128bddac 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
3e21d1cb 5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
50611577 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173 12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e 22 *******************************************************************/
23
dea3101e 24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
5a0e3ad6 28#include <linux/slab.h>
1c2ba475 29#include <linux/lockdep.h>
dea3101e 30
91886523 31#include <scsi/scsi.h>
dea3101e 32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
f888ba3c 35#include <scsi/scsi_transport_fc.h>
da0436e9 36#include <scsi/fc/fc_fs.h>
0d878419 37#include <linux/aer.h>
1351e69f 38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
dea3101e 41
895427bd 42#include <linux/nvme-fc-driver.h>
43
da0436e9 44#include "lpfc_hw4.h"
dea3101e 45#include "lpfc_hw.h"
46#include "lpfc_sli.h"
da0436e9 47#include "lpfc_sli4.h"
ea2151b4 48#include "lpfc_nl.h"
dea3101e 49#include "lpfc_disc.h"
dea3101e 50#include "lpfc.h"
895427bd 51#include "lpfc_scsi.h"
52#include "lpfc_nvme.h"
f358dd0c 53#include "lpfc_nvmet.h"
dea3101e 54#include "lpfc_crtn.h"
55#include "lpfc_logmsg.h"
56#include "lpfc_compat.h"
858c9f6c 57#include "lpfc_debugfs.h"
04c68496 58#include "lpfc_vport.h"
61bda8f7 59#include "lpfc_version.h"
dea3101e 60
61/* There are only four IOCB completion types. */
62typedef enum _lpfc_iocb_type {
63 LPFC_UNKNOWN_IOCB,
64 LPFC_UNSOL_IOCB,
65 LPFC_SOL_IOCB,
66 LPFC_ABORT_IOCB
67} lpfc_iocb_type;
68
4f774513 69
70/* Provide function prototypes local to this module. */
71static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint32_t);
73static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
45ed1190 74 uint8_t *, uint32_t *);
75static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
76 struct lpfc_iocbq *);
6669f9bb 77static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
78 struct hbq_dmabuf *);
ae9e28f3 79static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
80 struct hbq_dmabuf *dmabuf);
895427bd 81static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
0558056c 82 struct lpfc_cqe *);
895427bd 83static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
8a9d2e80 84 int);
f485c18d 85static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
86 struct lpfc_eqe *eqe, uint32_t qidx);
e8d3c3b1 87static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
88static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
895427bd 89static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
90 struct lpfc_sli_ring *pring,
91 struct lpfc_iocbq *cmdiocb);
0558056c 92
4f774513 93static IOCB_t *
94lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
95{
96 return &iocbq->iocb;
97}
98
48f8fdb4 99#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
100/**
101 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
102 * @srcp: Source memory pointer.
103 * @destp: Destination memory pointer.
104 * @cnt: Number of words required to be copied.
105 * Must be a multiple of sizeof(uint64_t)
106 *
107 * This function is used for copying data between driver memory
108 * and the SLI WQ. This function also changes the endianness
109 * of each word if native endianness is different from SLI
110 * endianness. This function can be called with or without
111 * lock.
112 **/
113void
114lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
115{
116 uint64_t *src = srcp;
117 uint64_t *dest = destp;
118 int i;
119
120 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
121 *dest++ = *src++;
122}
123#else
124#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
125#endif
126
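[Editor's note: a minimal standalone sketch of the fast path above, added for illustration; it is not driver code. It assumes, per the kernel-doc, that cnt is a byte count that is a multiple of sizeof(uint64_t). On builds that are not 64-bit little-endian the driver instead falls back to lpfc_sli_pcimem_bcopy(), which also corrects word endianness.]

#include <stdint.h>

/* Straight 64-bit word copy: host and SLI endianness already match. */
static void demo_bcopy64(const void *srcp, void *destp, uint32_t cnt)
{
	const uint64_t *src = srcp;
	uint64_t *dest = destp;
	uint32_t i;

	for (i = 0; i < cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}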
4f774513 127/**
128 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
129 * @q: The Work Queue to operate on.
130 * @wqe: The work Queue Entry to put on the Work queue.
131 *
132 * This routine will copy the contents of @wqe to the next available entry on
133 * the @q. This function will then ring the Work Queue Doorbell to signal the
134 * HBA to start processing the Work Queue Entry. This function returns 0 if
135 * successful. If no entries are available on @q then this function will return
136 * -EBUSY.
137 * The caller is expected to hold the hbalock when calling this routine.
138 **/
cd22d605 139static int
205e8240 140lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
4f774513 141{
2e90f4b5 142 union lpfc_wqe *temp_wqe;
4f774513 143 struct lpfc_register doorbell;
144 uint32_t host_index;
027140ea 145 uint32_t idx;
1351e69f 146 uint32_t i = 0;
147 uint8_t *tmp;
5cc167dd 148 u32 if_type;
4f774513 149
2e90f4b5 150 /* sanity check on queue memory */
151 if (unlikely(!q))
152 return -ENOMEM;
153 temp_wqe = q->qe[q->host_index].wqe;
154
4f774513 155 /* If the host has not yet processed the next entry then we are done */
027140ea 156 idx = ((q->host_index + 1) % q->entry_count);
157 if (idx == q->hba_index) {
b84daac9 158 q->WQ_overflow++;
cd22d605 159 return -EBUSY;
b84daac9 160 }
161 q->WQ_posted++;
4f774513 162 /* set consumption flag every once in a while */
ff78d8f9 163 if (!((q->host_index + 1) % q->entry_repost))
f0d9bccc 164 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
04673e38 165 else
166 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
fedd3b7b 167 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
168 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
48f8fdb4 169 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
1351e69f 170 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
 171 /* write to DPP aperture taking advantage of Combined Writes */
4c06619f 172 tmp = (uint8_t *)temp_wqe;
173#ifdef __raw_writeq
1351e69f 174 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
4c06619f 175 __raw_writeq(*((uint64_t *)(tmp + i)),
176 q->dpp_regaddr + i);
177#else
178 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
179 __raw_writel(*((uint32_t *)(tmp + i)),
180 q->dpp_regaddr + i);
181#endif
1351e69f 182 }
183 /* ensure WQE bcopy and DPP flushed before doorbell write */
6b3b3bdb 184 wmb();
4f774513 185
186 /* Update the host index before invoking device */
187 host_index = q->host_index;
027140ea 188
189 q->host_index = idx;
4f774513 190
191 /* Ring Doorbell */
192 doorbell.word0 = 0;
962bc51b 193 if (q->db_format == LPFC_DB_LIST_FORMAT) {
1351e69f 194 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
195 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
196 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
197 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
198 q->dpp_id);
199 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
200 q->queue_id);
201 } else {
202 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
1351e69f 203 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
5cc167dd 204
205 /* Leave bits <23:16> clear for if_type 6 dpp */
206 if_type = bf_get(lpfc_sli_intf_if_type,
207 &q->phba->sli4_hba.sli_intf);
208 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
209 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
210 host_index);
1351e69f 211 }
962bc51b 212 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
213 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
214 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
215 } else {
216 return -EINVAL;
217 }
218 writel(doorbell.word0, q->db_regaddr);
4f774513 219
220 return 0;
221}
222
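[Editor's note: a hedged sketch of how a caller might drive lpfc_sli4_wq_put() under the locking rule stated in its kernel-doc (hbalock held; -EBUSY when the ring is full). The helper post_one_wqe() and its parameters are hypothetical, not driver symbols.]

static int post_one_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
			union lpfc_wqe128 *wqe)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);	/* 0 on success, -EBUSY if full */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}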
223/**
224 * lpfc_sli4_wq_release - Updates internal hba index for WQ
225 * @q: The Work Queue to operate on.
226 * @index: The index to advance the hba index to.
227 *
228 * This routine will update the HBA index of a queue to reflect consumption of
229 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
230 * an entry the host calls this function to update the queue's internal
231 * pointers. This routine returns the number of entries that were consumed by
232 * the HBA.
233 **/
234static uint32_t
235lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
236{
237 uint32_t released = 0;
238
2e90f4b5 239 /* sanity check on queue memory */
240 if (unlikely(!q))
241 return 0;
242
4f774513 243 if (q->hba_index == index)
244 return 0;
245 do {
246 q->hba_index = ((q->hba_index + 1) % q->entry_count);
247 released++;
248 } while (q->hba_index != index);
249 return released;
250}
251
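[Editor's note: the *_release() helpers in this file all share the same circular-index arithmetic: advance hba_index modulo entry_count until it reaches the index reported by the hardware, counting entries along the way. A standalone analogue, for illustration only; not driver code.]

#include <stdint.h>

static uint32_t demo_ring_release(uint32_t *hba_index, uint32_t entry_count,
				  uint32_t index)
{
	uint32_t released = 0;

	while (*hba_index != index) {
		*hba_index = (*hba_index + 1) % entry_count;
		released++;
	}
	return released;	/* 0 when nothing was consumed */
}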
252/**
253 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 254 * @q: The Mailbox Queue to operate on.
 255 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 256 *
 257 * This routine will copy the contents of @mqe to the next available entry on
 258 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
 259 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
260 * successful. If no entries are available on @q then this function will return
261 * -ENOMEM.
262 * The caller is expected to hold the hbalock when calling this routine.
263 **/
264static int
265lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
266{
2e90f4b5 267 struct lpfc_mqe *temp_mqe;
4f774513 268 struct lpfc_register doorbell;
4f774513 269
2e90f4b5 270 /* sanity check on queue memory */
271 if (unlikely(!q))
272 return -ENOMEM;
273 temp_mqe = q->qe[q->host_index].mqe;
274
4f774513 275 /* If the host has not yet processed the next entry then we are done */
276 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
277 return -ENOMEM;
48f8fdb4 278 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
4f774513 279 /* Save off the mailbox pointer for completion */
280 q->phba->mbox = (MAILBOX_t *)temp_mqe;
281
282 /* Update the host index before invoking device */
4f774513 283 q->host_index = ((q->host_index + 1) % q->entry_count);
284
285 /* Ring Doorbell */
286 doorbell.word0 = 0;
287 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
288 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
289 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
4f774513 290 return 0;
291}
292
293/**
294 * lpfc_sli4_mq_release - Updates internal hba index for MQ
295 * @q: The Mailbox Queue to operate on.
296 *
297 * This routine will update the HBA index of a queue to reflect consumption of
298 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
299 * an entry the host calls this function to update the queue's internal
300 * pointers. This routine returns the number of entries that were consumed by
301 * the HBA.
302 **/
303static uint32_t
304lpfc_sli4_mq_release(struct lpfc_queue *q)
305{
2e90f4b5 306 /* sanity check on queue memory */
307 if (unlikely(!q))
308 return 0;
309
4f774513 310 /* Clear the mailbox pointer for completion */
311 q->phba->mbox = NULL;
312 q->hba_index = ((q->hba_index + 1) % q->entry_count);
313 return 1;
314}
315
316/**
317 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
318 * @q: The Event Queue to get the first valid EQE from
319 *
320 * This routine will get the first valid Event Queue Entry from @q, update
321 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
322 * the Queue (no more work to do), or the Queue is full of EQEs that have been
323 * processed, but not popped back to the HBA then this routine will return NULL.
324 **/
325static struct lpfc_eqe *
326lpfc_sli4_eq_get(struct lpfc_queue *q)
327{
7365f6fd 328 struct lpfc_hba *phba;
2e90f4b5 329 struct lpfc_eqe *eqe;
027140ea 330 uint32_t idx;
2e90f4b5 331
332 /* sanity check on queue memory */
333 if (unlikely(!q))
334 return NULL;
7365f6fd 335 phba = q->phba;
2e90f4b5 336 eqe = q->qe[q->hba_index].eqe;
4f774513 337
338 /* If the next EQE is not valid then we are done */
7365f6fd 339 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
4f774513 340 return NULL;
341 /* If the host has not yet processed the next entry then we are done */
027140ea 342 idx = ((q->hba_index + 1) % q->entry_count);
343 if (idx == q->host_index)
4f774513 344 return NULL;
345
027140ea 346 q->hba_index = idx;
7365f6fd 347 /* if the index wrapped around, toggle the valid bit */
348 if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
349 q->qe_valid = (q->qe_valid) ? 0 : 1;
350
27f344eb 351
352 /*
353 * insert barrier for instruction interlock : data from the hardware
354 * must have the valid bit checked before it can be copied and acted
2ea259ee 355 * upon. Speculative instructions were allowing a bcopy at the start
356 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
357 * after our return, to copy data before the valid bit check above
358 * was done. As such, some of the copied data was stale. The barrier
359 * ensures the check is before any data is copied.
27f344eb 360 */
361 mb();
4f774513 362 return eqe;
363}
364
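[Editor's note: a standalone sketch of the valid-bit ("phase") scheme used above when eqav/cqav is set: the consumer keeps a one-bit qe_valid expectation, accepts an entry only when the hardware's valid bit matches it, and flips the expectation each time the ring index wraps to 0. Hypothetical illustration code; the names are not the driver's.]

#include <stdbool.h>
#include <stdint.h>

struct demo_ring {
	uint32_t hba_index;
	uint32_t entry_count;
	uint8_t  qe_valid;	/* expected phase of the next entry */
};

static bool demo_consume(struct demo_ring *q, uint8_t hw_valid_bit)
{
	if (hw_valid_bit != q->qe_valid)
		return false;			/* entry not yet valid */
	q->hba_index = (q->hba_index + 1) % q->entry_count;
	if (q->hba_index == 0)			/* wrapped: flip phase */
		q->qe_valid = !q->qe_valid;
	return true;
}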
ba20c853 365/**
366 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
367 * @q: The Event Queue to disable interrupts on
368 *
369 **/
b71413dd 370inline void
ba20c853 371lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
372{
373 struct lpfc_register doorbell;
374
375 doorbell.word0 = 0;
376 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
377 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
378 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
379 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
380 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
9dd35425 381 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
ba20c853 382}
383
27d6ac0a 384/**
385 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
386 * @q: The Event Queue to disable interrupts on
387 *
388 **/
389inline void
390lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
391{
392 struct lpfc_register doorbell;
393
394 doorbell.word0 = 0;
aad59d5d 395 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
27d6ac0a 396 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
397}
398
4f774513 399/**
400 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
401 * @q: The Event Queue that the host has completed processing for.
402 * @arm: Indicates whether the host wants to arm this EQ.
403 *
404 * This routine will mark all Event Queue Entries on @q, from the last
405 * known completed entry to the last entry that was processed, as completed
406 * by clearing the valid bit for each completion queue entry. Then it will
407 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
408 * The internal host index in the @q will be updated by this routine to indicate
409 * that the host has finished processing the entries. The @arm parameter
410 * indicates that the queue should be rearmed when ringing the doorbell.
411 *
412 * This function will return the number of EQEs that were popped.
413 **/
414uint32_t
415lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
416{
417 uint32_t released = 0;
7365f6fd 418 struct lpfc_hba *phba;
4f774513 419 struct lpfc_eqe *temp_eqe;
420 struct lpfc_register doorbell;
421
2e90f4b5 422 /* sanity check on queue memory */
423 if (unlikely(!q))
424 return 0;
7365f6fd 425 phba = q->phba;
2e90f4b5 426
4f774513 427 /* while there are valid entries */
428 while (q->hba_index != q->host_index) {
7365f6fd 429 if (!phba->sli4_hba.pc_sli4_params.eqav) {
430 temp_eqe = q->qe[q->host_index].eqe;
431 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
432 }
4f774513 433 released++;
434 q->host_index = ((q->host_index + 1) % q->entry_count);
435 }
436 if (unlikely(released == 0 && !arm))
437 return 0;
438
439 /* ring doorbell for number popped */
440 doorbell.word0 = 0;
441 if (arm) {
442 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
443 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
444 }
445 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
446 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
6b5151fd 447 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
448 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
449 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
9dd35425 450 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
a747c9ce 451 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
452 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
9dd35425 453 readl(q->phba->sli4_hba.EQDBregaddr);
4f774513 454 return released;
455}
456
27d6ac0a 457/**
458 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
459 * @q: The Event Queue that the host has completed processing for.
460 * @arm: Indicates whether the host wants to arm this EQ.
461 *
462 * This routine will mark all Event Queue Entries on @q, from the last
463 * known completed entry to the last entry that was processed, as completed
464 * by clearing the valid bit for each completion queue entry. Then it will
465 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
466 * The internal host index in the @q will be updated by this routine to indicate
467 * that the host has finished processing the entries. The @arm parameter
468 * indicates that the queue should be rearmed when ringing the doorbell.
469 *
470 * This function will return the number of EQEs that were popped.
471 **/
472uint32_t
473lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
474{
475 uint32_t released = 0;
7365f6fd 476 struct lpfc_hba *phba;
27d6ac0a 477 struct lpfc_eqe *temp_eqe;
478 struct lpfc_register doorbell;
479
480 /* sanity check on queue memory */
481 if (unlikely(!q))
482 return 0;
7365f6fd 483 phba = q->phba;
27d6ac0a 484
485 /* while there are valid entries */
486 while (q->hba_index != q->host_index) {
7365f6fd 487 if (!phba->sli4_hba.pc_sli4_params.eqav) {
488 temp_eqe = q->qe[q->host_index].eqe;
489 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
490 }
27d6ac0a 491 released++;
492 q->host_index = ((q->host_index + 1) % q->entry_count);
493 }
494 if (unlikely(released == 0 && !arm))
495 return 0;
496
497 /* ring doorbell for number popped */
498 doorbell.word0 = 0;
499 if (arm)
500 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
501 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
502 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
503 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
504 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
505 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
506 readl(q->phba->sli4_hba.EQDBregaddr);
507 return released;
508}
509
4f774513 510/**
511 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
512 * @q: The Completion Queue to get the first valid CQE from
513 *
514 * This routine will get the first valid Completion Queue Entry from @q, update
515 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
516 * the Queue (no more work to do), or the Queue is full of CQEs that have been
517 * processed, but not popped back to the HBA then this routine will return NULL.
518 **/
519static struct lpfc_cqe *
520lpfc_sli4_cq_get(struct lpfc_queue *q)
521{
7365f6fd 522 struct lpfc_hba *phba;
4f774513 523 struct lpfc_cqe *cqe;
027140ea 524 uint32_t idx;
4f774513 525
2e90f4b5 526 /* sanity check on queue memory */
527 if (unlikely(!q))
528 return NULL;
7365f6fd 529 phba = q->phba;
530 cqe = q->qe[q->hba_index].cqe;
2e90f4b5 531
4f774513 532 /* If the next CQE is not valid then we are done */
7365f6fd 533 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
4f774513 534 return NULL;
535 /* If the host has not yet processed the next entry then we are done */
027140ea 536 idx = ((q->hba_index + 1) % q->entry_count);
537 if (idx == q->host_index)
4f774513 538 return NULL;
539
027140ea 540 q->hba_index = idx;
7365f6fd 541 /* if the index wrapped around, toggle the valid bit */
542 if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
543 q->qe_valid = (q->qe_valid) ? 0 : 1;
27f344eb 544
545 /*
546 * insert barrier for instruction interlock : data from the hardware
547 * must have the valid bit checked before it can be copied and acted
2ea259ee 548 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
549 * instructions allowing action on content before valid bit checked,
550 * add barrier here as well. May not be needed as "content" is a
551 * single 32-bit entity here (vs multi word structure for cq's).
27f344eb 552 */
553 mb();
4f774513 554 return cqe;
555}
556
557/**
558 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
559 * @q: The Completion Queue that the host has completed processing for.
560 * @arm: Indicates whether the host wants to arm this CQ.
561 *
562 * This routine will mark all Completion queue entries on @q, from the last
563 * known completed entry to the last entry that was processed, as completed
564 * by clearing the valid bit for each completion queue entry. Then it will
565 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
566 * The internal host index in the @q will be updated by this routine to indicate
567 * that the host has finished processing the entries. The @arm parameter
568 * indicates that the queue should be rearmed when ringing the doorbell.
569 *
570 * This function will return the number of CQEs that were released.
571 **/
572uint32_t
573lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
574{
575 uint32_t released = 0;
7365f6fd 576 struct lpfc_hba *phba;
4f774513 577 struct lpfc_cqe *temp_qe;
578 struct lpfc_register doorbell;
579
2e90f4b5 580 /* sanity check on queue memory */
581 if (unlikely(!q))
582 return 0;
7365f6fd 583 phba = q->phba;
584
4f774513 585 /* while there are valid entries */
586 while (q->hba_index != q->host_index) {
7365f6fd 587 if (!phba->sli4_hba.pc_sli4_params.cqav) {
588 temp_qe = q->qe[q->host_index].cqe;
589 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
590 }
4f774513 591 released++;
592 q->host_index = ((q->host_index + 1) % q->entry_count);
593 }
594 if (unlikely(released == 0 && !arm))
595 return 0;
596
597 /* ring doorbell for number popped */
598 doorbell.word0 = 0;
599 if (arm)
600 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
601 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
602 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
6b5151fd 603 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
604 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
605 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
9dd35425 606 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
4f774513 607 return released;
608}
609
27d6ac0a 610/**
611 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
612 * @q: The Completion Queue that the host has completed processing for.
613 * @arm: Indicates whether the host wants to arm this CQ.
614 *
615 * This routine will mark all Completion queue entries on @q, from the last
616 * known completed entry to the last entry that was processed, as completed
617 * by clearing the valid bit for each completion queue entry. Then it will
618 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
619 * The internal host index in the @q will be updated by this routine to indicate
620 * that the host has finished processing the entries. The @arm parameter
621 * indicates that the queue should be rearmed when ringing the doorbell.
622 *
623 * This function will return the number of CQEs that were released.
624 **/
625uint32_t
626lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
627{
628 uint32_t released = 0;
7365f6fd 629 struct lpfc_hba *phba;
27d6ac0a 630 struct lpfc_cqe *temp_qe;
631 struct lpfc_register doorbell;
632
633 /* sanity check on queue memory */
634 if (unlikely(!q))
635 return 0;
7365f6fd 636 phba = q->phba;
637
27d6ac0a 638 /* while there are valid entries */
639 while (q->hba_index != q->host_index) {
7365f6fd 640 if (!phba->sli4_hba.pc_sli4_params.cqav) {
641 temp_qe = q->qe[q->host_index].cqe;
642 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
643 }
27d6ac0a 644 released++;
645 q->host_index = ((q->host_index + 1) % q->entry_count);
646 }
647 if (unlikely(released == 0 && !arm))
648 return 0;
649
650 /* ring doorbell for number popped */
651 doorbell.word0 = 0;
652 if (arm)
653 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
654 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
655 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
656 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
657 return released;
658}
659
4f774513 660/**
661 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
662 * @hq: The Header Receive Queue to operate on.
 663 * @hrqe: The Receive Queue Entry to put on the Header Receive queue.
 664 *
 665 * This routine will copy the contents of @hrqe and @drqe to the next available
 666 * entries on @hq and @dq. This function will then ring the Receive Queue
 667 * Doorbell to signal the HBA to start processing the Receive Queue Entry. This
 668 * function returns the index that the rqe was copied to if successful. If no
 669 * entries are available on the queues then this function will return -EBUSY.
670 * The caller is expected to hold the hbalock when calling this routine.
671 **/
895427bd 672int
4f774513 673lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
674 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
675{
2e90f4b5 676 struct lpfc_rqe *temp_hrqe;
677 struct lpfc_rqe *temp_drqe;
4f774513 678 struct lpfc_register doorbell;
cbc5de1b 679 int hq_put_index;
680 int dq_put_index;
4f774513 681
2e90f4b5 682 /* sanity check on queue memory */
683 if (unlikely(!hq) || unlikely(!dq))
684 return -ENOMEM;
cbc5de1b 685 hq_put_index = hq->host_index;
686 dq_put_index = dq->host_index;
687 temp_hrqe = hq->qe[hq_put_index].rqe;
688 temp_drqe = dq->qe[dq_put_index].rqe;
2e90f4b5 689
4f774513 690 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
691 return -EINVAL;
cbc5de1b 692 if (hq_put_index != dq_put_index)
4f774513 693 return -EINVAL;
694 /* If the host has not yet processed the next entry then we are done */
cbc5de1b 695 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
4f774513 696 return -EBUSY;
48f8fdb4 697 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
698 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
4f774513 699
700 /* Update the host index to point to the next slot */
cbc5de1b 701 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
702 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
61f3d4bf 703 hq->RQ_buf_posted++;
4f774513 704
705 /* Ring The Header Receive Queue Doorbell */
73d91e50 706 if (!(hq->host_index % hq->entry_repost)) {
4f774513 707 doorbell.word0 = 0;
962bc51b 708 if (hq->db_format == LPFC_DB_RING_FORMAT) {
709 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
710 hq->entry_repost);
711 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
712 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
713 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
714 hq->entry_repost);
715 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
716 hq->host_index);
717 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
718 } else {
719 return -EINVAL;
720 }
721 writel(doorbell.word0, hq->db_regaddr);
4f774513 722 }
cbc5de1b 723 return hq_put_index;
4f774513 724}
725
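[Editor's note: a hypothetical caller of lpfc_sli4_rq_put(), illustrating the contract above: header and data RQEs are posted at matching indexes with the hbalock held, the doorbell is rung once per entry_repost buffers, and the return value is the put index on success or a negative errno. post_rq_buffer() is not a driver symbol.]

static int post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
			  struct lpfc_queue *drq, struct lpfc_rqe *hrqe,
			  struct lpfc_rqe *drqe)
{
	int rc;

	lockdep_assert_held(&phba->hbalock);
	rc = lpfc_sli4_rq_put(hrq, drq, hrqe, drqe);
	if (rc < 0)
		return rc;	/* -EBUSY when full, -EINVAL/-ENOMEM otherwise */
	return 0;		/* rc was the index the RQEs landed at */
}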
726/**
727 * lpfc_sli4_rq_release - Updates internal hba index for RQ
728 * @hq: The Header Receive Queue to operate on.
729 *
730 * This routine will update the HBA index of a queue to reflect consumption of
731 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
732 * consumed an entry the host calls this function to update the queue's
733 * internal pointers. This routine returns the number of entries that were
734 * consumed by the HBA.
735 **/
736static uint32_t
737lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
738{
2e90f4b5 739 /* sanity check on queue memory */
740 if (unlikely(!hq) || unlikely(!dq))
741 return 0;
742
4f774513 743 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
744 return 0;
745 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
746 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
747 return 1;
748}
749
e59058c4 750/**
3621a710 751 * lpfc_cmd_iocb - Get next command iocb entry in the ring
e59058c4 752 * @phba: Pointer to HBA context object.
753 * @pring: Pointer to driver SLI ring object.
754 *
755 * This function returns pointer to next command iocb entry
756 * in the command ring. The caller must hold hbalock to prevent
757 * other threads from consuming the next command iocb.
758 * SLI-2/SLI-3 provide different sized iocbs.
759 **/
ed957684 760static inline IOCB_t *
761lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
762{
7e56aa25 763 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
764 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
ed957684 765}
766
e59058c4 767/**
3621a710 768 * lpfc_resp_iocb - Get next response iocb entry in the ring
e59058c4 769 * @phba: Pointer to HBA context object.
770 * @pring: Pointer to driver SLI ring object.
771 *
772 * This function returns pointer to next response iocb entry
773 * in the response ring. The caller must hold hbalock to make sure
774 * that no other thread consumes the next response iocb.
775 * SLI-2/SLI-3 provide different sized iocbs.
776 **/
ed957684 777static inline IOCB_t *
778lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
779{
7e56aa25 780 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
781 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
ed957684 782}
783
e59058c4 784/**
3621a710 785 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4 786 * @phba: Pointer to HBA context object.
787 *
788 * This function is called with hbalock held. This function
789 * allocates a new driver iocb object from the iocb pool. If the
790 * allocation is successful, it returns pointer to the newly
791 * allocated iocb object else it returns NULL.
792 **/
4f2e66c6 793struct lpfc_iocbq *
2e0fef85 794__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
0bd4ca25 795{
796 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
797 struct lpfc_iocbq * iocbq = NULL;
798
1c2ba475 799 lockdep_assert_held(&phba->hbalock);
800
0bd4ca25 801 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
2a9bf3d0 802 if (iocbq)
803 phba->iocb_cnt++;
804 if (phba->iocb_cnt > phba->iocb_max)
805 phba->iocb_max = phba->iocb_cnt;
0bd4ca25 806 return iocbq;
807}
808
da0436e9 809/**
810 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
811 * @phba: Pointer to HBA context object.
812 * @xritag: XRI value.
813 *
814 * This function clears the sglq pointer from the array of active
815 * sglq's. The xritag that is passed in is used to index into the
816 * array. Before the xritag can be used it needs to be adjusted
817 * by subtracting the xribase.
818 *
819 * Returns sglq pointer = success, NULL = Failure.
820 **/
895427bd 821struct lpfc_sglq *
da0436e9 822__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
823{
da0436e9 824 struct lpfc_sglq *sglq;
6d368e53 825
826 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
827 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
da0436e9 828 return sglq;
829}
830
831/**
832 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
833 * @phba: Pointer to HBA context object.
834 * @xritag: XRI value.
835 *
836 * This function returns the sglq pointer from the array of active
837 * sglq's. The xritag that is passed in is used to index into the
838 * array. Before the xritag can be used it needs to be adjusted
839 * by subtracting the xribase.
840 *
841 * Returns sglq pointer = success, NULL = Failure.
842 **/
0f65ff68 843struct lpfc_sglq *
da0436e9 844__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
845{
da0436e9 846 struct lpfc_sglq *sglq;
6d368e53 847
848 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
da0436e9 849 return sglq;
850}
851
19ca7609 852/**
1151e3ec 853 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
19ca7609 854 * @phba: Pointer to HBA context object.
855 * @xritag: xri used in this exchange.
856 * @rrq: The RRQ to be cleared.
857 *
19ca7609 858 **/
1151e3ec 859void
860lpfc_clr_rrq_active(struct lpfc_hba *phba,
861 uint16_t xritag,
862 struct lpfc_node_rrq *rrq)
19ca7609 863{
1151e3ec 864 struct lpfc_nodelist *ndlp = NULL;
19ca7609 865
1151e3ec 866 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
867 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
19ca7609 868
869 /* The target DID could have been swapped (cable swap)
870 * we should use the ndlp from the findnode if it is
871 * available.
872 */
1151e3ec 873 if ((!ndlp) && rrq->ndlp)
19ca7609 874 ndlp = rrq->ndlp;
875
1151e3ec 876 if (!ndlp)
877 goto out;
878
cff261f6 879 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
19ca7609 880 rrq->send_rrq = 0;
881 rrq->xritag = 0;
882 rrq->rrq_stop_time = 0;
883 }
1151e3ec 884out:
19ca7609 885 mempool_free(rrq, phba->rrq_pool);
886}
887
888/**
889 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
890 * @phba: Pointer to HBA context object.
891 *
892 * This function is called with hbalock held. This function
893 * checks if stop_time (ratov from setting rrq active) has
894 * been reached, if it has and the send_rrq flag is set then
895 * it will call lpfc_send_rrq. If the send_rrq flag is not set
896 * then it will just call the routine to clear the rrq and
897 * free the rrq resource.
898 * The timer is set to the next rrq that is going to expire before
899 * leaving the routine.
900 *
901 **/
902void
903lpfc_handle_rrq_active(struct lpfc_hba *phba)
904{
905 struct lpfc_node_rrq *rrq;
906 struct lpfc_node_rrq *nextrrq;
907 unsigned long next_time;
908 unsigned long iflags;
1151e3ec 909 LIST_HEAD(send_rrq);
19ca7609 910
911 spin_lock_irqsave(&phba->hbalock, iflags);
912 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
256ec0d0 913 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
19ca7609 914 list_for_each_entry_safe(rrq, nextrrq,
1151e3ec 915 &phba->active_rrq_list, list) {
916 if (time_after(jiffies, rrq->rrq_stop_time))
917 list_move(&rrq->list, &send_rrq);
918 else if (time_before(rrq->rrq_stop_time, next_time))
19ca7609 919 next_time = rrq->rrq_stop_time;
920 }
921 spin_unlock_irqrestore(&phba->hbalock, iflags);
06918ac5 922 if ((!list_empty(&phba->active_rrq_list)) &&
923 (!(phba->pport->load_flag & FC_UNLOADING)))
19ca7609 924 mod_timer(&phba->rrq_tmr, next_time);
1151e3ec 925 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
926 list_del(&rrq->list);
927 if (!rrq->send_rrq)
928 /* this call will free the rrq */
929 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
930 else if (lpfc_send_rrq(phba, rrq)) {
931 /* if we send the rrq then the completion handler
932 * will clear the bit in the xribitmap.
933 */
934 lpfc_clr_rrq_active(phba, rrq->xritag,
935 rrq);
936 }
937 }
19ca7609 938}
939
940/**
941 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
942 * @vport: Pointer to vport context object.
943 * @xri: The xri used in the exchange.
944 * @did: The targets DID for this exchange.
945 *
946 * returns NULL = rrq not found in the phba->active_rrq_list.
947 * rrq = rrq for this xri and target.
948 **/
949struct lpfc_node_rrq *
950lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
951{
952 struct lpfc_hba *phba = vport->phba;
953 struct lpfc_node_rrq *rrq;
954 struct lpfc_node_rrq *nextrrq;
955 unsigned long iflags;
956
957 if (phba->sli_rev != LPFC_SLI_REV4)
958 return NULL;
959 spin_lock_irqsave(&phba->hbalock, iflags);
960 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
961 if (rrq->vport == vport && rrq->xritag == xri &&
962 rrq->nlp_DID == did){
963 list_del(&rrq->list);
964 spin_unlock_irqrestore(&phba->hbalock, iflags);
965 return rrq;
966 }
967 }
968 spin_unlock_irqrestore(&phba->hbalock, iflags);
969 return NULL;
970}
971
972/**
973 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
974 * @vport: Pointer to vport context object.
1151e3ec 975 * @ndlp: Pointer to the lpfc_nodelist structure.
976 * If ndlp is NULL, remove all active RRQs for this vport from the
977 * phba->active_rrq_list and clear the rrq.
978 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
19ca7609 979 **/
980void
1151e3ec 981lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
19ca7609 982
983{
984 struct lpfc_hba *phba = vport->phba;
985 struct lpfc_node_rrq *rrq;
986 struct lpfc_node_rrq *nextrrq;
987 unsigned long iflags;
1151e3ec 988 LIST_HEAD(rrq_list);
19ca7609 989
990 if (phba->sli_rev != LPFC_SLI_REV4)
991 return;
1151e3ec 992 if (!ndlp) {
993 lpfc_sli4_vport_delete_els_xri_aborted(vport);
994 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
19ca7609 995 }
1151e3ec 996 spin_lock_irqsave(&phba->hbalock, iflags);
997 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
998 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
999 list_move(&rrq->list, &rrq_list);
19ca7609 1000 spin_unlock_irqrestore(&phba->hbalock, iflags);
1151e3ec 1001
1002 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1003 list_del(&rrq->list);
1004 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1005 }
19ca7609 1006}
1007
19ca7609 1008/**
1151e3ec 1009 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
19ca7609 1010 * @phba: Pointer to HBA context object.
1011 * @ndlp: Targets nodelist pointer for this exchange.
1012 * @xritag: the xri in the bitmap to test.
1013 *
1014 * This function is called with hbalock held. This function
1015 * returns 0 = rrq not active for this xri
1016 * 1 = rrq is valid for this xri.
1017 **/
1151e3ec 1018int
1019lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
19ca7609 1020 uint16_t xritag)
1021{
1c2ba475 1022 lockdep_assert_held(&phba->hbalock);
19ca7609 1023 if (!ndlp)
1024 return 0;
cff261f6 1025 if (!ndlp->active_rrqs_xri_bitmap)
1026 return 0;
1027 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
19ca7609 1028 return 1;
1029 else
1030 return 0;
1031}
1032
1033/**
1034 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
1035 * @phba: Pointer to HBA context object.
1036 * @ndlp: nodelist pointer for this target.
1037 * @xritag: xri used in this exchange.
1038 * @rxid: Remote Exchange ID.
1039 * @send_rrq: Flag used to determine if we should send rrq els cmd.
1040 *
1041 * This function takes the hbalock.
1042 * The active bit is always set in the active rrq xri_bitmap even
1043 * if there is no slot available for the other rrq information.
 1044 *
 1045 * returns 0 rrq activated for this xri
1046 * < 0 No memory or invalid ndlp.
1047 **/
1048int
1049lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
b42c07c8 1050 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
19ca7609 1051{
19ca7609 1052 unsigned long iflags;
b42c07c8 1053 struct lpfc_node_rrq *rrq;
1054 int empty;
1055
1056 if (!ndlp)
1057 return -EINVAL;
1058
1059 if (!phba->cfg_enable_rrq)
1060 return -EINVAL;
19ca7609 1061
1062 spin_lock_irqsave(&phba->hbalock, iflags);
b42c07c8 1063 if (phba->pport->load_flag & FC_UNLOADING) {
1064 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1065 goto out;
1066 }
1067
1068 /*
1069 * set the active bit even if there is no mem available.
1070 */
1071 if (NLP_CHK_FREE_REQ(ndlp))
1072 goto out;
1073
1074 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1075 goto out;
1076
cff261f6 1077 if (!ndlp->active_rrqs_xri_bitmap)
1078 goto out;
1079
1080 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
b42c07c8 1081 goto out;
1082
19ca7609 1083 spin_unlock_irqrestore(&phba->hbalock, iflags);
b42c07c8 1084 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
1085 if (!rrq) {
1086 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1087 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1088 " DID:0x%x Send:%d\n",
1089 xritag, rxid, ndlp->nlp_DID, send_rrq);
1090 return -EINVAL;
1091 }
e5771b4d 1092 if (phba->cfg_enable_rrq == 1)
1093 rrq->send_rrq = send_rrq;
1094 else
1095 rrq->send_rrq = 0;
b42c07c8 1096 rrq->xritag = xritag;
256ec0d0 1097 rrq->rrq_stop_time = jiffies +
1098 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
b42c07c8 1099 rrq->ndlp = ndlp;
1100 rrq->nlp_DID = ndlp->nlp_DID;
1101 rrq->vport = ndlp->vport;
1102 rrq->rxid = rxid;
b42c07c8 1103 spin_lock_irqsave(&phba->hbalock, iflags);
1104 empty = list_empty(&phba->active_rrq_list);
1105 list_add_tail(&rrq->list, &phba->active_rrq_list);
1106 phba->hba_flag |= HBA_RRQ_ACTIVE;
1107 if (empty)
1108 lpfc_worker_wake_up(phba);
1109 spin_unlock_irqrestore(&phba->hbalock, iflags);
1110 return 0;
1111out:
1112 spin_unlock_irqrestore(&phba->hbalock, iflags);
1113 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1114 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1115 " DID:0x%x Send:%d\n",
1116 xritag, rxid, ndlp->nlp_DID, send_rrq);
1117 return -EINVAL;
19ca7609 1118}
1119
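[Editor's note: the RRQ machinery above gates XRI reuse with an atomic per-node bitmap: lpfc_set_rrq_active() marks the xri busy with test_and_set_bit() when the RRQ window opens, and lpfc_clr_rrq_active() clears the bit once RATOV expires or the RRQ ELS completes. A standalone analogue of that bitop pattern, for illustration only; not driver code.]

#include <linux/bitops.h>
#include <linux/types.h>

/* Returns false if the xri already has an active RRQ. */
static bool demo_xri_reserve(unsigned long *bitmap, unsigned int xri)
{
	return !test_and_set_bit(xri, bitmap);
}

/* Reopen the xri for use once the RRQ window has closed. */
static void demo_xri_release(unsigned long *bitmap, unsigned int xri)
{
	clear_bit(xri, bitmap);
}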
da0436e9 1120/**
895427bd 1121 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
da0436e9 1122 * @phba: Pointer to HBA context object.
19ca7609 1123 * @piocbq: Pointer to the iocbq.
da0436e9 1124 *
dafe8cea 1125 * This function is called with the ring lock held. This function
6d368e53 1126 * gets a new driver sglq object from the sglq list. If the
da0436e9 1127 * list is not empty then it is successful, it returns pointer to the newly
1128 * allocated sglq object else it returns NULL.
1129 **/
1130static struct lpfc_sglq *
895427bd 1131__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
da0436e9 1132{
895427bd 1133 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
da0436e9 1134 struct lpfc_sglq *sglq = NULL;
19ca7609 1135 struct lpfc_sglq *start_sglq = NULL;
19ca7609 1136 struct lpfc_scsi_buf *lpfc_cmd;
1137 struct lpfc_nodelist *ndlp;
1138 int found = 0;
1139
1c2ba475 1140 lockdep_assert_held(&phba->hbalock);
1141
19ca7609 1142 if (piocbq->iocb_flag & LPFC_IO_FCP) {
1143 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
1144 ndlp = lpfc_cmd->rdata->pnode;
be858b65 1145 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
6c7cf486 1146 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
19ca7609 1147 ndlp = piocbq->context_un.ndlp;
6c7cf486 1148 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1149 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1150 ndlp = NULL;
1151 else
1152 ndlp = piocbq->context_un.ndlp;
1153 } else {
19ca7609 1154 ndlp = piocbq->context1;
6c7cf486 1155 }
19ca7609 1156
895427bd 1157 spin_lock(&phba->sli4_hba.sgl_list_lock);
1158 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
19ca7609 1159 start_sglq = sglq;
1160 while (!found) {
1161 if (!sglq)
d11f54b7 1162 break;
895427bd 1163 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1164 test_bit(sglq->sli4_lxritag,
1165 ndlp->active_rrqs_xri_bitmap)) {
19ca7609 1166 /* This xri has an rrq outstanding for this DID.
1167 * put it back in the list and get another xri.
1168 */
895427bd 1169 list_add_tail(&sglq->list, lpfc_els_sgl_list);
19ca7609 1170 sglq = NULL;
895427bd 1171 list_remove_head(lpfc_els_sgl_list, sglq,
19ca7609 1172 struct lpfc_sglq, list);
1173 if (sglq == start_sglq) {
14041bd1 1174 list_add_tail(&sglq->list, lpfc_els_sgl_list);
19ca7609 1175 sglq = NULL;
1176 break;
1177 } else
1178 continue;
1179 }
1180 sglq->ndlp = ndlp;
1181 found = 1;
6d368e53 1182 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
19ca7609 1183 sglq->state = SGL_ALLOCATED;
1184 }
895427bd 1185 spin_unlock(&phba->sli4_hba.sgl_list_lock);
da0436e9 1186 return sglq;
1187}
1188
f358dd0c 1189/**
1190 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1191 * @phba: Pointer to HBA context object.
1192 * @piocbq: Pointer to the iocbq.
1193 *
1194 * This function is called with the sgl_list lock held. This function
1195 * gets a new driver sglq object from the sglq list. If the
1196 * list is not empty then it is successful, it returns pointer to the newly
1197 * allocated sglq object else it returns NULL.
1198 **/
1199struct lpfc_sglq *
1200__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1201{
1202 struct list_head *lpfc_nvmet_sgl_list;
1203 struct lpfc_sglq *sglq = NULL;
1204
1205 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1206
1207 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1208
1209 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1210 if (!sglq)
1211 return NULL;
1212 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1213 sglq->state = SGL_ALLOCATED;
da0436e9 1214 return sglq;
1215}
1216
e59058c4 1217/**
3621a710 1218 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
e59058c4 1219 * @phba: Pointer to HBA context object.
1220 *
1221 * This function is called with no lock held. This function
1222 * allocates a new driver iocb object from the iocb pool. If the
1223 * allocation is successful, it returns pointer to the newly
1224 * allocated iocb object else it returns NULL.
1225 **/
2e0fef85 1226struct lpfc_iocbq *
1227lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1228{
1229 struct lpfc_iocbq * iocbq = NULL;
1230 unsigned long iflags;
1231
1232 spin_lock_irqsave(&phba->hbalock, iflags);
1233 iocbq = __lpfc_sli_get_iocbq(phba);
1234 spin_unlock_irqrestore(&phba->hbalock, iflags);
1235 return iocbq;
1236}
1237
4f774513 1238/**
1239 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1240 * @phba: Pointer to HBA context object.
1241 * @iocbq: Pointer to driver iocb object.
1242 *
1243 * This function is called with hbalock held to release driver
1244 * iocb object to the iocb pool. The iotag in the iocb object
1245 * does not change for each use of the iocb object. This function
1246 * clears all other fields of the iocb object when it is freed.
1247 * The sqlq structure that holds the xritag and phys and virtual
1248 * mappings for the scatter gather list is retrieved from the
1249 * active array of sglq. The get of the sglq pointer also clears
1250 * the entry in the array. If the status of the IO indicates that
 1251 * this IO was aborted then the sglq entry is put on the
1252 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1253 * IO has good status or fails for any other reason then the sglq
895427bd 1254 * entry is added to the free list (lpfc_els_sgl_list).
4f774513 1255 **/
1256static void
1257__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1258{
1259 struct lpfc_sglq *sglq;
1260 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
2a9bf3d0 1261 unsigned long iflag = 0;
895427bd 1262 struct lpfc_sli_ring *pring;
4f774513 1263
1c2ba475 1264 lockdep_assert_held(&phba->hbalock);
1265
4f774513 1266 if (iocbq->sli4_xritag == NO_XRI)
1267 sglq = NULL;
1268 else
6d368e53 1269 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1270
0e9bb8d7 1271
4f774513 1272 if (sglq) {
f358dd0c 1273 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1274 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1275 iflag);
1276 sglq->state = SGL_FREED;
1277 sglq->ndlp = NULL;
1278 list_add_tail(&sglq->list,
1279 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1280 spin_unlock_irqrestore(
1281 &phba->sli4_hba.sgl_list_lock, iflag);
1282 goto out;
1283 }
1284
895427bd 1285 pring = phba->sli4_hba.els_wq->pring;
0f65ff68 1286 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1287 (sglq->state != SGL_XRI_ABORTED)) {
895427bd 1288 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1289 iflag);
4f774513 1290 list_add(&sglq->list,
895427bd 1291 &phba->sli4_hba.lpfc_abts_els_sgl_list);
4f774513 1292 spin_unlock_irqrestore(
895427bd 1293 &phba->sli4_hba.sgl_list_lock, iflag);
0f65ff68 1294 } else {
895427bd 1295 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1296 iflag);
0f65ff68 1297 sglq->state = SGL_FREED;
19ca7609 1298 sglq->ndlp = NULL;
fedd3b7b 1299 list_add_tail(&sglq->list,
895427bd
JS
1300 &phba->sli4_hba.lpfc_els_sgl_list);
1301 spin_unlock_irqrestore(
1302 &phba->sli4_hba.sgl_list_lock, iflag);
2a9bf3d0 1303
1304 /* Check if TXQ queue needs to be serviced */
0e9bb8d7 1305 if (!list_empty(&pring->txq))
2a9bf3d0 1306 lpfc_worker_wake_up(phba);
0f65ff68 1307 }
4f774513 1308 }
1309
f358dd0c 1310out:
4f774513 1311 /*
1312 * Clean all volatile data fields, preserve iotag and node struct.
1313 */
1314 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
6d368e53 1315 iocbq->sli4_lxritag = NO_XRI;
4f774513 1316 iocbq->sli4_xritag = NO_XRI;
f358dd0c 1317 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1318 LPFC_IO_NVME_LS);
4f774513 1319 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1320}
1321
2a9bf3d0 1322
e59058c4 1323/**
3772a991 1324 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
e59058c4 1325 * @phba: Pointer to HBA context object.
1326 * @iocbq: Pointer to driver iocb object.
1327 *
1328 * This function is called with hbalock held to release driver
1329 * iocb object to the iocb pool. The iotag in the iocb object
1330 * does not change for each use of the iocb object. This function
1331 * clears all other fields of the iocb object when it is freed.
1332 **/
a6ababd2 1333static void
3772a991 1334__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
604a3e30 1335{
2e0fef85 1336 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
604a3e30 1337
1c2ba475 1338 lockdep_assert_held(&phba->hbalock);
0e9bb8d7 1339
604a3e30 1340 /*
1341 * Clean all volatile data fields, preserve iotag and node struct.
1342 */
1343 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
3772a991 1344 iocbq->sli4_xritag = NO_XRI;
604a3e30 1345 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1346}
1347
3772a991 1348/**
1349 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1350 * @phba: Pointer to HBA context object.
1351 * @iocbq: Pointer to driver iocb object.
1352 *
1353 * This function is called with hbalock held to release driver
1354 * iocb object to the iocb pool. The iotag in the iocb object
1355 * does not change for each use of the iocb object. This function
1356 * clears all other fields of the iocb object when it is freed.
1357 **/
1358static void
1359__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1360{
1c2ba475 1361 lockdep_assert_held(&phba->hbalock);
1362
3772a991 1363 phba->__lpfc_sli_release_iocbq(phba, iocbq);
2a9bf3d0 1364 phba->iocb_cnt--;
3772a991 1365}
1366
e59058c4 1367/**
3621a710 1368 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
e59058c4 1369 * @phba: Pointer to HBA context object.
1370 * @iocbq: Pointer to driver iocb object.
1371 *
1372 * This function is called with no lock held to release the iocb to
1373 * iocb pool.
1374 **/
2e0fef85 1375void
1376lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1377{
1378 unsigned long iflags;
1379
1380 /*
1381 * Clean all volatile data fields, preserve iotag and node struct.
1382 */
1383 spin_lock_irqsave(&phba->hbalock, iflags);
1384 __lpfc_sli_release_iocbq(phba, iocbq);
1385 spin_unlock_irqrestore(&phba->hbalock, iflags);
1386}
1387
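[Editor's note: a hypothetical caller showing the allocate/release life cycle implied by lpfc_sli_get_iocbq() and lpfc_sli_release_iocbq(); per the release helpers above, the iotag and node pointer survive a release while every other field is cleared before the iocb returns to the pool. demo_iocb_cycle() is not a driver symbol.]

static void demo_iocb_cycle(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);

	if (!iocbq)
		return;		/* pool exhausted */
	/* ... build iocbq->iocb and issue it ... */
	lpfc_sli_release_iocbq(phba, iocbq);
}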
a257bf90 1388/**
1389 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1390 * @phba: Pointer to HBA context object.
1391 * @iocblist: List of IOCBs.
1392 * @ulpstatus: ULP status in IOCB command field.
1393 * @ulpWord4: ULP word-4 in IOCB command field.
1394 *
1395 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1396 * on the list by invoking the complete callback function associated with the
1397 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
1398 * fields.
1399 **/
1400void
1401lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1402 uint32_t ulpstatus, uint32_t ulpWord4)
1403{
1404 struct lpfc_iocbq *piocb;
1405
1406 while (!list_empty(iocblist)) {
1407 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
a257bf90 1408 if (!piocb->iocb_cmpl)
1409 lpfc_sli_release_iocbq(phba, piocb);
1410 else {
1411 piocb->iocb.ulpStatus = ulpstatus;
1412 piocb->iocb.un.ulpWord[4] = ulpWord4;
1413 (piocb->iocb_cmpl) (phba, piocb, piocb);
1414 }
1415 }
1416 return;
1417}
1418
e59058c4 1419/**
3621a710 1420 * lpfc_sli_iocb_cmd_type - Get the iocb type
1421 * @iocb_cmnd: iocb command code.
e59058c4 1422 *
1423 * This function is called by ring event handler function to get the iocb type.
1424 * This function translates the iocb command to an iocb command type used to
1425 * decide the final disposition of each completed IOCB.
1426 * The function returns
1427 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1428 * LPFC_SOL_IOCB if it is a solicited iocb completion
1429 * LPFC_ABORT_IOCB if it is an abort iocb
1430 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1431 *
1432 * The caller is not required to hold any lock.
1433 **/
dea3101e 1434static lpfc_iocb_type
1435lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1436{
1437 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1438
1439 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1440 return 0;
1441
1442 switch (iocb_cmnd) {
1443 case CMD_XMIT_SEQUENCE_CR:
1444 case CMD_XMIT_SEQUENCE_CX:
1445 case CMD_XMIT_BCAST_CN:
1446 case CMD_XMIT_BCAST_CX:
1447 case CMD_ELS_REQUEST_CR:
1448 case CMD_ELS_REQUEST_CX:
1449 case CMD_CREATE_XRI_CR:
1450 case CMD_CREATE_XRI_CX:
1451 case CMD_GET_RPI_CN:
1452 case CMD_XMIT_ELS_RSP_CX:
1453 case CMD_GET_RPI_CR:
1454 case CMD_FCP_IWRITE_CR:
1455 case CMD_FCP_IWRITE_CX:
1456 case CMD_FCP_IREAD_CR:
1457 case CMD_FCP_IREAD_CX:
1458 case CMD_FCP_ICMND_CR:
1459 case CMD_FCP_ICMND_CX:
f5603511 1460 case CMD_FCP_TSEND_CX:
1461 case CMD_FCP_TRSP_CX:
1462 case CMD_FCP_TRECEIVE_CX:
1463 case CMD_FCP_AUTO_TRSP_CX:
dea3101e 1464 case CMD_ADAPTER_MSG:
1465 case CMD_ADAPTER_DUMP:
1466 case CMD_XMIT_SEQUENCE64_CR:
1467 case CMD_XMIT_SEQUENCE64_CX:
1468 case CMD_XMIT_BCAST64_CN:
1469 case CMD_XMIT_BCAST64_CX:
1470 case CMD_ELS_REQUEST64_CR:
1471 case CMD_ELS_REQUEST64_CX:
1472 case CMD_FCP_IWRITE64_CR:
1473 case CMD_FCP_IWRITE64_CX:
1474 case CMD_FCP_IREAD64_CR:
1475 case CMD_FCP_IREAD64_CX:
1476 case CMD_FCP_ICMND64_CR:
1477 case CMD_FCP_ICMND64_CX:
f5603511 1478 case CMD_FCP_TSEND64_CX:
1479 case CMD_FCP_TRSP64_CX:
1480 case CMD_FCP_TRECEIVE64_CX:
dea3101e 1481 case CMD_GEN_REQUEST64_CR:
1482 case CMD_GEN_REQUEST64_CX:
1483 case CMD_XMIT_ELS_RSP64_CX:
da0436e9 1484 case DSSCMD_IWRITE64_CR:
1485 case DSSCMD_IWRITE64_CX:
1486 case DSSCMD_IREAD64_CR:
1487 case DSSCMD_IREAD64_CX:
dea3101e 1488 type = LPFC_SOL_IOCB;
1489 break;
1490 case CMD_ABORT_XRI_CN:
1491 case CMD_ABORT_XRI_CX:
1492 case CMD_CLOSE_XRI_CN:
1493 case CMD_CLOSE_XRI_CX:
1494 case CMD_XRI_ABORTED_CX:
1495 case CMD_ABORT_MXRI64_CN:
6669f9bb 1496 case CMD_XMIT_BLS_RSP64_CX:
dea3101e 1497 type = LPFC_ABORT_IOCB;
1498 break;
1499 case CMD_RCV_SEQUENCE_CX:
1500 case CMD_RCV_ELS_REQ_CX:
1501 case CMD_RCV_SEQUENCE64_CX:
1502 case CMD_RCV_ELS_REQ64_CX:
57127f15 1503 case CMD_ASYNC_STATUS:
ed957684 1504 case CMD_IOCB_RCV_SEQ64_CX:
1505 case CMD_IOCB_RCV_ELS64_CX:
1506 case CMD_IOCB_RCV_CONT64_CX:
3163f725 1507 case CMD_IOCB_RET_XRI64_CX:
dea3101e 1508 type = LPFC_UNSOL_IOCB;
1509 break;
3163f725 1510 case CMD_IOCB_XMIT_MSEQ64_CR:
1511 case CMD_IOCB_XMIT_MSEQ64_CX:
1512 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1513 case CMD_IOCB_RCV_ELS_LIST64_CX:
1514 case CMD_IOCB_CLOSE_EXTENDED_CN:
1515 case CMD_IOCB_ABORT_EXTENDED_CN:
1516 case CMD_IOCB_RET_HBQE64_CN:
1517 case CMD_IOCB_FCP_IBIDIR64_CR:
1518 case CMD_IOCB_FCP_IBIDIR64_CX:
1519 case CMD_IOCB_FCP_ITASKMGT64_CX:
1520 case CMD_IOCB_LOGENTRY_CN:
1521 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1522 printk("%s - Unhandled SLI-3 Command x%x\n",
cadbd4a5 1523 __func__, iocb_cmnd);
3163f725
JS
1524 type = LPFC_UNKNOWN_IOCB;
1525 break;
dea3101e 1526 default:
1527 type = LPFC_UNKNOWN_IOCB;
1528 break;
1529 }
1530
1531 return type;
1532}
1533
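/*
 * Illustrative sketch only (not driver code): a consumer of
 * lpfc_sli_iocb_cmd_type() would typically branch on the returned type to
 * pick a disposition for a completed ring entry. The helper name and the
 * actions described in the comments below are hypothetical.
 */
static __maybe_unused void
lpfc_sketch_dispatch_iocb_type(uint8_t ulp_command)
{
	switch (lpfc_sli_iocb_cmd_type(ulp_command)) {
	case LPFC_SOL_IOCB:
		/* solicited: match the txcmplq entry, run its completion */
		break;
	case LPFC_UNSOL_IOCB:
		/* unsolicited: route the buffer by rctl/type */
		break;
	case LPFC_ABORT_IOCB:
		/* abort completion: no payload to deliver */
		break;
	case LPFC_UNKNOWN_IOCB:
	default:
		/* unsupported command: log and drop */
		break;
	}
}
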
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to the txcmplq of the given ring. This function always returns
 * 0. If this function is called for the ELS ring, it checks that
 * there is a vport associated with the ELS command. This function also
 * starts the els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

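/*
 * Illustrative sketch only: lpfc_sli_ringtxcmpl_put() asserts that hbalock
 * is held, so a hypothetical caller tracking a command before issuing it
 * to the adapter would wrap the call like this.
 */
static __maybe_unused void
lpfc_sketch_track_cmd(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
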
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing it from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock, the
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

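/*
 * Illustrative sketch only: the SLI-3 command ring is a circular array, so
 * the "no slot" condition checked above reduces to the next put index
 * catching up with the adapter's get index. Hypothetical helper, not
 * driver code.
 */
static __maybe_unused bool
lpfc_sketch_ring_full(uint32_t put_idx, uint32_t get_idx, uint32_t entries)
{
	return ((put_idx + 1) % entries) == get_idx;
}
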
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger
 * iocbq_lookup array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

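/*
 * Illustrative sketch only: a successful lpfc_sli_next_iotag() leaves the
 * iocb reachable through the lookup table, which is exactly what the
 * response path relies on. Hypothetical helper spelling out the invariant;
 * note that calling it consumes an iotag.
 */
static __maybe_unused bool
lpfc_sketch_iotag_registered(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	uint16_t iotag = lpfc_sli_next_iotag(phba, iocbq);

	return iotag != 0 && phba->sli.iocbq_lookup[iotag] == iocbq;
}
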
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to the ring iocb slot
 * and updates the ring pointers. It adds the new iocb to the txcmplq if
 * there is a completion call back for this iocb, else the function will
 * free the iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry, else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

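/*
 * Illustrative sketch only: the drain pattern above (safe iteration plus
 * list_del before the free callback) is the standard way to empty a dmabuf
 * list whose nodes are freed as they are visited. Hypothetical helper for
 * a plain lpfc_dmabuf list, not driver code.
 */
static __maybe_unused void
lpfc_sketch_drain_dmabuf_list(struct lpfc_hba *phba, struct list_head *head)
{
	struct lpfc_dmabuf *mp, *next_mp;

	list_for_each_entry_safe(mp, next_mp, head, list) {
		list_del(&mp->list);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}
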
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function returns
 * zero if it successfully posts the buffer, else it returns an
 * error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

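/*
 * Illustrative sketch only: both the s3 and s4 post routines encode the
 * owning HBQ in the upper 16 bits of the buffer tag (see
 * "rc | (hbqno << 16)" above), which lpfc_sli_hbqbuf_find() later undoes.
 * Hypothetical helper showing the decode.
 */
static __maybe_unused void
lpfc_sketch_decode_hbq_tag(uint32_t tag, uint32_t *hbqno, uint32_t *index)
{
	*hbqno = tag >> 16;
	*index = tag & 0xffff;
}
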
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};

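/*
 * Illustrative sketch only: lpfc_sli_hbqbuf_fill_hbqs() clamps a post
 * request so the posted total never exceeds the HBQ's entry_count; the
 * arithmetic is just the remaining room. Hypothetical helper, not driver
 * code.
 */
static __maybe_unused uint32_t
lpfc_sketch_hbq_room(const struct lpfc_hbq_init *hip, uint32_t buffer_count,
		     uint32_t want)
{
	uint32_t room = hip->entry_count - buffer_count;

	return want > room ? room : want;
}
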
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

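/*
 * Illustrative sketch only: the fill routine above allocates with no lock
 * held, then commits under hbalock, and frees everything that did not make
 * it in. A generic "stage without the lock, commit under the lock" helper,
 * with hypothetical names, might look like this; on failure the caller
 * frees whatever is left on the staged list.
 */
static __maybe_unused int
lpfc_sketch_commit_under_lock(struct lpfc_hba *phba, struct list_head *staged,
			      struct list_head *owner, bool *in_use)
{
	unsigned long flags;
	int committed = 0;

	/* entries on 'staged' were allocated with no lock held */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (*in_use) {
		list_splice_tail_init(staged, owner);
		committed = 1;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return committed;
}
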
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ
 * entries successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from the SLI initialization code path with
 * no lock held to post initial HBQ buffers to the firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to the firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

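/*
 * Illustrative sketch only: the mailbox event handler below uses the
 * MBX_SHUTDOWN sentinel exactly this way to decide whether a completion is
 * fatal; the helper name is hypothetical.
 */
static __maybe_unused bool
lpfc_sketch_mbox_cmpl_is_fatal(uint8_t mbx_command)
{
	return lpfc_sli_chk_mbx_command(mbx_command) == MBX_SHUTDOWN;
}
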
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * wakes up the thread waiting on the completion pointed to by context3
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
}

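/*
 * Illustrative sketch only: the waiter that pairs with
 * lpfc_sli_wake_mbox_wait() parks a completion in context3 and blocks on
 * it, broadly as below. The timeout recovery and flag cleanup done by the
 * real lpfc_sli_issue_mbox_wait() are omitted here.
 */
static __maybe_unused int
lpfc_sketch_wait_for_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			  unsigned long timeout_jiffies)
{
	DECLARE_COMPLETION_ONSTACK(mbox_done);

	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	pmboxq->context3 = &mbox_done;
	if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
		return -EIO;
	if (!wait_for_completion_timeout(&mbox_done, timeout_jiffies))
		return -ETIMEDOUT;
	return 0;
}
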
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %p\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			}
			ndlp->nlp_flag &= ~NLP_UNREG_INP;
		}
		pmb->ctx_ndlp = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not been done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts that
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"map:%x %p\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				    NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x %p\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				}
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes mailbox completion interrupts, adds
 * completed mailbox commands to the mboxq_cmpl queue and signals the worker
 * thread. The worker thread calls lpfc_sli_handle_mb_event, which returns
 * the completed mailbox commands in the mboxq_cmpl queue to the upper
 * layers. This function returns the mailbox commands to the upper layer by
 * calling the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer was posted for a particular exchange and
 * the function returns the buffer without replacing it.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}

/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting
 *         frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	switch (fch_type) {
	case FC_TYPE_NVME:
		lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
		return 1;
	default:
		break;
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}
	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}

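/*
 * Illustrative sketch only: the rctl/type match table consulted above is
 * populated at ring-setup time elsewhere in the driver; an ELS
 * registration would look broadly like this. The helper name is
 * hypothetical and the entry layout is assumed from the lookup loop above.
 */
static __maybe_unused void
lpfc_sketch_register_els_handler(struct lpfc_sli_ring *pring)
{
	pring->prt[0].profile = 0;	/* 0 means match on rctl/type */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
	pring->num_mask = 1;
}
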
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object;
 * otherwise upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}

/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held
 * for SLI3 devices or the ring_lock for SLI4 devices.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	lockdep_assert_held(&phba->hbalock);

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}

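/*
 * Illustrative sketch only: stripped of the txcmplq bookkeeping above, the
 * lookup itself is a bounds-checked array index keyed by the iotag.
 * Hypothetical helper, not driver code.
 */
static __maybe_unused struct lpfc_iocbq *
lpfc_sketch_iotag_to_iocb(struct lpfc_hba *phba, uint16_t iotag)
{
	if (iotag == 0 || iotag > phba->sli.last_iotag)
		return NULL;
	return phba->sli.iocbq_lookup[iotag];
}
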
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb = NULL;

	lockdep_assert_held(&phba->hbalock);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x lookup error: max iotag (x%x) "
			"iocb_flag x%x\n",
			iotag, phba->sli.last_iotag,
			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
	return NULL;
}

/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with the command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock_irqsave(&pring->ring_lock, iflag);
	else
		spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock_irqrestore(&pring->ring_lock, iflag);
	else
		spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
							    cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
				     LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
3203 "Data: x%x x%x x%x x%x\n",
3204 pring->ringno,
3205 saveq->iocb.ulpIoTag,
3206 saveq->iocb.ulpStatus,
3207 saveq->iocb.un.ulpWord[4],
3208 saveq->iocb.ulpCommand,
3209 saveq->iocb.ulpContext);
dea3101e 3210 }
3211 }
68876920 3212
dea3101e 3213 return rc;
3214}
3215
e59058c4 3216/**
3621a710 3217 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
3218 * @phba: Pointer to HBA context object.
3219 * @pring: Pointer to driver SLI ring object.
3220 *
3221 * This function is called from the iocb ring event handlers when
3222 * the put pointer is ahead of the get pointer for a ring. This function signals
3223 * an error attention condition to the worker thread, and the worker
3224 * thread will transition the HBA to the offline state.
3225 **/
2e0fef85
JS
3226static void
3227lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 3228{
34b02dcd 3229 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 3230 /*
025dfdaf 3231 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
3232 * rsp ring <portRspMax>
3233 */
3234 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3235 "0312 Ring %d handler: portRspPut %d "
025dfdaf 3236 "is bigger than rsp ring %d\n",
e8b62011 3237 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 3238 pring->sli.sli3.numRiocb);
875fbdfe 3239
2e0fef85 3240 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
3241
3242 /*
3243 * All error attention handlers are posted to
3244 * worker thread
3245 */
3246 phba->work_ha |= HA_ERATT;
3247 phba->work_hs = HS_FFER3;
92d7f7b0 3248
5e9d9b82 3249 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
3250
3251 return;
3252}
3253
9399627f 3254/**
3621a710 3255 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
3256 * @ptr: Pointer to address of HBA context object.
3257 *
3258 * This function is invoked by the Error Attention polling timer when the
3259 * timer times out. It will check the SLI Error Attention register for
3260 * possible attention events. If so, it will post an Error Attention event
3261 * and wake up worker thread to process it. Otherwise, it will set up the
3262 * Error Attention polling timer for the next poll.
3263 **/
f22eb4d3 3264void lpfc_poll_eratt(struct timer_list *t)
9399627f
JS
3265{
3266 struct lpfc_hba *phba;
eb016566 3267 uint32_t eratt = 0;
aa6fbb75 3268 uint64_t sli_intr, cnt;
9399627f 3269
f22eb4d3 3270 phba = from_timer(phba, t, eratt_poll);
9399627f 3271
aa6fbb75
JS
3272 /* Here we will also keep track of interrupts per sec of the hba */
3273 sli_intr = phba->sli.slistat.sli_intr;
3274
3275 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3276 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3277 sli_intr);
3278 else
3279 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3280
65791f1f
JS
3281 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3282 do_div(cnt, phba->eratt_poll_interval);
aa6fbb75
JS
3283 phba->sli.slistat.sli_ips = cnt;
3284
3285 phba->sli.slistat.sli_prev_intr = sli_intr;
3286
9399627f
JS
3287 /* Check chip HA register for error event */
3288 eratt = lpfc_sli_check_eratt(phba);
3289
3290 if (eratt)
3291 /* Tell the worker thread there is work to do */
3292 lpfc_worker_wake_up(phba);
3293 else
3294 /* Restart the timer for next eratt poll */
256ec0d0
JS
3295 mod_timer(&phba->eratt_poll,
3296 jiffies +
65791f1f 3297 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9399627f
JS
3298 return;
3299}
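/*
 * Editor's note (illustrative, not driver code): the interrupts-per-second
 * bookkeeping in lpfc_poll_eratt() handles 64-bit counter wrap by walking
 * the distance through (uint64_t)(-1), and divides with do_div() because
 * 32-bit kernels have no native 64-by-32 division. The arithmetic in
 * isolation:
 *
 *	static uint64_t intr_per_interval(uint64_t prev, uint64_t now,
 *					  uint32_t interval_secs)
 *	{
 *		uint64_t cnt;
 *
 *		if (prev > now)				// counter wrapped
 *			cnt = ((uint64_t)(-1) - prev) + now;
 *		else
 *			cnt = now - prev;
 *		do_div(cnt, interval_secs);		// cnt /= interval_secs
 *		return cnt;
 *	}
 */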
3300
875fbdfe 3301
e59058c4 3302/**
3621a710 3303 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
3304 * @phba: Pointer to HBA context object.
3305 * @pring: Pointer to driver SLI ring object.
3306 * @mask: Host attention register mask for this ring.
3307 *
3308 * This function is called from the interrupt context when there is a ring
3309 * event for the fcp ring. The caller does not hold any lock.
3310 * The function processes each response iocb in the response ring until it
25985edc 3311 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
e59058c4
JS
3312 * LE bit set. The function will call the completion handler of the command iocb
3313 * if the response iocb indicates a completion for a command iocb or it is
3314 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3315 * function if this is an unsolicited iocb.
dea3101e 3316 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
3317 * to check it explicitly.
3318 */
3319int
2e0fef85
JS
3320lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3321 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3322{
34b02dcd 3323 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 3324 IOCB_t *irsp = NULL;
87f6eaff 3325 IOCB_t *entry = NULL;
dea3101e 3326 struct lpfc_iocbq *cmdiocbq = NULL;
3327 struct lpfc_iocbq rspiocbq;
dea3101e 3328 uint32_t status;
3329 uint32_t portRspPut, portRspMax;
3330 int rc = 1;
3331 lpfc_iocb_type type;
3332 unsigned long iflag;
3333 uint32_t rsp_cmpl = 0;
dea3101e 3334
2e0fef85 3335 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3336 pring->stats.iocb_event++;
3337
dea3101e 3338 /*
3339 * The next available response entry should never exceed the maximum
3340 * entries. If it does, treat it as an adapter hardware error.
3341 */
7e56aa25 3342 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3343 portRspPut = le32_to_cpu(pgp->rspPutInx);
3344 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 3345 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 3346 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3347 return 1;
3348 }
45ed1190
JS
3349 if (phba->fcp_ring_in_use) {
3350 spin_unlock_irqrestore(&phba->hbalock, iflag);
3351 return 1;
3352 } else
3353 phba->fcp_ring_in_use = 1;
dea3101e 3354
3355 rmb();
7e56aa25 3356 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
3357 /*
3358 * Fetch an entry off the ring and copy it into a local data
3359 * structure. The copy involves a byte-swap since the
3360 * network byte order and pci byte orders are different.
3361 */
ed957684 3362 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 3363 phba->last_completion_time = jiffies;
875fbdfe 3364
7e56aa25
JS
3365 if (++pring->sli.sli3.rspidx >= portRspMax)
3366 pring->sli.sli3.rspidx = 0;
875fbdfe 3367
87f6eaff
JSEC
3368 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3369 (uint32_t *) &rspiocbq.iocb,
ed957684 3370 phba->iocb_rsp_size);
a4bc3379 3371 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
3372 irsp = &rspiocbq.iocb;
3373
dea3101e 3374 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3375 pring->stats.iocb_rsp++;
3376 rsp_cmpl++;
3377
3378 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3379 /*
3380 * If resource errors reported from HBA, reduce
3381 * queuedepths of the SCSI device.
3382 */
3383 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3384 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3385 IOERR_NO_RESOURCES)) {
92d7f7b0 3386 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3387 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3388 spin_lock_irqsave(&phba->hbalock, iflag);
3389 }
3390
dea3101e 3391 /* Rsp ring <ringno> error: IOCB */
3392 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3393 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3394 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3395 pring->ringno,
92d7f7b0
JS
3396 irsp->un.ulpWord[0],
3397 irsp->un.ulpWord[1],
3398 irsp->un.ulpWord[2],
3399 irsp->un.ulpWord[3],
3400 irsp->un.ulpWord[4],
3401 irsp->un.ulpWord[5],
d7c255b2
JS
3402 *(uint32_t *)&irsp->un1,
3403 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3404 }
3405
3406 switch (type) {
3407 case LPFC_ABORT_IOCB:
3408 case LPFC_SOL_IOCB:
3409 /*
3410 * Idle exchange closed via ABTS from port. No iocb
3411 * resources need to be recovered.
3412 */
3413 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3414 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3415 "0333 IOCB cmd 0x%x"
dca9479b 3416 " processed. Skipping"
92d7f7b0 3417 " completion\n",
dca9479b 3418 irsp->ulpCommand);
dea3101e 3419 break;
3420 }
3421
604a3e30
JB
3422 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3423 &rspiocbq);
0f65ff68
JS
3424 if (unlikely(!cmdiocbq))
3425 break;
3426 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3427 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3428 if (cmdiocbq->iocb_cmpl) {
3429 spin_unlock_irqrestore(&phba->hbalock, iflag);
3430 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3431 &rspiocbq);
3432 spin_lock_irqsave(&phba->hbalock, iflag);
3433 }
dea3101e 3434 break;
a4bc3379 3435 case LPFC_UNSOL_IOCB:
2e0fef85 3436 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3437 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3438 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3439 break;
dea3101e 3440 default:
3441 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3442 char adaptermsg[LPFC_MAX_ADPTMSG];
3443 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3444 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3445 MAX_MSG_DATA);
898eb71c
JP
3446 dev_warn(&((phba->pcidev)->dev),
3447 "lpfc%d: %s\n",
dea3101e 3448 phba->brd_no, adaptermsg);
3449 } else {
3450 /* Unknown IOCB command */
3451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3452 "0334 Unknown IOCB command "
92d7f7b0 3453 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3454 type, irsp->ulpCommand,
92d7f7b0
JS
3455 irsp->ulpStatus,
3456 irsp->ulpIoTag,
3457 irsp->ulpContext);
dea3101e 3458 }
3459 break;
3460 }
3461
3462 /*
3463 * The response IOCB has been processed. Update the ring
3464 * pointer in SLIM. If the port response put pointer has not
3465 * been updated, sync the pgp->rspPutInx and fetch the new port
3466 * response put pointer.
3467 */
7e56aa25
JS
3468 writel(pring->sli.sli3.rspidx,
3469 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3470
7e56aa25 3471 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3472 portRspPut = le32_to_cpu(pgp->rspPutInx);
3473 }
3474
3475 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3476 pring->stats.iocb_rsp_full++;
3477 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3478 writel(status, phba->CAregaddr);
3479 readl(phba->CAregaddr);
3480 }
3481 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3482 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3483 pring->stats.iocb_cmd_empty++;
3484
3485 /* Force update of the local copy of cmdGetInx */
7e56aa25 3486 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3487 lpfc_sli_resume_iocb(phba, pring);
3488
3489 if ((pring->lpfc_sli_cmd_available))
3490 (pring->lpfc_sli_cmd_available) (phba, pring);
3491
3492 }
3493
45ed1190 3494 phba->fcp_ring_in_use = 0;
2e0fef85 3495 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3496 return rc;
3497}
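/*
 * Editor's sketch (illustrative, not driver code): the loop above is a
 * classic single-consumer ring walk - consume entries from the local get
 * index (rspidx) up to the port-owned put index, wrap at portRspMax, and
 * republish the get index so the port can reuse the slots. Schematically:
 *
 *	while (rspidx != portRspPut) {
 *		entry = ring_entry(pring, rspidx);	// copy + byte-swap
 *		if (++rspidx >= portRspMax)
 *			rspidx = 0;			// wrap around
 *		process(entry);
 *		writel(rspidx, get_index_reg);		// release the slot
 *		if (rspidx == portRspPut)		// caught up: resync
 *			portRspPut = le32_to_cpu(pgp->rspPutInx);
 *	}
 */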
3498
e59058c4 3499/**
3772a991
JS
3500 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3501 * @phba: Pointer to HBA context object.
3502 * @pring: Pointer to driver SLI ring object.
3503 * @rspiocbp: Pointer to driver response IOCB object.
3504 *
3505 * This function is called from the worker thread when there is a slow-path
3506 * response IOCB to process. This function chains all the response iocbs until
3507 * seeing the iocb with the LE bit set. The function will call
3508 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3509 * completion of a command iocb. The function will call the
3510 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3511 * The function frees the resources or calls the completion handler if this
3512 * iocb is an abort completion. The function returns NULL when the response
3513 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3514 * this function shall chain the iocb on to the iocb_continueq and return the
3515 * response iocb passed in.
3516 **/
3517static struct lpfc_iocbq *
3518lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3519 struct lpfc_iocbq *rspiocbp)
3520{
3521 struct lpfc_iocbq *saveq;
3522 struct lpfc_iocbq *cmdiocbp;
3523 struct lpfc_iocbq *next_iocb;
3524 IOCB_t *irsp = NULL;
3525 uint32_t free_saveq;
3526 uint8_t iocb_cmd_type;
3527 lpfc_iocb_type type;
3528 unsigned long iflag;
3529 int rc;
3530
3531 spin_lock_irqsave(&phba->hbalock, iflag);
3532 /* First add the response iocb to the iocb_continueq list */
3533 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3534 pring->iocb_continueq_cnt++;
3535
70f23fd6 3536 /* Now, determine whether the list is completed for processing */
3772a991
JS
3537 irsp = &rspiocbp->iocb;
3538 if (irsp->ulpLe) {
3539 /*
3540 * By default, the driver expects to free all resources
3541 * associated with this iocb completion.
3542 */
3543 free_saveq = 1;
3544 saveq = list_get_first(&pring->iocb_continueq,
3545 struct lpfc_iocbq, list);
3546 irsp = &(saveq->iocb);
3547 list_del_init(&pring->iocb_continueq);
3548 pring->iocb_continueq_cnt = 0;
3549
3550 pring->stats.iocb_rsp++;
3551
3552 /*
3553 * If resource errors reported from HBA, reduce
3554 * queuedepths of the SCSI device.
3555 */
3556 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3557 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3558 IOERR_NO_RESOURCES)) {
3772a991
JS
3559 spin_unlock_irqrestore(&phba->hbalock, iflag);
3560 phba->lpfc_rampdown_queue_depth(phba);
3561 spin_lock_irqsave(&phba->hbalock, iflag);
3562 }
3563
3564 if (irsp->ulpStatus) {
3565 /* Rsp ring <ringno> error: IOCB */
3566 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3567 "0328 Rsp Ring %d error: "
3568 "IOCB Data: "
3569 "x%x x%x x%x x%x "
3570 "x%x x%x x%x x%x "
3571 "x%x x%x x%x x%x "
3572 "x%x x%x x%x x%x\n",
3573 pring->ringno,
3574 irsp->un.ulpWord[0],
3575 irsp->un.ulpWord[1],
3576 irsp->un.ulpWord[2],
3577 irsp->un.ulpWord[3],
3578 irsp->un.ulpWord[4],
3579 irsp->un.ulpWord[5],
3580 *(((uint32_t *) irsp) + 6),
3581 *(((uint32_t *) irsp) + 7),
3582 *(((uint32_t *) irsp) + 8),
3583 *(((uint32_t *) irsp) + 9),
3584 *(((uint32_t *) irsp) + 10),
3585 *(((uint32_t *) irsp) + 11),
3586 *(((uint32_t *) irsp) + 12),
3587 *(((uint32_t *) irsp) + 13),
3588 *(((uint32_t *) irsp) + 14),
3589 *(((uint32_t *) irsp) + 15));
3590 }
3591
3592 /*
3593 * Fetch the IOCB command type and call the correct completion
3594 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3595 * get freed back to the lpfc_iocb_list by the discovery
3596 * kernel thread.
3597 */
3598 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3599 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3600 switch (type) {
3601 case LPFC_SOL_IOCB:
3602 spin_unlock_irqrestore(&phba->hbalock, iflag);
3603 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3604 spin_lock_irqsave(&phba->hbalock, iflag);
3605 break;
3606
3607 case LPFC_UNSOL_IOCB:
3608 spin_unlock_irqrestore(&phba->hbalock, iflag);
3609 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3610 spin_lock_irqsave(&phba->hbalock, iflag);
3611 if (!rc)
3612 free_saveq = 0;
3613 break;
3614
3615 case LPFC_ABORT_IOCB:
3616 cmdiocbp = NULL;
3617 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3618 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3619 saveq);
3620 if (cmdiocbp) {
3621 /* Call the specified completion routine */
3622 if (cmdiocbp->iocb_cmpl) {
3623 spin_unlock_irqrestore(&phba->hbalock,
3624 iflag);
3625 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3626 saveq);
3627 spin_lock_irqsave(&phba->hbalock,
3628 iflag);
3629 } else
3630 __lpfc_sli_release_iocbq(phba,
3631 cmdiocbp);
3632 }
3633 break;
3634
3635 case LPFC_UNKNOWN_IOCB:
3636 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3637 char adaptermsg[LPFC_MAX_ADPTMSG];
3638 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3639 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3640 MAX_MSG_DATA);
3641 dev_warn(&((phba->pcidev)->dev),
3642 "lpfc%d: %s\n",
3643 phba->brd_no, adaptermsg);
3644 } else {
3645 /* Unknown IOCB command */
3646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3647 "0335 Unknown IOCB "
3648 "command Data: x%x "
3649 "x%x x%x x%x\n",
3650 irsp->ulpCommand,
3651 irsp->ulpStatus,
3652 irsp->ulpIoTag,
3653 irsp->ulpContext);
3654 }
3655 break;
3656 }
3657
3658 if (free_saveq) {
3659 list_for_each_entry_safe(rspiocbp, next_iocb,
3660 &saveq->list, list) {
61f35bff 3661 list_del_init(&rspiocbp->list);
3772a991
JS
3662 __lpfc_sli_release_iocbq(phba, rspiocbp);
3663 }
3664 __lpfc_sli_release_iocbq(phba, saveq);
3665 }
3666 rspiocbp = NULL;
3667 }
3668 spin_unlock_irqrestore(&phba->hbalock, iflag);
3669 return rspiocbp;
3670}
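/*
 * Editor's sketch (illustrative, not driver code): slow-path responses
 * arrive as a chain of IOCB entries terminated by the one with ulpLe set.
 * The routine above accumulates entries on iocb_continueq and dispatches
 * only when the terminator arrives. In outline:
 *
 *	list_add_tail(&rsp->list, &pring->iocb_continueq);
 *	if (!rsp->iocb.ulpLe)
 *		return rsp;	// chain incomplete, caller keeps polling
 *	saveq = first entry of iocb_continueq;
 *	detach the whole chain from the ring;
 *	dispatch saveq by type (SOL / UNSOL / ABORT / UNKNOWN);
 *	return NULL;		// chain consumed
 */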
3671
3672/**
3673 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3674 * @phba: Pointer to HBA context object.
3675 * @pring: Pointer to driver SLI ring object.
3676 * @mask: Host attention register mask for this ring.
3677 *
3772a991
JS
3678 * This routine wraps the actual slow_ring event process routine from the
3679 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3680 **/
3772a991 3681void
2e0fef85
JS
3682lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3683 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3684{
3685 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3686}
3687
3688/**
3689 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3690 * @phba: Pointer to HBA context object.
3691 * @pring: Pointer to driver SLI ring object.
3692 * @mask: Host attention register mask for this ring.
3693 *
3694 * This function is called from the worker thread when there is a ring event
3695 * for non-fcp rings. The caller does not hold any lock. The function will
3696 * remove each response iocb from the response ring and call the handle
3697 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3698 **/
3699static void
3700lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3701 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3702{
34b02dcd 3703 struct lpfc_pgp *pgp;
dea3101e 3704 IOCB_t *entry;
3705 IOCB_t *irsp = NULL;
3706 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3707 uint32_t portRspPut, portRspMax;
dea3101e 3708 unsigned long iflag;
3772a991 3709 uint32_t status;
dea3101e 3710
34b02dcd 3711 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3712 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3713 pring->stats.iocb_event++;
3714
dea3101e 3715 /*
3716 * The next available response entry should never exceed the maximum
3717 * entries. If it does, treat it as an adapter hardware error.
3718 */
7e56aa25 3719 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3720 portRspPut = le32_to_cpu(pgp->rspPutInx);
3721 if (portRspPut >= portRspMax) {
3722 /*
025dfdaf 3723 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3724 * rsp ring <portRspMax>
3725 */
ed957684 3726 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3727 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3728 "is bigger than rsp ring %d\n",
e8b62011 3729 pring->ringno, portRspPut, portRspMax);
dea3101e 3730
2e0fef85
JS
3731 phba->link_state = LPFC_HBA_ERROR;
3732 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3733
3734 phba->work_hs = HS_FFER3;
3735 lpfc_handle_eratt(phba);
3736
3772a991 3737 return;
dea3101e 3738 }
3739
3740 rmb();
7e56aa25 3741 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3742 /*
3743 * Build a completion list and call the appropriate handler.
3744 * The process is to get the next available response iocb, get
3745 * a free iocb from the list, copy the response data into the
3746 * free iocb, insert it into the continuation list, and update the
3747 * next response index to slim. This process makes response
3748 * iocbs in the ring available to DMA as fast as possible but
3749 * pays a penalty for a copy operation. Since the iocb is
3750 * only 32 bytes, this penalty is considered small relative to
3751 * the PCI reads for register values and a slim write. When
3752 * the ulpLe field is set, the entire command has been
3753 * received.
3754 */
ed957684
JS
3755 entry = lpfc_resp_iocb(phba, pring);
3756
858c9f6c 3757 phba->last_completion_time = jiffies;
2e0fef85 3758 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3759 if (rspiocbp == NULL) {
3760 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3761 "completion.\n", __func__);
dea3101e 3762 break;
3763 }
3764
ed957684
JS
3765 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3766 phba->iocb_rsp_size);
dea3101e 3767 irsp = &rspiocbp->iocb;
3768
7e56aa25
JS
3769 if (++pring->sli.sli3.rspidx >= portRspMax)
3770 pring->sli.sli3.rspidx = 0;
dea3101e 3771
a58cbd52
JS
3772 if (pring->ringno == LPFC_ELS_RING) {
3773 lpfc_debugfs_slow_ring_trc(phba,
3774 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3775 *(((uint32_t *) irsp) + 4),
3776 *(((uint32_t *) irsp) + 6),
3777 *(((uint32_t *) irsp) + 7));
3778 }
3779
7e56aa25
JS
3780 writel(pring->sli.sli3.rspidx,
3781 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3782
3772a991
JS
3783 spin_unlock_irqrestore(&phba->hbalock, iflag);
3784 /* Handle the response IOCB */
3785 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3786 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3787
3788 /*
3789 * If the port response put pointer has not been updated, sync
3790 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3791 * response put pointer.
3792 */
7e56aa25 3793 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3794 portRspPut = le32_to_cpu(pgp->rspPutInx);
3795 }
7e56aa25 3796 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3797
92d7f7b0 3798 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3799 /* At least one response entry has been freed */
3800 pring->stats.iocb_rsp_full++;
3801 /* SET RxRE_RSP in Chip Att register */
3802 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3803 writel(status, phba->CAregaddr);
3804 readl(phba->CAregaddr); /* flush */
3805 }
3806 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3807 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3808 pring->stats.iocb_cmd_empty++;
3809
3810 /* Force update of the local copy of cmdGetInx */
7e56aa25 3811 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3812 lpfc_sli_resume_iocb(phba, pring);
3813
3814 if ((pring->lpfc_sli_cmd_available))
3815 (pring->lpfc_sli_cmd_available) (phba, pring);
3816
3817 }
3818
2e0fef85 3819 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3820 return;
dea3101e 3821}
3822
4f774513
JS
3823/**
3824 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3825 * @phba: Pointer to HBA context object.
3826 * @pring: Pointer to driver SLI ring object.
3827 * @mask: Host attention register mask for this ring.
3828 *
3829 * This function is called from the worker thread when there is a pending
3830 * ELS response iocb on the driver internal slow-path response iocb worker
3831 * queue. The caller does not hold any lock. The function will remove each
3832 * response iocb from the response worker queue and call the handle
3833 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3834 **/
3835static void
3836lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3837 struct lpfc_sli_ring *pring, uint32_t mask)
3838{
3839 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3840 struct hbq_dmabuf *dmabuf;
3841 struct lpfc_cq_event *cq_event;
4f774513 3842 unsigned long iflag;
0ef01a2d 3843 int count = 0;
4f774513 3844
45ed1190
JS
3845 spin_lock_irqsave(&phba->hbalock, iflag);
3846 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3847 spin_unlock_irqrestore(&phba->hbalock, iflag);
3848 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3849 /* Get the response iocb from the head of work queue */
3850 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3851 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3852 cq_event, struct lpfc_cq_event, list);
4f774513 3853 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3854
3855 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3856 case CQE_CODE_COMPL_WQE:
3857 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3858 cq_event);
45ed1190
JS
3859 /* Translate ELS WCQE to response IOCBQ */
3860 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3861 irspiocbq);
3862 if (irspiocbq)
3863 lpfc_sli_sp_handle_rspiocb(phba, pring,
3864 irspiocbq);
0ef01a2d 3865 count++;
4d9ab994
JS
3866 break;
3867 case CQE_CODE_RECEIVE:
7851fe2c 3868 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3869 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3870 cq_event);
3871 lpfc_sli4_handle_received_buffer(phba, dmabuf);
0ef01a2d 3872 count++;
4d9ab994
JS
3873 break;
3874 default:
3875 break;
3876 }
0ef01a2d
JS
3877
3878 /* Limit the number of events to 64 to avoid soft lockups */
3879 if (count == 64)
3880 break;
4f774513
JS
3881 }
3882}
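/*
 * Editor's note (illustrative): the drain loop above caps itself at 64
 * events per invocation so a busy sp_queue_event list cannot monopolize
 * the worker thread and trip the soft-lockup watchdog; anything left
 * over stays queued and is handled on a later wakeup. The shape of the
 * loop:
 *
 *	int count = 0;
 *	while (!list_empty(&queue)) {
 *		lock(); event = pop(&queue); unlock();
 *		handle(event);
 *		if (++count == 64)	// bounded batch, then yield
 *			break;
 *	}
 */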
3883
e59058c4 3884/**
3621a710 3885 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3886 * @phba: Pointer to HBA context object.
3887 * @pring: Pointer to driver SLI ring object.
3888 *
3889 * This function aborts all iocbs in the given ring and frees all the iocb
3890 * objects in txq. This function issues an abort iocb for all the iocb commands
3891 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3892 * the return of this function. The caller is not required to hold any locks.
3893 **/
2e0fef85 3894void
dea3101e 3895lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3896{
2534ba75 3897 LIST_HEAD(completions);
dea3101e 3898 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3899
92d7f7b0
JS
3900 if (pring->ringno == LPFC_ELS_RING) {
3901 lpfc_fabric_abort_hba(phba);
3902 }
3903
dea3101e 3904 /* Error everything on txq and txcmplq
3905 * First do the txq.
3906 */
db55fba8
JS
3907 if (phba->sli_rev >= LPFC_SLI_REV4) {
3908 spin_lock_irq(&pring->ring_lock);
3909 list_splice_init(&pring->txq, &completions);
3910 pring->txq_cnt = 0;
3911 spin_unlock_irq(&pring->ring_lock);
dea3101e 3912
db55fba8
JS
3913 spin_lock_irq(&phba->hbalock);
3914 /* Next issue ABTS for everything on the txcmplq */
3915 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3916 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3917 spin_unlock_irq(&phba->hbalock);
3918 } else {
3919 spin_lock_irq(&phba->hbalock);
3920 list_splice_init(&pring->txq, &completions);
3921 pring->txq_cnt = 0;
dea3101e 3922
db55fba8
JS
3923 /* Next issue ABTS for everything on the txcmplq */
3924 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3925 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3926 spin_unlock_irq(&phba->hbalock);
3927 }
dea3101e 3928
a257bf90
JS
3929 /* Cancel all the IOCBs from the completions list */
3930 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3931 IOERR_SLI_ABORTED);
dea3101e 3932}
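/*
 * Editor's sketch (illustrative, not driver code): the abort path uses
 * the common splice-then-cancel idiom so that completion handlers run
 * without the lock held:
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(lock);
 *	list_splice_init(&pring->txq, &completions);	// steal pending txq
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		issue_abort(iocb);	// in-flight I/O gets an ABTS instead
 *	spin_unlock_irq(lock);
 *
 *	cancel_all(&completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */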
3933
895427bd
JS
3934/**
3935 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3936 * @phba: Pointer to HBA context object.
3937 * @pring: Pointer to driver SLI ring object.
3938 *
3939 * This function aborts all wqes in the given ring by issuing an abort
3940 * wqe for each of the iocb commands on the txcmplq. The iocbs in the
3941 * txcmplq are not guaranteed to complete before the return of this
3942 * function. The caller is not required to hold any locks.
3943 **/
3944void
3945lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3946{
3947 LIST_HEAD(completions);
3948 struct lpfc_iocbq *iocb, *next_iocb;
3949
3950 if (pring->ringno == LPFC_ELS_RING)
3951 lpfc_fabric_abort_hba(phba);
3952
3953 spin_lock_irq(&phba->hbalock);
3954 /* Next issue ABTS for everything on the txcmplq */
3955 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3956 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3957 spin_unlock_irq(&phba->hbalock);
3958}
3959
3960
db55fba8
JS
3961/**
3962 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3963 * @phba: Pointer to HBA context object.
3964 * @pring: Pointer to driver SLI ring object.
3965 *
3966 * This function aborts all iocbs in FCP rings and frees all the iocb
3967 * objects in txq. This function issues an abort iocb for all the iocb commands
3968 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3969 * the return of this function. The caller is not required to hold any locks.
3970 **/
3971void
3972lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3973{
3974 struct lpfc_sli *psli = &phba->sli;
3975 struct lpfc_sli_ring *pring;
3976 uint32_t i;
3977
3978 /* Look on all the FCP Rings for the iotag */
3979 if (phba->sli_rev >= LPFC_SLI_REV4) {
3980 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
895427bd 3981 pring = phba->sli4_hba.fcp_wq[i]->pring;
db55fba8
JS
3982 lpfc_sli_abort_iocb_ring(phba, pring);
3983 }
3984 } else {
895427bd 3985 pring = &psli->sli3_ring[LPFC_FCP_RING];
db55fba8
JS
3986 lpfc_sli_abort_iocb_ring(phba, pring);
3987 }
3988}
3989
895427bd
JS
3990/**
3991 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3992 * @phba: Pointer to HBA context object.
3993 *
3994 * This function aborts all wqes in NVME rings. This function issues an
3995 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3996 * the txcmplq are not guaranteed to complete before the return of this
3997 * function. The caller is not required to hold any locks.
3998 **/
3999void
4000lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
4001{
4002 struct lpfc_sli_ring *pring;
4003 uint32_t i;
4004
4005 if (phba->sli_rev < LPFC_SLI_REV4)
4006 return;
4007
4008 /* Abort all IO on each NVME ring. */
4009 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4010 pring = phba->sli4_hba.nvme_wq[i]->pring;
4011 lpfc_sli_abort_wqe_ring(phba, pring);
4012 }
4013}
4014
db55fba8 4015
a8e497d5 4016/**
3621a710 4017 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
4018 * @phba: Pointer to HBA context object.
4019 *
4020 * This function flushes all iocbs in the fcp ring and frees all the iocb
4021 * objects in txq and txcmplq. This function will not issue abort iocbs
4022 * for the iocb commands in txcmplq; they will just be returned with
4023 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
4024 * slot has been permanently disabled.
4025 **/
4026void
4027lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4028{
4029 LIST_HEAD(txq);
4030 LIST_HEAD(txcmplq);
a8e497d5
JS
4031 struct lpfc_sli *psli = &phba->sli;
4032 struct lpfc_sli_ring *pring;
db55fba8 4033 uint32_t i;
c1dd9111 4034 struct lpfc_iocbq *piocb, *next_iocb;
a8e497d5
JS
4035
4036 spin_lock_irq(&phba->hbalock);
4f2e66c6
JS
4037 /* Indicate the I/O queues are flushed */
4038 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
4039 spin_unlock_irq(&phba->hbalock);
4040
db55fba8
JS
4041 /* Look on all the FCP Rings for the iotag */
4042 if (phba->sli_rev >= LPFC_SLI_REV4) {
4043 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
895427bd 4044 pring = phba->sli4_hba.fcp_wq[i]->pring;
db55fba8
JS
4045
4046 spin_lock_irq(&pring->ring_lock);
4047 /* Retrieve everything on txq */
4048 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4049 list_for_each_entry_safe(piocb, next_iocb,
4050 &pring->txcmplq, list)
4051 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4052 /* Retrieve everything on the txcmplq */
4053 list_splice_init(&pring->txcmplq, &txcmplq);
4054 pring->txq_cnt = 0;
4055 pring->txcmplq_cnt = 0;
4056 spin_unlock_irq(&pring->ring_lock);
4057
4058 /* Flush the txq */
4059 lpfc_sli_cancel_iocbs(phba, &txq,
4060 IOSTAT_LOCAL_REJECT,
4061 IOERR_SLI_DOWN);
4062 /* Flush the txcmplq */
4063 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4064 IOSTAT_LOCAL_REJECT,
4065 IOERR_SLI_DOWN);
4066 }
4067 } else {
895427bd 4068 pring = &psli->sli3_ring[LPFC_FCP_RING];
a8e497d5 4069
db55fba8
JS
4070 spin_lock_irq(&phba->hbalock);
4071 /* Retrieve everything on txq */
4072 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4073 list_for_each_entry_safe(piocb, next_iocb,
4074 &pring->txcmplq, list)
4075 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4076 /* Retrieve everything on the txcmplq */
4077 list_splice_init(&pring->txcmplq, &txcmplq);
4078 pring->txq_cnt = 0;
4079 pring->txcmplq_cnt = 0;
4080 spin_unlock_irq(&phba->hbalock);
4081
4082 /* Flush the txq */
4083 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4084 IOERR_SLI_DOWN);
4085 /* Flush the txcmplq */
4086 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4087 IOERR_SLI_DOWN);
4088 }
a8e497d5
JS
4089}
4090
895427bd
JS
4091/**
4092 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4093 * @phba: Pointer to HBA context object.
4094 *
4095 * This function flushes all wqes in the nvme rings and frees all resources
4096 * in the txcmplq. This function does not issue abort wqes for the IO
4097 * commands in txcmplq; they will just be returned with
4098 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
4099 * slot has been permanently disabled.
4100 **/
4101void
4102lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4103{
4104 LIST_HEAD(txcmplq);
4105 struct lpfc_sli_ring *pring;
4106 uint32_t i;
c1dd9111 4107 struct lpfc_iocbq *piocb, *next_iocb;
895427bd
JS
4108
4109 if (phba->sli_rev < LPFC_SLI_REV4)
4110 return;
4111
4112 /* Hint to other driver operations that a flush is in progress. */
4113 spin_lock_irq(&phba->hbalock);
4114 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4115 spin_unlock_irq(&phba->hbalock);
4116
4117 /* Cycle through all NVME rings and complete each IO with
4118 * a local driver reason code. This is a flush so no
4119 * abort exchange to FW.
4120 */
4121 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4122 pring = phba->sli4_hba.nvme_wq[i]->pring;
4123
895427bd 4124 spin_lock_irq(&pring->ring_lock);
c1dd9111
JS
4125 list_for_each_entry_safe(piocb, next_iocb,
4126 &pring->txcmplq, list)
4127 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4128 /* Retrieve everything on the txcmplq */
895427bd
JS
4129 list_splice_init(&pring->txcmplq, &txcmplq);
4130 pring->txcmplq_cnt = 0;
4131 spin_unlock_irq(&pring->ring_lock);
4132
4133 /* Flush the txcmplq */
4134 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4135 IOSTAT_LOCAL_REJECT,
4136 IOERR_SLI_DOWN);
4137 }
4138}
4139
e59058c4 4140/**
3772a991 4141 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
4142 * @phba: Pointer to HBA context object.
4143 * @mask: Bit mask to be checked.
4144 *
4145 * This function reads the host status register and compares
4146 * it with the provided bit mask to check if the HBA completed
4147 * the restart. This function will wait in a loop for the
4148 * HBA to complete the restart. If the HBA does not restart within
4149 * 15 iterations, the function will reset the HBA again. The
4150 * function returns 1 when the HBA fails to restart, otherwise it returns
4151 * zero.
4152 **/
3772a991
JS
4153static int
4154lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 4155{
41415862
JW
4156 uint32_t status;
4157 int i = 0;
4158 int retval = 0;
dea3101e 4159
41415862 4160 /* Read the HBA Host Status Register */
9940b97b
JS
4161 if (lpfc_readl(phba->HSregaddr, &status))
4162 return 1;
dea3101e 4163
41415862
JW
4164 /*
4165 * Check status register every 10ms for 5 retries, then every
4166 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4167 * check every 2.5 sec for 4 more.
4168 * Break out of the loop if errors occurred during init.
4169 */
4170 while (((status & mask) != mask) &&
4171 !(status & HS_FFERM) &&
4172 i++ < 20) {
dea3101e 4173
41415862
JW
4174 if (i <= 5)
4175 msleep(10);
4176 else if (i <= 10)
4177 msleep(500);
4178 else
4179 msleep(2500);
dea3101e 4180
41415862 4181 if (i == 15) {
2e0fef85 4182 /* Do post */
92d7f7b0 4183 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
4184 lpfc_sli_brdrestart(phba);
4185 }
4186 /* Read the HBA Host Status Register */
9940b97b
JS
4187 if (lpfc_readl(phba->HSregaddr, &status)) {
4188 retval = 1;
4189 break;
4190 }
41415862 4191 }
dea3101e 4192
41415862
JW
4193 /* Check to see if any errors occurred during init */
4194 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
4195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4196 "2751 Adapter failed to restart, "
4197 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4198 status,
4199 readl(phba->MBslimaddr + 0xa8),
4200 readl(phba->MBslimaddr + 0xac));
2e0fef85 4201 phba->link_state = LPFC_HBA_ERROR;
41415862 4202 retval = 1;
dea3101e 4203 }
dea3101e 4204
41415862
JW
4205 return retval;
4206}
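/*
 * Editor's note (illustrative): the wait above is a poll with escalating
 * backoff - 10 ms for the first five checks, 500 ms for the next five,
 * then 2.5 s - with one restart attempt at iteration 15 before giving up
 * at 20. The schedule in isolation:
 *
 *	static unsigned int brdready_delay_ms(int i)
 *	{
 *		if (i <= 5)
 *			return 10;
 *		if (i <= 10)
 *			return 500;
 *		return 2500;
 *	}
 */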
dea3101e 4207
da0436e9
JS
4208/**
4209 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4210 * @phba: Pointer to HBA context object.
4211 * @mask: Bit mask to be checked.
4212 *
4213 * This function checks the host status register to see if the HBA is
4214 * ready. This function will wait in a loop for the HBA to become ready.
4215 * If the HBA is not ready, the function will reset the HBA PCI
4216 * function again. The function returns 1 when the HBA fails to become
4217 * ready, otherwise it returns zero.
4218 **/
4219static int
4220lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4221{
4222 uint32_t status;
4223 int retval = 0;
4224
4225 /* Read the HBA Host Status Register */
4226 status = lpfc_sli4_post_status_check(phba);
4227
4228 if (status) {
4229 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4230 lpfc_sli_brdrestart(phba);
4231 status = lpfc_sli4_post_status_check(phba);
4232 }
4233
4234 /* Check to see if any errors occurred during init */
4235 if (status) {
4236 phba->link_state = LPFC_HBA_ERROR;
4237 retval = 1;
4238 } else
4239 phba->sli4_hba.intr_enable = 0;
4240
4241 return retval;
4242}
4243
4244/**
4245 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4246 * @phba: Pointer to HBA context object.
4247 * @mask: Bit mask to be checked.
4248 *
4249 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4250 * from the API jump table function pointer from the lpfc_hba struct.
4251 **/
4252int
4253lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4254{
4255 return phba->lpfc_sli_brdready(phba, mask);
4256}
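/*
 * Editor's sketch (illustrative): lpfc_sli_brdready() is one of several
 * thin wrappers that dispatch through per-revision function pointers in
 * struct lpfc_hba, so shared code never tests sli_rev inline. Assuming
 * the _s3/_s4 pair defined above, the setup amounts to:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
 *	else
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
 *
 *	// after which every caller simply does:
 *	rc = phba->lpfc_sli_brdready(phba, mask);
 */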
4257
9290831f
JS
4258#define BARRIER_TEST_PATTERN (0xdeadbeef)
4259
e59058c4 4260/**
3621a710 4261 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
4262 * @phba: Pointer to HBA context object.
4263 *
1b51197d
JS
4264 * This function is called before resetting an HBA. This function is called
4265 * with hbalock held and requests the HBA to quiesce DMAs before a reset.
e59058c4 4266 **/
2e0fef85 4267void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 4268{
65a29c16
JS
4269 uint32_t __iomem *resp_buf;
4270 uint32_t __iomem *mbox_buf;
9290831f 4271 volatile uint32_t mbox;
9940b97b 4272 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
4273 int i;
4274 uint8_t hdrtype;
4275
1c2ba475
JT
4276 lockdep_assert_held(&phba->hbalock);
4277
9290831f
JS
4278 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4279 if (hdrtype != 0x80 ||
4280 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4281 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4282 return;
4283
4284 /*
4285 * Tell the other part of the chip to suspend temporarily all
4286 * its DMA activity.
4287 */
65a29c16 4288 resp_buf = phba->MBslimaddr;
9290831f
JS
4289
4290 /* Disable the error attention */
9940b97b
JS
4291 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4292 return;
9290831f
JS
4293 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4294 readl(phba->HCregaddr); /* flush */
2e0fef85 4295 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 4296
9940b97b
JS
4297 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4298 return;
4299 if (ha_copy & HA_ERATT) {
9290831f
JS
4300 /* Clear Chip error bit */
4301 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4302 phba->pport->stopped = 1;
9290831f
JS
4303 }
4304
4305 mbox = 0;
4306 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4307 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4308
4309 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 4310 mbox_buf = phba->MBslimaddr;
9290831f
JS
4311 writel(mbox, mbox_buf);
4312
9940b97b
JS
4313 for (i = 0; i < 50; i++) {
4314 if (lpfc_readl((resp_buf + 1), &resp_data))
4315 return;
4316 if (resp_data != ~(BARRIER_TEST_PATTERN))
4317 mdelay(1);
4318 else
4319 break;
4320 }
4321 resp_data = 0;
4322 if (lpfc_readl((resp_buf + 1), &resp_data))
4323 return;
4324 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 4325 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 4326 phba->pport->stopped)
9290831f
JS
4327 goto restore_hc;
4328 else
4329 goto clear_errat;
4330 }
4331
4332 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
4333 resp_data = 0;
4334 for (i = 0; i < 500; i++) {
4335 if (lpfc_readl(resp_buf, &resp_data))
4336 return;
4337 if (resp_data != mbox)
4338 mdelay(1);
4339 else
4340 break;
4341 }
9290831f
JS
4342
4343clear_errat:
4344
9940b97b
JS
4345 while (++i < 500) {
4346 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4347 return;
4348 if (!(ha_copy & HA_ERATT))
4349 mdelay(1);
4350 else
4351 break;
4352 }
9290831f
JS
4353
4354 if (readl(phba->HAregaddr) & HA_ERATT) {
4355 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4356 phba->pport->stopped = 1;
9290831f
JS
4357 }
4358
4359restore_hc:
2e0fef85 4360 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
4361 writel(hc_copy, phba->HCregaddr);
4362 readl(phba->HCregaddr); /* flush */
4363}
4364
e59058c4 4365/**
3621a710 4366 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
4367 * @phba: Pointer to HBA context object.
4368 *
4369 * This function issues a kill_board mailbox command and waits for
4370 * the error attention interrupt. This function is called for stopping
4371 * the firmware processing. The caller is not required to hold any
4372 * locks. This function calls lpfc_hba_down_post function to free
4373 * any pending commands after the kill. The function will return 1 when it
4374 * fails to kill the board, else it will return 0.
4375 **/
41415862 4376int
2e0fef85 4377lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
4378{
4379 struct lpfc_sli *psli;
4380 LPFC_MBOXQ_t *pmb;
4381 uint32_t status;
4382 uint32_t ha_copy;
4383 int retval;
4384 int i = 0;
dea3101e 4385
41415862 4386 psli = &phba->sli;
dea3101e 4387
41415862 4388 /* Kill HBA */
ed957684 4389 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
4390 "0329 Kill HBA Data: x%x x%x\n",
4391 phba->pport->port_state, psli->sli_flag);
41415862 4392
98c9ea5c
JS
4393 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4394 if (!pmb)
41415862 4395 return 1;
41415862
JW
4396
4397 /* Disable the error attention */
2e0fef85 4398 spin_lock_irq(&phba->hbalock);
9940b97b
JS
4399 if (lpfc_readl(phba->HCregaddr, &status)) {
4400 spin_unlock_irq(&phba->hbalock);
4401 mempool_free(pmb, phba->mbox_mem_pool);
4402 return 1;
4403 }
41415862
JW
4404 status &= ~HC_ERINT_ENA;
4405 writel(status, phba->HCregaddr);
4406 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
4407 phba->link_flag |= LS_IGNORE_ERATT;
4408 spin_unlock_irq(&phba->hbalock);
41415862
JW
4409
4410 lpfc_kill_board(phba, pmb);
4411 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4412 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4413
4414 if (retval != MBX_SUCCESS) {
4415 if (retval != MBX_BUSY)
4416 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
4417 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4418 "2752 KILL_BOARD command failed retval %d\n",
4419 retval);
2e0fef85
JS
4420 spin_lock_irq(&phba->hbalock);
4421 phba->link_flag &= ~LS_IGNORE_ERATT;
4422 spin_unlock_irq(&phba->hbalock);
41415862
JW
4423 return 1;
4424 }
4425
f4b4c68f
JS
4426 spin_lock_irq(&phba->hbalock);
4427 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4428 spin_unlock_irq(&phba->hbalock);
9290831f 4429
41415862
JW
4430 mempool_free(pmb, phba->mbox_mem_pool);
4431
4432 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4433 * attention every 100ms for 3 seconds. If we don't get ERATT after
4434 * 3 seconds we still set HBA_ERROR state because the status of the
4435 * board is now undefined.
4436 */
9940b97b
JS
4437 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4438 return 1;
41415862
JW
4439 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4440 mdelay(100);
9940b97b
JS
4441 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4442 return 1;
41415862
JW
4443 }
4444
4445 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
4446 if (ha_copy & HA_ERATT) {
4447 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4448 phba->pport->stopped = 1;
9290831f 4449 }
2e0fef85 4450 spin_lock_irq(&phba->hbalock);
41415862 4451 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 4452 psli->mbox_active = NULL;
2e0fef85
JS
4453 phba->link_flag &= ~LS_IGNORE_ERATT;
4454 spin_unlock_irq(&phba->hbalock);
41415862 4455
41415862 4456 lpfc_hba_down_post(phba);
2e0fef85 4457 phba->link_state = LPFC_HBA_ERROR;
41415862 4458
2e0fef85 4459 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 4460}
4461
e59058c4 4462/**
3772a991 4463 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
4464 * @phba: Pointer to HBA context object.
4465 *
4466 * This function resets the HBA by writing HC_INITFF to the control
4467 * register. After the HBA resets, this function resets all the iocb ring
4468 * indices. This function disables PCI layer parity checking during
4469 * the reset.
4470 * This function returns 0 always.
4471 * The caller is not required to hold any locks.
4472 **/
41415862 4473int
2e0fef85 4474lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 4475{
41415862 4476 struct lpfc_sli *psli;
dea3101e 4477 struct lpfc_sli_ring *pring;
41415862 4478 uint16_t cfg_value;
dea3101e 4479 int i;
dea3101e 4480
41415862 4481 psli = &phba->sli;
dea3101e 4482
41415862
JW
4483 /* Reset HBA */
4484 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4485 "0325 Reset HBA Data: x%x x%x\n",
4492b739
JS
4486 (phba->pport) ? phba->pport->port_state : 0,
4487 psli->sli_flag);
dea3101e 4488
4489 /* perform board reset */
4490 phba->fc_eventTag = 0;
4d9ab994 4491 phba->link_events = 0;
4492b739
JS
4492 if (phba->pport) {
4493 phba->pport->fc_myDID = 0;
4494 phba->pport->fc_prevDID = 0;
4495 }
dea3101e 4496
41415862
JW
4497 /* Turn off parity checking and serr during the physical reset */
4498 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4499 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4500 (cfg_value &
4501 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4502
3772a991
JS
4503 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4504
41415862
JW
4505 /* Now toggle INITFF bit in the Host Control Register */
4506 writel(HC_INITFF, phba->HCregaddr);
4507 mdelay(1);
4508 readl(phba->HCregaddr); /* flush */
4509 writel(0, phba->HCregaddr);
4510 readl(phba->HCregaddr); /* flush */
4511
4512 /* Restore PCI cmd register */
4513 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 4514
4515 /* Initialize relevant SLI info */
41415862 4516 for (i = 0; i < psli->num_rings; i++) {
895427bd 4517 pring = &psli->sli3_ring[i];
dea3101e 4518 pring->flag = 0;
7e56aa25
JS
4519 pring->sli.sli3.rspidx = 0;
4520 pring->sli.sli3.next_cmdidx = 0;
4521 pring->sli.sli3.local_getidx = 0;
4522 pring->sli.sli3.cmdidx = 0;
dea3101e 4523 pring->missbufcnt = 0;
4524 }
dea3101e 4525
2e0fef85 4526 phba->link_state = LPFC_WARM_START;
41415862
JW
4527 return 0;
4528}
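/*
 * Editor's note (illustrative, not driver code): the reset above follows
 * a common PCI pattern - mask parity/SERR reporting so the self-reset is
 * not flagged as a bus error, pulse the reset bit with posted-write
 * flushes, then restore config space:
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
 *	pci_write_config_word(pdev, PCI_COMMAND, cfg_value &
 *			~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *	writel(HC_INITFF, phba->HCregaddr);	// assert INITFF
 *	mdelay(1);
 *	readl(phba->HCregaddr);			// flush posted write
 *	writel(0, phba->HCregaddr);		// deassert
 *	readl(phba->HCregaddr);
 *	pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
 */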
4529
e59058c4 4530/**
da0436e9
JS
4531 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4532 * @phba: Pointer to HBA context object.
4533 *
4534 * This function resets a SLI4 HBA. This function disables PCI layer parity
4535 * checking while it resets the device. The caller is not required to hold
4536 * any locks.
4537 *
4538 * This function returns 0 always.
4539 **/
4540int
4541lpfc_sli4_brdreset(struct lpfc_hba *phba)
4542{
4543 struct lpfc_sli *psli = &phba->sli;
4544 uint16_t cfg_value;
0293635e 4545 int rc = 0;
da0436e9
JS
4546
4547 /* Reset HBA */
4548 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0293635e
JS
4549 "0295 Reset HBA Data: x%x x%x x%x\n",
4550 phba->pport->port_state, psli->sli_flag,
4551 phba->hba_flag);
da0436e9
JS
4552
4553 /* perform board reset */
4554 phba->fc_eventTag = 0;
4d9ab994 4555 phba->link_events = 0;
da0436e9
JS
4556 phba->pport->fc_myDID = 0;
4557 phba->pport->fc_prevDID = 0;
4558
da0436e9
JS
4559 spin_lock_irq(&phba->hbalock);
4560 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4561 phba->fcf.fcf_flag = 0;
da0436e9
JS
4562 spin_unlock_irq(&phba->hbalock);
4563
0293635e
JS
4564 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4565 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4566 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4567 return rc;
4568 }
4569
da0436e9
JS
4570 /* Now physically reset the device */
4571 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4572 "0389 Performing PCI function reset!\n");
be858b65
JS
4573
4574 /* Turn off parity checking and serr during the physical reset */
4575 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4576 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4577 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4578
88318816 4579 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4580 rc = lpfc_pci_function_reset(phba);
da0436e9 4581
be858b65
JS
4582 /* Restore PCI cmd register */
4583 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4584
27b01b82 4585 return rc;
da0436e9
JS
4586}
4587
4588/**
4589 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4590 * @phba: Pointer to HBA context object.
4591 *
4592 * This function is called in the SLI initialization code path to
4593 * restart the HBA. The caller is not required to hold any lock.
4594 * This function writes MBX_RESTART mailbox command to the SLIM and
4595 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4596 * function to free any pending commands. The function enables
4597 * POST only during the first initialization. The function returns zero.
4598 * The function does not guarantee completion of MBX_RESTART mailbox
4599 * command before the return of this function.
4600 **/
da0436e9
JS
4601static int
4602lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4603{
4604 MAILBOX_t *mb;
4605 struct lpfc_sli *psli;
41415862
JW
4606 volatile uint32_t word0;
4607 void __iomem *to_slim;
0d878419 4608 uint32_t hba_aer_enabled;
41415862 4609
2e0fef85 4610 spin_lock_irq(&phba->hbalock);
41415862 4611
0d878419
JS
4612 /* Take PCIe device Advanced Error Reporting (AER) state */
4613 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4614
41415862
JW
4615 psli = &phba->sli;
4616
4617 /* Restart HBA */
4618 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4619 "0337 Restart HBA Data: x%x x%x\n",
4492b739
JS
4620 (phba->pport) ? phba->pport->port_state : 0,
4621 psli->sli_flag);
41415862
JW
4622
4623 word0 = 0;
4624 mb = (MAILBOX_t *) &word0;
4625 mb->mbxCommand = MBX_RESTART;
4626 mb->mbxHc = 1;
4627
9290831f
JS
4628 lpfc_reset_barrier(phba);
4629
41415862
JW
4630 to_slim = phba->MBslimaddr;
4631 writel(*(uint32_t *) mb, to_slim);
4632 readl(to_slim); /* flush */
4633
4634 /* Only skip post after fc_ffinit is completed */
4492b739 4635 if (phba->pport && phba->pport->port_state)
41415862 4636 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4637 else
41415862 4638 word0 = 0; /* This is really setting up word1 */
65a29c16 4639 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4640 writel(*(uint32_t *) mb, to_slim);
4641 readl(to_slim); /* flush */
dea3101e 4642
41415862 4643 lpfc_sli_brdreset(phba);
4492b739
JS
4644 if (phba->pport)
4645 phba->pport->stopped = 0;
2e0fef85 4646 phba->link_state = LPFC_INIT_START;
da0436e9 4647 phba->hba_flag = 0;
2e0fef85 4648 spin_unlock_irq(&phba->hbalock);
41415862 4649
64ba8818 4650 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4651 psli->stats_start = ktime_get_seconds();
64ba8818 4652
eaf15d5b
JS
4653 /* Give the INITFF and Post time to settle. */
4654 mdelay(100);
41415862 4655
0d878419
JS
4656 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4657 if (hba_aer_enabled)
4658 pci_disable_pcie_error_reporting(phba->pcidev);
4659
41415862 4660 lpfc_hba_down_post(phba);
dea3101e 4661
4662 return 0;
4663}

/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);
	if (rc)
		return rc;

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}

/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine via the
 * API jump table function pointer in the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
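
/*
 * Illustrative sketch, not driver code: the API jump-table pattern the
 * wrapper above relies on. The names here (fake_hba, setup_api, the two
 * restart stubs) are hypothetical; lpfc performs the equivalent
 * function-pointer assignment once at attach time, based on the detected
 * SLI revision, so every caller can go through one wrapper.
 */
#include <stdio.h>

struct fake_hba {
	int sli_rev;
	int (*brdrestart)(struct fake_hba *);
};

static int brdrestart_s3(struct fake_hba *hba) { puts("SLI-3 restart"); return 0; }
static int brdrestart_s4(struct fake_hba *hba) { puts("SLI-4 restart"); return 0; }

static void setup_api(struct fake_hba *hba)
{
	/* One assignment at init; callers never test sli_rev again. */
	hba->brdrestart = (hba->sli_rev == 4) ? brdrestart_s4 : brdrestart_s3;
}

int main(void)
{
	struct fake_hba hba = { .sli_rev = 4 };

	setup_api(&hba);
	return hba.brdrestart(&hba);	/* mirrors lpfc_sli_brdrestart() */
}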

/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart after 150
 * polling iterations, the function will restart the HBA again. The
 * function returns zero if the HBA successfully restarted, else it
 * returns a negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 more retries. The up-to-60-second wait
		 * before the board is ready is required for Falcon FIPS
		 * zeroization to complete; any board reset in between
		 * restarts the zeroization and further delays board
		 * readiness.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
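
/*
 * Illustrative sketch (standalone, not driver code): the approximate
 * wall time of the tiered polling schedule above - 10 polls at 10 ms,
 * then 90 at 100 ms, then 1000 ms thereafter, with a board restart at
 * iteration 150 and a hard timeout at iteration 200.
 */
#include <stdio.h>

int main(void)
{
	unsigned int i, total_ms = 0;

	for (i = 1; i <= 200; i++) {
		if (i <= 10)
			total_ms += 10;
		else if (i <= 100)
			total_ms += 100;
		else
			total_ms += 1000;
		if (i == 150)	/* ~59 s in: one mid-course restart */
			printf("restart issued at ~%u ms\n", total_ms);
	}
	printf("timeout declared at ~%u ms\n", total_ms);
	return 0;
}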

/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory required for all hbq
 * entries to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
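
/*
 * Illustrative sketch (standalone): sizing HBQ memory the same way as
 * the three helpers above - sum the per-HBQ entry counts and multiply
 * by the entry size. The counts and the 16-byte entry layout here are
 * example values, not taken from the driver headers.
 */
#include <stdio.h>
#include <stddef.h>

struct example_hbq_entry { unsigned int w[4]; };	/* assumed layout */

int main(void)
{
	static const int entry_count[] = { 256, 128 };	/* per-HBQ counts */
	size_t i, total = 0;

	for (i = 0; i < sizeof(entry_count) / sizeof(entry_count[0]); i++)
		total += entry_count[i];
	printf("%zu entries -> %zu bytes\n",
	       total, total * sizeof(struct example_hbq_entry));
	return 0;
}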

/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
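
/*
 * Illustrative sketch (standalone): the configure-then-fail-fast loop
 * shape used by lpfc_sli_hbq_setup() above - one mailbox command per
 * queue, aborting on the first refusal. config_queue() is a made-up
 * stand-in, not a driver function.
 */
#include <stdio.h>

static int config_queue(int qno)
{
	return (qno == 3) ? -1 : 0;	/* pretend queue 3 is rejected */
}

int main(void)
{
	int qno, nqueues = 4;

	for (qno = 0; qno < nqueues; qno++) {
		if (config_queue(qno) != 0) {
			fprintf(stderr, "queue %d failed; aborting setup\n",
				qno);
			return 1;	/* mirrors the -ENXIO bail-out */
		}
	}
	puts("all queues configured; now post buffers");
	return 0;
}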

/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[LPFC_ELS_HBQ].entry_count =
		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
	phba->hbq_count = 1;
	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	/* Initially populate or replenish the HBQs */
	return 0;
}

/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}


/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3, i;
	int longs;

	switch (phba->cfg_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override sli_mode "
				"parameter (%d) to auto (0).\n",
				phba->cfg_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized sli_mode parameter: %d.\n",
				phba->cfg_sli_mode);

		break;
	}
	phba->fcp_embed_io = 0;	/* SLI4 FC support only */

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && phba->cfg_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	else if (rc && mode == 2)
		rc = lpfc_sli_config_port(phba, 3);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kcalloc(longs,
						  sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
						sizeof(uint16_t),
						GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
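
/*
 * Illustrative sketch (standalone): the SLI mode-negotiation fallback
 * used above - request one mode, and if the port refuses, retry with
 * the other. try_config() is a made-up stand-in for
 * lpfc_sli_config_port(); the refusal is simulated.
 */
#include <stdio.h>

static int try_config(int mode)
{
	return (mode == 3) ? -1 : 0;	/* pretend SLI-3 is unsupported */
}

int main(void)
{
	int mode = 3;
	int rc = try_config(mode);

	if (rc && mode != 2)
		rc = try_config(mode = 2);	/* fall back to SLI-2 */
	else if (rc && mode == 2)
		rc = try_config(mode = 3);	/* or try up to SLI-3 */
	printf("negotiated SLI-%d (rc=%d)\n", mode, rc);
	return rc;
}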

/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region, and populates the driver
 * data structure.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *            On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		    uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
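
/*
 * Illustrative sketch (standalone): the in/out size convention used by
 * lpfc_sli4_read_rev()'s @vpd_size - the caller passes the buffer
 * capacity in, the callee clamps it to what was actually produced.
 * fill_vpd() and the 37-byte "available" length are made up for the
 * example.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int fill_vpd(uint8_t *buf, uint32_t *size)
{
	uint32_t avail = 37;		/* pretend the port returned 37 bytes */

	if (avail < *size)		/* clamp to what is really there */
		*size = avail;
	memset(buf, 0xA5, *size);	/* stand-in for the DMA copy */
	return 0;
}

int main(void)
{
	uint8_t vpd[128];
	uint32_t vpd_size = sizeof(vpd);	/* capacity on input */

	fill_vpd(vpd, &vpd_size);
	printf("got %u bytes of vpd\n", vpd_size);	/* bytes on output */
	return 0;
}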

/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device physical port name that this
 * PCI function is attached to.
 *
 * Return codes
 *	0 - successful
 *	otherwise - failed to retrieve physical port name
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	struct lpfc_mbx_get_port_name *get_port_name;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}

/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;

	sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
	sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
					  LPFC_QUEUE_REARM);

	if (sli4_hba->fcp_cq)
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
			sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
						  LPFC_QUEUE_REARM);

	if (sli4_hba->nvme_cq)
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
			sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
						  LPFC_QUEUE_REARM);

	if (phba->cfg_fof)
		sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);

	if (sli4_hba->hba_eq)
		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
			sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
						  LPFC_QUEUE_REARM);

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_cq_release(
				sli4_hba->nvmet_cqset[qidx],
				LPFC_QUEUE_REARM);
		}
	}

	if (phba->cfg_fof)
		sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
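
/*
 * Illustrative sketch (standalone): the issue-and-wait pattern repeated
 * throughout the extent routines - poll synchronously when interrupts
 * are not yet enabled, otherwise sleep on the completion with a timeout.
 * The functions here are made-up stand-ins, not the lpfc mailbox API.
 */
#include <stdio.h>
#include <stdbool.h>

static int issue_poll(void)            { return 0; /* spin on the port */ }
static int issue_wait(unsigned int ms) { (void)ms; return 0; /* block */ }

static int issue_mbox(bool intr_enable, unsigned int tmo_ms)
{
	/* Early in init the ISR is not hooked up yet, so polling is the
	 * only option; later, waiting lets the CPU do other work.
	 */
	return intr_enable ? issue_wait(tmo_ms) : issue_poll();
}

int main(void)
{
	printf("rc=%d\n", issue_mbox(false, 0));	/* early init: poll */
	printf("rc=%d\n", issue_mbox(true, 30000));	/* normal: wait */
	return 0;
}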

/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *    1: Extent count or size has changed.
 *    0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}

/**
 * lpfc_sli4_cfg_post_extnts -
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
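
/*
 * Illustrative sketch (standalone): the embedded-vs-non-embedded
 * decision above. If the id payload fits in the space left inside the
 * fixed mailbox frame, use the embedded form; otherwise fall back to an
 * external SGE payload. The sizes here are example values, not SLI4
 * constants.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void)
{
	const uint32_t mailbox_size = 256;	/* assumed frame size */
	const uint32_t header_size = 28;	/* assumed hdr + extra word */
	uint32_t extnt_cnt = 64;
	uint32_t req_len = (uint32_t)(extnt_cnt * sizeof(uint16_t));
	bool embedded = req_len <= (mailbox_size - header_size);

	printf("%u bytes requested -> %s mailbox\n",
	       req_len, embedded ? "embedded" : "non-embedded");
	return 0;
}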

/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number. Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.scsi_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
			phba->sli4_hba.nvme_xri_start =
				phba->sli4_hba.scsi_xri_start +
				phba->sli4_hba.scsi_xri_max;
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word.*/
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
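
/*
 * Illustrative sketch (standalone): unpacking two 16-bit extent ids per
 * 32-bit response word, the same even/odd indexing the loop above uses
 * with bf_get() on id_array[k]. Assumes the even-indexed id sits in the
 * low half of the word, as the word4_0/word4_1 accessor names suggest;
 * the example words and extent size of 64 are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t id_words[] = { 0x00400000, 0x00c00080 };
	unsigned int i, k = 0;

	for (i = 0; i < 4; i++) {
		uint16_t id = (i % 2 == 0) ? (uint16_t)(id_words[k] & 0xffff)
					   : (uint16_t)(id_words[k] >> 16);

		printf("extent %u starts at id %u\n", i, id);
		if (i % 2 == 1)	/* both halves consumed; next word */
			k++;
	}
	return 0;
}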



/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends the
	 * resource type. All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

static void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
		  uint32_t feature)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_feature) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
			 LPFC_SLI4_MBX_EMBED);

	switch (feature) {
	case LPFC_SET_UE_RECOVERY:
		bf_set(lpfc_mbx_set_feature_UER,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_MDS_DIAGS:
		bf_set(lpfc_mbx_set_feature_mds,
		       &mbox->u.mqe.un.set_feature, 1);
		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	}

	return;
}

/**
 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	ras_fwlog->ras_active = false;

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
}

/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	ras_fwlog->ras_active = false;
}

/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data
 * [LWPD] and for the buffers posted to the adapter to receive the FW
 * log. The buffer count is calculated from the module parameter
 * ras_fwlog_buffsize; the size of each buffer posted to the FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   LPFC_RAS_MAX_ENTRY_SIZE,
						   &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
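
/*
 * Illustrative sketch (standalone): deriving the posted-buffer count
 * from a requested log size, as the RAS init path does. The 64 KiB
 * per-buffer size is stated in the doc comment above; the minimum post
 * unit and the cfg value here are assumed example values, not the
 * driver's constants.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int min_post_size = 1024 * 1024;	/* per buffsize unit */
	const unsigned int entry_size = 64 * 1024;	/* one posted buffer */
	unsigned int cfg_buffsize = 4;			/* module parameter */
	unsigned int total = min_post_size * cfg_buffsize;
	unsigned int count = total / entry_size;

	printf("%u bytes of log -> %u buffers of %u KiB\n",
	       total, count, entry_size / 1024);
	return 0;
}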

/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the driver's RAS MBX command to the device.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	ras_fwlog->ras_active = true;
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}
6365
/**
 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
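	/*
	 * Worked example (added commentary, not from the original source):
	 * assuming LPFC_RAS_MIN_BUFF_POST_SIZE is 256K and, per the comment
	 * above lpfc_sli4_ras_dma_alloc(), LPFC_RAS_MAX_ENTRY_SIZE is 64K,
	 * cfg_ras_fwlog_buffsize = 2 gives 512K of log space split into
	 * 512K / 64K = 8 buffers posted to the firmware.
	 */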

	/*
	 * If re-enabling FW logging support use earlier allocated
	 * DMA buffers while posting MBX command.
	 */
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
	       sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Update DMA buffer address */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Update LPWD address */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

/**
 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
 * @phba: Pointer to HBA context object.
 *
 * Check if RAS is supported on the adapter and initialize it.
 **/
void
lpfc_sli4_ras_setup(struct lpfc_hba *phba)
{
	/* Check if RAS FW log needs to be enabled or not */
	if (lpfc_check_fwlog_support(phba))
		return;

	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
				 LPFC_RAS_ENABLE_LOGGING);
}

/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents. The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays. If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
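		/*
		 * Added note (not from the original source): the expression
		 * above is the usual round-up division idiom, e.g. with
		 * count = 500 and BITS_PER_LONG = 64 it yields
		 * (500 + 63) / 64 = 8 longs for the bitmask.
		 */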
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready.  An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}

/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function frees the resource identifiers allocated by
 * lpfc_sli4_alloc_resource_identifiers(), either as extents or as id arrays.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count. The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = emb_len;
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}
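	/*
	 * Added note (not from the original source): req_len is initialized
	 * to emb_len just above, so the non-embedded branch is never taken
	 * here and the request is always issued as an embedded mailbox.
	 */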

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2983 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked link of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For single
 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
 * mailbox command for posting.
 *
 * Returns: 0 = success, non-zero failure.
 **/
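/*
 * Illustrative walk-through (added commentary, not from the original
 * source): if the list carries sglq entries with xritags 10, 11, 12, 20,
 * the hole after 12 ends the first block, so {10, 11, 12} is posted with
 * one non-embedded SGL block post and 20 starts the next block; a block
 * that ends up with a single entry is posted with an embedded SGL post.
 */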
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* return the number of XRIs actually posted */
	return total_cnt;
}

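/*
 * Added summary comment (not from the original source):
 * lpfc_set_host_data() builds a SET_HOST_DATA mailbox command that reports
 * the host OS driver version string ("Linux <FC|FCoE> v<version>") to the
 * SLI4 port; the caller issues the prepared mailbox itself.
 */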
void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_host_data) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
	mbox->u.mqe.un.set_host_data.param_len =
					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
	snprintf(mbox->u.mqe.un.set_host_data.data,
		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
		 "Linux %s v"LPFC_DRIVER_VERSION,
		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}

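/*
 * Added summary comment (not from the original source):
 * lpfc_post_rq_buffer() allocates up to @count receive buffers and posts
 * each one as a paired header/data RQE to the @hrq/@drq receive queues,
 * stopping early if the RQ fills or an allocation fails; successfully
 * posted buffers are tracked on rqbp->rqb_buffer_list.
 */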
int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	spin_lock_irqsave(&phba->hbalock, flags);
	rqbp = hrq->rqbp;
	for (i = 0; i < count; i++) {
		/* IF RQ is already full, don't bother */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
			break;
		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}

/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
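/*
 * Added roadmap comment (not from the original source), summarizing the
 * major phases of the routine below: PCI function reset, READ_REV/VPD
 * collection, feature negotiation, resource identifier allocation, queue
 * creation and setup, SGL/RPI reposting, FCFI registration, and finally
 * arming the interrupts and timers.
 */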
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		phba->hba_flag |= HBA_FCOE_MODE;
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name; failure to do so
	 * is considered non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;

	/*
	 * This is because first G7 ASIC doesn't support the standard
	 * 0x5a NVME cmd descriptor type/subtype
	 */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_6) &&
	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
	    (phba->vpd.rev.smRev == 0) &&
	    (phba->cfg_nvme_embed_cmd == 1))
		phba->cfg_nvme_embed_cmd = 0;

	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					&mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			phba->hba_flag |= HBA_RECOVERABLE_UE;
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver. This is not a fatal error.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
			phba->cfg_enable_bg = 0;
			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
			ftr_rsp++;
		}
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
	 * calls depend on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	lpfc_set_host_data(phba, mboxq);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x",
				rc);
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->ctx_buf = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_free_mbox;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n", rc);
		goto out_stop_timers;
	}
	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli4_setup(phba);
	lpfc_sli4_queue_init(phba);

	/* update host els xri-sgl sizes and mappings */
	rc = lpfc_sli4_els_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_destroy_queue;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
				       phba->sli4_hba.els_xri_cnt);
	if (unlikely(rc < 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	phba->sli4_hba.els_xri_cnt = rc;

	if (phba->nvmet_support) {
		/* update host nvmet xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvmet_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6308 Failed to update nvmet-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the nvmet sgl pool to the port */
		rc = lpfc_sli4_repost_sgl_list(
			phba,
			&phba->sli4_hba.lpfc_nvmet_sgl_list,
			phba->sli4_hba.nvmet_xri_cnt);
		if (unlikely(rc < 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"3117 Error %d during nvmet "
					"sgl post\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		phba->sli4_hba.nvmet_xri_cnt = rc;

		cnt = phba->cfg_iocb_cnt * 1024;
		/* We need 1 iocbq for every SGL, for IO processing */
		cnt += phba->sli4_hba.nvmet_xri_cnt;
	} else {
		/* update host scsi xri-sgl sizes and mappings */
		rc = lpfc_sli4_scsi_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6309 Failed to update scsi-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* update host nvme xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvme_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6082 Failed to update nvme-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		cnt = phba->cfg_iocb_cnt * 1024;
	}

	if (!phba->sli.iocbq_lookup) {
		/* Initialize and populate the iocb list per host */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list %d total %d\n",
				phba->cfg_iocb_cnt, cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all RQs created */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				phba->cfg_nvmet_mrq_post, i);
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		/* register the allocated scsi sgl pool to the port */
		rc = lpfc_sli4_repost_scsi_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"0383 Error %d during scsi sgl post "
					"operation\n", rc);
			/* Some Scsi buffers were moved to abort scsi list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
	}

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    (phba->nvmet_support == 0)) {

		/* register the allocated nvme sgl pool to the port */
		rc = lpfc_repost_nvme_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6116 Error %d during nvme sgl post "
					"operation\n", rc);
			/* Some NVME buffers were moved to abort nvme list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	lpfc_sli4_node_prep(phba);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * The FC Port needs to register FCFI (index 0)
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/* We are a NVME Target mode with MRQ > 1 */

			/* First register the FCFI */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;

	/* Check if physical ports are trunked */
	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);

	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_unset_queue;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_unset_queue;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_free_iocb_list(phba);
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: pointer to the timer_list embedded in the hba structure.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *				    are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
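/*
 * Added note (not from the original source): the scan below re-reads the
 * CQ without consuming entries. On ports using the "cqav" valid-bit scheme,
 * the expected valid bit flips on every wrap of the queue, which is why
 * qe_valid is toggled whenever the index wraps back to 0.
 */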
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;

}

/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *					       that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such mbox completions
 * may be missed causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the eq associated with the mcq */

	if (sli4_hba->hba_eq)
		for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
			if (sli4_hba->hba_eq[eqidx]->queue_id ==
			    sli4_hba->mbx_cq->assoc_qid) {
				fpeq = sli4_hba->hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always clear and re-arm the EQ */

	sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;

}

/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
7903lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7904{
7905 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
eb016566
JS
7906 MAILBOX_t *mb = NULL;
7907
da0436e9 7908 struct lpfc_sli *psli = &phba->sli;
da0436e9 7909
e8d3c3b1
JS
7910 /* If the mailbox completed, process the completion and return */
7911 if (lpfc_sli4_process_missed_mbox_completions(phba))
7912 return;
7913
eb016566
JS
7914 if (pmbox != NULL)
7915 mb = &pmbox->u.mb;
da0436e9
JS
7916 /* Check the pmbox pointer first. There is a race condition
7917 * between the mbox timeout handler getting executed in the
7918 * worklist and the mailbox actually completing. When this
7919 * race condition occurs, the mbox_active will be NULL.
7920 */
7921 spin_lock_irq(&phba->hbalock);
7922 if (pmbox == NULL) {
7923 lpfc_printf_log(phba, KERN_WARNING,
7924 LOG_MBOX | LOG_SLI,
7925 "0353 Active Mailbox cleared - mailbox timeout "
7926 "exiting\n");
7927 spin_unlock_irq(&phba->hbalock);
7928 return;
7929 }
7930
7931 /* Mbox cmd <mbxCommand> timeout */
7932 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7933 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7934 mb->mbxCommand,
7935 phba->pport->port_state,
7936 phba->sli.sli_flag,
7937 phba->sli.mbox_active);
7938 spin_unlock_irq(&phba->hbalock);
7939
7940 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7941 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 7942 * it to fail all outstanding SCSI IO.
da0436e9
JS
7943 */
7944 spin_lock_irq(&phba->pport->work_port_lock);
7945 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7946 spin_unlock_irq(&phba->pport->work_port_lock);
7947 spin_lock_irq(&phba->hbalock);
7948 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 7949 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
da0436e9
JS
7950 spin_unlock_irq(&phba->hbalock);
7951
db55fba8 7952 lpfc_sli_abort_fcp_rings(phba);
da0436e9
JS
7953
7954 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7955 "0345 Resetting board due to mailbox timeout\n");
7956
7957 /* Reset the HBA device */
7958 lpfc_reset_hba(phba);
7959}

/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
7987static int
7988lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7989 uint32_t flag)
dea3101e 7990{
bf07bdea 7991 MAILBOX_t *mbx;
2e0fef85 7992 struct lpfc_sli *psli = &phba->sli;
dea3101e 7993 uint32_t status, evtctr;
9940b97b 7994 uint32_t ha_copy, hc_copy;
dea3101e 7995 int i;
09372820 7996 unsigned long timeout;
dea3101e 7997 unsigned long drvr_flag = 0;
34b02dcd 7998 uint32_t word0, ldata;
dea3101e 7999 void __iomem *to_slim;
8000 int processing_queue = 0;
8001
8002 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8003 if (!pmbox) {
8568a4d2 8004 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 8005 /* processing mbox queue from intr_handler */
8006 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8007 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8008 return MBX_SUCCESS;
8009 }
58da1ffb 8010 processing_queue = 1;
8011 pmbox = lpfc_mbox_get(phba);
8012 if (!pmbox) {
8013 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8014 return MBX_SUCCESS;
8015 }
8016 }
dea3101e 8017
ed957684 8018 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 8019 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 8020 if(!pmbox->vport) {
58da1ffb 8021 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 8022 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 8023 LOG_MBOX | LOG_VPORT,
e8b62011 8024 "1806 Mbox x%x failed. No vport\n",
3772a991 8025 pmbox->u.mb.mbxCommand);
ed957684 8026 dump_stack();
58da1ffb 8027 goto out_not_finished;
8028 }
8029 }
8030
8d63f375 8031 /* If the PCI channel is in offline state, do not post mbox. */
8032 if (unlikely(pci_channel_offline(phba->pcidev))) {
8033 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8034 goto out_not_finished;
8035 }
8d63f375 8036
8037 /* If HBA has a deferred error attention, fail the iocb. */
8038 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8039 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8040 goto out_not_finished;
8041 }
8042
dea3101e 8043 psli = &phba->sli;
92d7f7b0 8044
bf07bdea 8045 mbx = &pmbox->u.mb;
dea3101e 8046 status = MBX_SUCCESS;
8047
8048 if (phba->link_state == LPFC_HBA_ERROR) {
8049 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8050
8051 /* Mbox command <mbxCommand> cannot issue */
8052 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8053 "(%d):0311 Mailbox command x%x cannot "
8054 "issue Data: x%x x%x\n",
8055 pmbox->vport ? pmbox->vport->vpi : 0,
8056 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 8057 goto out_not_finished;
8058 }
8059
bf07bdea 8060 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8061 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8062 !(hc_copy & HC_MBINT_ENA)) {
8063 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8064 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8065 "(%d):2528 Mailbox command x%x cannot "
8066 "issue Data: x%x x%x\n",
8067 pmbox->vport ? pmbox->vport->vpi : 0,
8068 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8069 goto out_not_finished;
8070 }
8071 }
8072
dea3101e 8073 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8074 /* Polling for a mbox command when another one is already active
8075 * is not allowed in SLI. Also, the driver must have established
8076 * SLI2 mode to queue and process multiple mbox commands.
8077 */
8078
8079 if (flag & MBX_POLL) {
2e0fef85 8080 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8081
8082 /* Mbox command <mbxCommand> cannot issue */
8083 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8084 "(%d):2529 Mailbox command x%x "
8085 "cannot issue Data: x%x x%x\n",
8086 pmbox->vport ? pmbox->vport->vpi : 0,
8087 pmbox->u.mb.mbxCommand,
8088 psli->sli_flag, flag);
58da1ffb 8089 goto out_not_finished;
dea3101e 8090 }
8091
3772a991 8092 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 8093 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8094 /* Mbox command <mbxCommand> cannot issue */
8095 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8096 "(%d):2530 Mailbox command x%x "
8097 "cannot issue Data: x%x x%x\n",
8098 pmbox->vport ? pmbox->vport->vpi : 0,
8099 pmbox->u.mb.mbxCommand,
8100 psli->sli_flag, flag);
58da1ffb 8101 goto out_not_finished;
dea3101e 8102 }
8103
dea3101e 8104 /* Another mailbox command is still being processed, queue this
8105 * command to be processed later.
8106 */
8107 lpfc_mbox_put(phba, pmbox);
8108
8109 /* Mbox cmd issue - BUSY */
ed957684 8110 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8111 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 8112 "x%x x%x x%x x%x\n",
92d7f7b0 8113 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8114 mbx->mbxCommand,
8115 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8116 psli->sli_flag, flag);
dea3101e 8117
8118 psli->slistat.mbox_busy++;
2e0fef85 8119 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8120
8121 if (pmbox->vport) {
8122 lpfc_debugfs_disc_trc(pmbox->vport,
8123 LPFC_DISC_TRC_MBOX_VPORT,
8124 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8125 (uint32_t)mbx->mbxCommand,
8126 mbx->un.varWords[0], mbx->un.varWords[1]);
8127 }
8128 else {
8129 lpfc_debugfs_disc_trc(phba->pport,
8130 LPFC_DISC_TRC_MBOX,
8131 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8132 (uint32_t)mbx->mbxCommand,
8133 mbx->un.varWords[0], mbx->un.varWords[1]);
8134 }
8135
2e0fef85 8136 return MBX_BUSY;
dea3101e 8137 }
8138
dea3101e 8139 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8140
8141 /* If we are not polling, we MUST be in SLI2 mode */
8142 if (flag != MBX_POLL) {
3772a991 8143 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 8144 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 8145 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8146 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8147 /* Mbox command <mbxCommand> cannot issue */
8148 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8149 "(%d):2531 Mailbox command x%x "
8150 "cannot issue Data: x%x x%x\n",
8151 pmbox->vport ? pmbox->vport->vpi : 0,
8152 pmbox->u.mb.mbxCommand,
8153 psli->sli_flag, flag);
58da1ffb 8154 goto out_not_finished;
dea3101e 8155 }
8156 /* timeout active mbox command */
8157 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8158 1000);
8159 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e 8160 }
8161
8162 /* Mailbox cmd <cmd> issue */
ed957684 8163 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8164 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 8165 "x%x\n",
e8b62011 8166 pmbox->vport ? pmbox->vport->vpi : 0,
8167 mbx->mbxCommand,
8168 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8169 psli->sli_flag, flag);
dea3101e 8170
bf07bdea 8171 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8172 if (pmbox->vport) {
8173 lpfc_debugfs_disc_trc(pmbox->vport,
8174 LPFC_DISC_TRC_MBOX_VPORT,
8175 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8176 (uint32_t)mbx->mbxCommand,
8177 mbx->un.varWords[0], mbx->un.varWords[1]);
8178 }
8179 else {
8180 lpfc_debugfs_disc_trc(phba->pport,
8181 LPFC_DISC_TRC_MBOX,
8182 "MBOX Send: cmd:x%x mb:x%x x%x",
8183 (uint32_t)mbx->mbxCommand,
8184 mbx->un.varWords[0], mbx->un.varWords[1]);
8185 }
8186 }
8187
dea3101e 8188 psli->slistat.mbox_cmd++;
8189 evtctr = psli->slistat.mbox_event;
8190
8191 /* next set own bit for the adapter and copy over command word */
bf07bdea 8192 mbx->mbxOwner = OWN_CHIP;
dea3101e 8193
3772a991 8194 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8195 /* Populate mbox extension offset word. */
8196 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 8197 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8198 = (uint8_t *)phba->mbox_ext
8199 - (uint8_t *)phba->mbox;
8200 }
8201
8202 /* Copy the mailbox extension data */
8203 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8204 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8205 (uint8_t *)phba->mbox_ext,
8206 pmbox->in_ext_byte_len);
8207 }
8208 /* Copy command data to host SLIM area */
bf07bdea 8209 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 8210 } else {
8211 /* Populate mbox extension offset word. */
8212 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 8213 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8214 = MAILBOX_HBA_EXT_OFFSET;
8215
8216 /* Copy the mailbox extension data */
3e1f0718 8217 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8218 lpfc_memcpy_to_slim(phba->MBslimaddr +
8219 MAILBOX_HBA_EXT_OFFSET,
3e1f0718 8220 pmbox->ctx_buf, pmbox->in_ext_byte_len);
7a470277 8221
895427bd 8222 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8223 /* copy command data into host mbox for cmpl */
8224 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8225 MAILBOX_CMD_SIZE);
dea3101e 8226
8227 /* First copy mbox command data to HBA SLIM, skip past first
8228 word */
8229 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 8230 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e 8231 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8232
8233 /* Next copy over first word, with mbxOwner set */
bf07bdea 8234 ldata = *((uint32_t *)mbx);
dea3101e 8235 to_slim = phba->MBslimaddr;
8236 writel(ldata, to_slim);
8237 readl(to_slim); /* flush */
8238
895427bd 8239 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8240 /* switch over to host mailbox */
3772a991 8241 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 8242 }
8243
8244 wmb();
dea3101e 8245
8246 switch (flag) {
8247 case MBX_NOWAIT:
09372820 8248 /* Set up reference to mailbox command */
dea3101e 8249 psli->mbox_active = pmbox;
8250 /* Interrupt board to do it */
8251 writel(CA_MBATT, phba->CAregaddr);
8252 readl(phba->CAregaddr); /* flush */
8253 /* Don't wait for it to finish, just return */
dea3101e 8254 break;
8255
8256 case MBX_POLL:
09372820 8257 /* Set up null reference to mailbox command */
dea3101e 8258 psli->mbox_active = NULL;
8259 /* Interrupt board to do it */
8260 writel(CA_MBATT, phba->CAregaddr);
8261 readl(phba->CAregaddr); /* flush */
8262
3772a991 8263 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8264 /* First read mbox status word */
34b02dcd 8265 word0 = *((uint32_t *)phba->mbox);
dea3101e 8266 word0 = le32_to_cpu(word0);
8267 } else {
8268 /* First read mbox status word */
8269 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8270 spin_unlock_irqrestore(&phba->hbalock,
8271 drvr_flag);
8272 goto out_not_finished;
8273 }
dea3101e 8274 }
8275
8276 /* Read the HBA Host Attention Register */
8277 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8278 spin_unlock_irqrestore(&phba->hbalock,
8279 drvr_flag);
8280 goto out_not_finished;
8281 }
8282 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8283 1000) + jiffies;
09372820 8284 i = 0;
dea3101e 8285 /* Wait for command to complete */
8286 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8287 (!(ha_copy & HA_MBATT) &&
2e0fef85 8288 (phba->link_state > LPFC_WARM_START))) {
09372820 8289 if (time_after(jiffies, timeout)) {
dea3101e 8290 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8291 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 8292 drvr_flag);
58da1ffb 8293 goto out_not_finished;
dea3101e 8294 }
8295
8296 /* Check if we took a mbox interrupt while we were
8297 polling */
8298 if (((word0 & OWN_CHIP) != OWN_CHIP)
8299 && (evtctr != psli->slistat.mbox_event))
8300 break;
8301
8302 if (i++ > 10) {
8303 spin_unlock_irqrestore(&phba->hbalock,
8304 drvr_flag);
8305 msleep(1);
8306 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8307 }
dea3101e 8308
3772a991 8309 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8310 /* First copy command data */
34b02dcd 8311 word0 = *((uint32_t *)phba->mbox);
dea3101e 8312 word0 = le32_to_cpu(word0);
bf07bdea 8313 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 8314 MAILBOX_t *slimmb;
34b02dcd 8315 uint32_t slimword0;
dea3101e 8316 /* Check real SLIM for any errors */
8317 slimword0 = readl(phba->MBslimaddr);
8318 slimmb = (MAILBOX_t *) & slimword0;
8319 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8320 && slimmb->mbxStatus) {
8321 psli->sli_flag &=
3772a991 8322 ~LPFC_SLI_ACTIVE;
dea3101e 8323 word0 = slimword0;
8324 }
8325 }
8326 } else {
8327 /* First copy command data */
8328 word0 = readl(phba->MBslimaddr);
8329 }
8330 /* Read the HBA Host Attention Register */
8331 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8332 spin_unlock_irqrestore(&phba->hbalock,
8333 drvr_flag);
8334 goto out_not_finished;
8335 }
dea3101e 8336 }
8337
3772a991 8338 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8339 /* copy results back to user */
8340 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8341 MAILBOX_CMD_SIZE);
7a470277 8342 /* Copy the mailbox extension data */
3e1f0718 8343 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
7a470277 8344 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
3e1f0718 8345 pmbox->ctx_buf,
7a470277
JS
8346 pmbox->out_ext_byte_len);
8347 }
dea3101e 8348 } else {
8349 /* First copy command data */
bf07bdea 8350 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
2ea259ee 8351 MAILBOX_CMD_SIZE);
7a470277 8352 /* Copy the mailbox extension data */
8353 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8354 lpfc_memcpy_from_slim(
8355 pmbox->ctx_buf,
8356 phba->MBslimaddr +
8357 MAILBOX_HBA_EXT_OFFSET,
8358 pmbox->out_ext_byte_len);
dea3101e 8359 }
8360 }
8361
8362 writel(HA_MBATT, phba->HAregaddr);
8363 readl(phba->HAregaddr); /* flush */
8364
8365 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 8366 status = mbx->mbxStatus;
dea3101e 8367 }
8368
8369 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8370 return status;
8371
8372out_not_finished:
8373 if (processing_queue) {
da0436e9 8374 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8375 lpfc_mbox_cmpl_put(phba, pmbox);
8376 }
8377 return MBX_NOT_FINISHED;
dea3101e 8378}
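
/*
 * Example caller sketch (illustrative only; lpfc_read_rev is used here
 * just as a representative mailbox setup helper): issuing a command in
 * polling mode.  Per the ownership rules documented above, the caller
 * typically frees a polled mailbox once the routine has returned.
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mbox);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */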
8379
8380/**
8381 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8382 * @phba: Pointer to HBA context object.
8383 *
8384 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8385 * the driver internal pending mailbox queue. It will then try to wait out the
8386 * possible outstanding mailbox command before returning.
8387 *
8388 * Returns:
8389 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8390 * the outstanding mailbox command timed out.
8391 **/
8392static int
8393lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8394{
8395 struct lpfc_sli *psli = &phba->sli;
f1126688 8396 int rc = 0;
a183a15f 8397 unsigned long timeout = 0;
8398
8399 /* Mark the asynchronous mailbox command posting as blocked */
8400 spin_lock_irq(&phba->hbalock);
8401 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8402 /* Determine how long we might wait for the active mailbox
8403 * command to be gracefully completed by firmware.
8404 */
8405 if (phba->sli.mbox_active)
8406 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8407 phba->sli.mbox_active) *
8408 1000) + jiffies;
8409 spin_unlock_irq(&phba->hbalock);
8410
8411 /* Make sure the mailbox is really active */
8412 if (timeout)
8413 lpfc_sli4_process_missed_mbox_completions(phba);
8414
8415 /* Wait for the outstanding mailbox command to complete */
8416 while (phba->sli.mbox_active) {
8417 /* Check active mailbox complete status every 2ms */
8418 msleep(2);
8419 if (time_after(jiffies, timeout)) {
8420 /* Timeout, mark the outstanding cmd as not complete */
8421 rc = 1;
8422 break;
8423 }
8424 }
8425
8426 /* Cannot cleanly block the async mailbox command, fail it */
8427 if (rc) {
8428 spin_lock_irq(&phba->hbalock);
8429 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8430 spin_unlock_irq(&phba->hbalock);
8431 }
8432 return rc;
8433}
8434
8435/**
8436 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
8437 * @phba: Pointer to HBA context object.
8438 *
8439 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8440 * commands from the driver internal pending mailbox queue. It makes sure
8441 * that there is no outstanding mailbox command before resuming posting
8442 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8443 * mailbox command, it will try to wait it out before resuming asynchronous
8444 * mailbox command posting.
8445 **/
8446static void
8447lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8448{
8449 struct lpfc_sli *psli = &phba->sli;
8450
8451 spin_lock_irq(&phba->hbalock);
8452 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8453 /* Asynchronous mailbox posting is not blocked, do nothing */
8454 spin_unlock_irq(&phba->hbalock);
8455 return;
8456 }
8457
8458 /* The outstanding synchronous mailbox command is guaranteed to be
8459 * done, either successfully or by timeout; after a timeout the
8460 * outstanding mailbox command is always removed, so just unblock
8461 * posting async mailbox commands and resume.
8462 */
8463 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8464 spin_unlock_irq(&phba->hbalock);
8465
8466 /* wake up worker thread to post asynchronous mailbox command */
8467 lpfc_worker_wake_up(phba);
8468}
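
/*
 * Example pairing (sketch): a synchronous SLI4 issue path brackets the
 * bootstrap mailbox post with the block/unblock helpers above so that
 * queued asynchronous commands cannot race with it, exactly as
 * lpfc_sli_issue_mbox_s4() does further below:
 *
 *	if (!lpfc_sli4_async_mbox_block(phba)) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */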
8469
8470/**
8471 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8472 * @phba: Pointer to HBA context object.
8473 * @mboxq: Pointer to mailbox object.
8474 *
8475 * The function waits for the bootstrap mailbox register ready bit from
8476 * the port for twice the regular mailbox command timeout value.
8477 *
8478 * Returns: 0 - no timeout waiting for the bootstrap mailbox register ready.
8479 * MBXERR_ERROR - the wait for the bootstrap mailbox register timed out.
8480 **/
8481static int
8482lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8483{
8484 uint32_t db_ready;
8485 unsigned long timeout;
8486 struct lpfc_register bmbx_reg;
8487
8488 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8489 * 1000) + jiffies;
8490
8491 do {
8492 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8493 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8494 if (!db_ready)
8495 msleep(2);
8496
8497 if (time_after(jiffies, timeout))
8498 return MBXERR_ERROR;
8499 } while (!db_ready);
8500
8501 return 0;
8502}
8503
8504/**
8505 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8506 * @phba: Pointer to HBA context object.
8507 * @mboxq: Pointer to mailbox object.
8508 *
8509 * The function posts a mailbox to the port. The mailbox is expected
8510 * to be completely filled in and ready for the port to operate on it.
8511 * This routine executes a synchronous completion operation on the
8512 * mailbox by polling for its completion.
8513 *
8514 * The caller must not be holding any locks when calling this routine.
8515 *
8516 * Returns:
8517 * MBX_SUCCESS - mailbox posted successfully
8518 * Any of the MBX error values.
8519 **/
8520static int
8521lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8522{
8523 int rc = MBX_SUCCESS;
8524 unsigned long iflag;
8525 uint32_t mcqe_status;
8526 uint32_t mbx_cmnd;
8527 struct lpfc_sli *psli = &phba->sli;
8528 struct lpfc_mqe *mb = &mboxq->u.mqe;
8529 struct lpfc_bmbx_create *mbox_rgn;
8530 struct dma_address *dma_address;
8531
8532 /*
8533 * Only one mailbox can be active to the bootstrap mailbox region
8534 * at a time and there is no queueing provided.
8535 */
8536 spin_lock_irqsave(&phba->hbalock, iflag);
8537 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8538 spin_unlock_irqrestore(&phba->hbalock, iflag);
8539 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8540 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8541 "cannot issue Data: x%x x%x\n",
8542 mboxq->vport ? mboxq->vport->vpi : 0,
8543 mboxq->u.mb.mbxCommand,
8544 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8545 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8546 psli->sli_flag, MBX_POLL);
8547 return MBXERR_ERROR;
8548 }
8549 /* The server grabs the token and owns it until release */
8550 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8551 phba->sli.mbox_active = mboxq;
8552 spin_unlock_irqrestore(&phba->hbalock, iflag);
8553
8554 /* wait for the bootstrap mbox register to become ready */
8555 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8556 if (rc)
8557 goto exit;
8558
8559 /*
8560 * Initialize the bootstrap memory region to avoid stale data areas
8561 * in the mailbox post. Then copy the caller's mailbox contents to
8562 * the bmbx mailbox region.
8563 */
8564 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8565 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8566 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8567 sizeof(struct lpfc_mqe));
8568
8569 /* Post the high mailbox dma address to the port and wait for ready. */
8570 dma_address = &phba->sli4_hba.bmbx.dma_address;
8571 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8572
8573 /* wait for bootstrap mbox register for hi-address write done */
8574 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8575 if (rc)
8576 goto exit;
8577
8578 /* Post the low mailbox dma address to the port. */
8579 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 8580
8581 /* wait for bootstrap mbox register for low address write done */
8582 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8583 if (rc)
8584 goto exit;
8585
8586 /*
8587 * Read the CQ to ensure the mailbox has completed.
8588 * If so, update the mailbox status so that the upper layers
8589 * can complete the request normally.
8590 */
8591 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8592 sizeof(struct lpfc_mqe));
da0436e9 8593 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8594 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8595 sizeof(struct lpfc_mcqe));
da0436e9 8596 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8597 /*
8598 * When the CQE status indicates a failure and the mailbox status
8599 * indicates success then copy the CQE status into the mailbox status
8600 * (and prefix it with x4000).
8601 */
da0436e9 8602 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8603 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8604 bf_set(lpfc_mqe_status, mb,
8605 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 8606 rc = MBXERR_ERROR;
8607 } else
8608 lpfc_sli4_swap_str(phba, mboxq);
8609
8610 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8611 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8612 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8613 " x%x x%x CQ: x%x x%x x%x x%x\n",
8614 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8615 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8616 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8617 bf_get(lpfc_mqe_status, mb),
8618 mb->un.mb_words[0], mb->un.mb_words[1],
8619 mb->un.mb_words[2], mb->un.mb_words[3],
8620 mb->un.mb_words[4], mb->un.mb_words[5],
8621 mb->un.mb_words[6], mb->un.mb_words[7],
8622 mb->un.mb_words[8], mb->un.mb_words[9],
8623 mb->un.mb_words[10], mb->un.mb_words[11],
8624 mb->un.mb_words[12], mboxq->mcqe.word0,
8625 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8626 mboxq->mcqe.trailer);
8627exit:
8628 /* We are holding the token, so no lock is needed for the release */
8629 spin_lock_irqsave(&phba->hbalock, iflag);
8630 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8631 phba->sli.mbox_active = NULL;
8632 spin_unlock_irqrestore(&phba->hbalock, iflag);
8633 return rc;
8634}
8635
8636/**
8637 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8638 * @phba: Pointer to HBA context object.
8639 * @pmbox: Pointer to mailbox object.
8640 * @flag: Flag indicating how the mailbox need to be processed.
8641 *
8642 * This function is called by discovery code and HBA management code to submit
8643 * a mailbox command to firmware with SLI-4 interface spec.
8644 *
8645 * Return codes the caller owns the mailbox command after the return of the
8646 * function.
8647 **/
8648static int
8649lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8650 uint32_t flag)
8651{
8652 struct lpfc_sli *psli = &phba->sli;
8653 unsigned long iflags;
8654 int rc;
8655
8656 /* dump from issue mailbox command if setup */
8657 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8658
8659 rc = lpfc_mbox_dev_check(phba);
8660 if (unlikely(rc)) {
8661 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8662 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8663 "cannot issue Data: x%x x%x\n",
8664 mboxq->vport ? mboxq->vport->vpi : 0,
8665 mboxq->u.mb.mbxCommand,
8666 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8667 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8668 psli->sli_flag, flag);
8669 goto out_not_finished;
8670 }
8671
8672 /* Detect polling mode and jump to a handler */
8673 if (!phba->sli4_hba.intr_enable) {
8674 if (flag == MBX_POLL)
8675 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8676 else
8677 rc = -EIO;
8678 if (rc != MBX_SUCCESS)
0558056c 8679 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 8680 "(%d):2541 Mailbox command x%x "
8681 "(x%x/x%x) failure: "
8682 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8683 "Data: x%x x%x\n,",
8684 mboxq->vport ? mboxq->vport->vpi : 0,
8685 mboxq->u.mb.mbxCommand,
8686 lpfc_sli_config_mbox_subsys_get(phba,
8687 mboxq),
8688 lpfc_sli_config_mbox_opcode_get(phba,
8689 mboxq),
8690 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8691 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8692 bf_get(lpfc_mcqe_ext_status,
8693 &mboxq->mcqe),
8694 psli->sli_flag, flag);
8695 return rc;
8696 } else if (flag == MBX_POLL) {
8697 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8698 "(%d):2542 Try to issue mailbox command "
7365f6fd 8699 "x%x (x%x/x%x) synchronously ahead of async "
f1126688 8700 "mailbox command queue: x%x x%x\n",
8701 mboxq->vport ? mboxq->vport->vpi : 0,
8702 mboxq->u.mb.mbxCommand,
8703 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8704 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 8705 psli->sli_flag, flag);
8706 /* Try to block the asynchronous mailbox posting */
8707 rc = lpfc_sli4_async_mbox_block(phba);
8708 if (!rc) {
8709 /* Successfully blocked, now issue sync mbox cmd */
8710 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8711 if (rc != MBX_SUCCESS)
cc459f19 8712 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 8713 LOG_MBOX | LOG_SLI,
8714 "(%d):2597 Sync Mailbox command "
8715 "x%x (x%x/x%x) failure: "
8716 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8717 "Data: x%x x%x\n,",
8718 mboxq->vport ? mboxq->vport->vpi : 0,
8719 mboxq->u.mb.mbxCommand,
8720 lpfc_sli_config_mbox_subsys_get(phba,
8721 mboxq),
8722 lpfc_sli_config_mbox_opcode_get(phba,
8723 mboxq),
8724 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8725 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8726 bf_get(lpfc_mcqe_ext_status,
8727 &mboxq->mcqe),
a183a15f 8728 psli->sli_flag, flag);
8729 /* Unblock the async mailbox posting afterward */
8730 lpfc_sli4_async_mbox_unblock(phba);
8731 }
8732 return rc;
8733 }
8734
8735 /* Now, interrupt mode asynchronous mailbox command */
8736 rc = lpfc_mbox_cmd_check(phba, mboxq);
8737 if (rc) {
8738 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8739 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8740 "cannot issue Data: x%x x%x\n",
8741 mboxq->vport ? mboxq->vport->vpi : 0,
8742 mboxq->u.mb.mbxCommand,
8743 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8744 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8745 psli->sli_flag, flag);
8746 goto out_not_finished;
8747 }
8748
8749 /* Put the mailbox command into the driver internal FIFO */
8750 psli->slistat.mbox_busy++;
8751 spin_lock_irqsave(&phba->hbalock, iflags);
8752 lpfc_mbox_put(phba, mboxq);
8753 spin_unlock_irqrestore(&phba->hbalock, iflags);
8754 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8755 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 8756 "x%x (x%x/x%x) x%x x%x x%x\n",
8757 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8758 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8759 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8760 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8761 phba->pport->port_state,
8762 psli->sli_flag, MBX_NOWAIT);
8763 /* Wake up worker thread to transport mailbox command from head */
8764 lpfc_worker_wake_up(phba);
8765
8766 return MBX_BUSY;
8767
8768out_not_finished:
8769 return MBX_NOT_FINISHED;
8770}
8771
8772/**
8773 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8774 * @phba: Pointer to HBA context object.
8775 *
8776 * This function is called by the worker thread to send a mailbox command to
8777 * SLI4 HBA firmware.
8778 *
8779 **/
8780int
8781lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8782{
8783 struct lpfc_sli *psli = &phba->sli;
8784 LPFC_MBOXQ_t *mboxq;
8785 int rc = MBX_SUCCESS;
8786 unsigned long iflags;
8787 struct lpfc_mqe *mqe;
8788 uint32_t mbx_cmnd;
8789
8790 /* Check interrupt mode before post async mailbox command */
8791 if (unlikely(!phba->sli4_hba.intr_enable))
8792 return MBX_NOT_FINISHED;
8793
8794 /* Check for mailbox command service token */
8795 spin_lock_irqsave(&phba->hbalock, iflags);
8796 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8797 spin_unlock_irqrestore(&phba->hbalock, iflags);
8798 return MBX_NOT_FINISHED;
8799 }
8800 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8801 spin_unlock_irqrestore(&phba->hbalock, iflags);
8802 return MBX_NOT_FINISHED;
8803 }
8804 if (unlikely(phba->sli.mbox_active)) {
8805 spin_unlock_irqrestore(&phba->hbalock, iflags);
8806 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8807 "0384 There is pending active mailbox cmd\n");
8808 return MBX_NOT_FINISHED;
8809 }
8810 /* Take the mailbox command service token */
8811 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8812
8813 /* Get the next mailbox command from head of queue */
8814 mboxq = lpfc_mbox_get(phba);
8815
8816 /* If no more mailbox command waiting for post, we're done */
8817 if (!mboxq) {
8818 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8819 spin_unlock_irqrestore(&phba->hbalock, iflags);
8820 return MBX_SUCCESS;
8821 }
8822 phba->sli.mbox_active = mboxq;
8823 spin_unlock_irqrestore(&phba->hbalock, iflags);
8824
8825 /* Check device readiness for posting mailbox command */
8826 rc = lpfc_mbox_dev_check(phba);
8827 if (unlikely(rc))
8828 /* Driver clean routine will clean up pending mailbox */
8829 goto out_not_finished;
8830
8831 /* Prepare the mbox command to be posted */
8832 mqe = &mboxq->u.mqe;
8833 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8834
8835 /* Start timer for the mbox_tmo and log some mailbox post messages */
8836 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 8837 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8838
8839 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8840 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8841 "x%x x%x\n",
8842 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8843 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8844 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8845 phba->pport->port_state, psli->sli_flag);
8846
8847 if (mbx_cmnd != MBX_HEARTBEAT) {
8848 if (mboxq->vport) {
8849 lpfc_debugfs_disc_trc(mboxq->vport,
8850 LPFC_DISC_TRC_MBOX_VPORT,
8851 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8852 mbx_cmnd, mqe->un.mb_words[0],
8853 mqe->un.mb_words[1]);
8854 } else {
8855 lpfc_debugfs_disc_trc(phba->pport,
8856 LPFC_DISC_TRC_MBOX,
8857 "MBOX Send: cmd:x%x mb:x%x x%x",
8858 mbx_cmnd, mqe->un.mb_words[0],
8859 mqe->un.mb_words[1]);
8860 }
8861 }
8862 psli->slistat.mbox_cmd++;
8863
8864 /* Post the mailbox command to the port */
8865 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8866 if (rc != MBX_SUCCESS) {
8867 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8868 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8869 "cannot issue Data: x%x x%x\n",
8870 mboxq->vport ? mboxq->vport->vpi : 0,
8871 mboxq->u.mb.mbxCommand,
8872 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8873 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8874 psli->sli_flag, MBX_NOWAIT);
8875 goto out_not_finished;
8876 }
8877
8878 return rc;
8879
8880out_not_finished:
8881 spin_lock_irqsave(&phba->hbalock, iflags);
8882 if (phba->sli.mbox_active) {
8883 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8884 __lpfc_mbox_cmpl_put(phba, mboxq);
8885 /* Release the token */
8886 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8887 phba->sli.mbox_active = NULL;
8888 }
8889 spin_unlock_irqrestore(&phba->hbalock, iflags);
8890
8891 return MBX_NOT_FINISHED;
8892}
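
/*
 * Example (illustrative sketch of the surrounding flow; the SLI-rev
 * check shown is an assumption, not taken from this file): the worker
 * thread drains the driver's pending mailbox FIFO by calling this
 * routine whenever mailbox work is signalled; an MBX_NOT_FINISHED
 * return while another command is active leaves the command queued
 * for a later pass.
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		lpfc_sli4_post_async_mbox(phba);
 */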
8893
8894/**
8895 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8896 * @phba: Pointer to HBA context object.
8897 * @pmbox: Pointer to mailbox object.
8898 * @flag: Flag indicating how the mailbox need to be processed.
8899 *
8900 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8901 * the API jump table function pointer from the lpfc_hba struct.
8902 *
8903 * Return codes the caller owns the mailbox command after the return of the
8904 * function.
8905 **/
8906int
8907lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8908{
8909 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8910}
8911
8912/**
25985edc 8913 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8914 * @phba: The hba struct for which this call is being executed.
8915 * @dev_grp: The HBA PCI-Device group number.
8916 *
8917 * This routine sets up the mbox interface API function jump table in @phba
8918 * struct.
8919 * Returns: 0 - success, -ENODEV - failure.
8920 **/
8921int
8922lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8923{
8924
8925 switch (dev_grp) {
8926 case LPFC_PCI_DEV_LP:
8927 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8928 phba->lpfc_sli_handle_slow_ring_event =
8929 lpfc_sli_handle_slow_ring_event_s3;
8930 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8931 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8932 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8933 break;
8934 case LPFC_PCI_DEV_OC:
8935 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8936 phba->lpfc_sli_handle_slow_ring_event =
8937 lpfc_sli_handle_slow_ring_event_s4;
8938 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8939 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8940 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8941 break;
8942 default:
8943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8944 "1420 Invalid HBA PCI-device group: 0x%x\n",
8945 dev_grp);
8946 return -ENODEV;
8947 break;
8948 }
8949 return 0;
8950}
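
/*
 * Example (sketch): the setup routine above runs once at device init;
 * afterwards the lpfc_sli_issue_mbox() wrapper below dispatches to the
 * SLI3 or SLI4 handler without callers caring which one is wired in.
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 */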
8951
e59058c4 8952/**
3621a710 8953 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8954 * @phba: Pointer to HBA context object.
8955 * @pring: Pointer to driver SLI ring object.
8956 * @piocb: Pointer to address of newly added command iocb.
8957 *
8958 * This function is called with hbalock held to add a command
8959 * iocb to the txq when SLI layer cannot submit the command iocb
8960 * to the ring.
8961 **/
2a9bf3d0 8962void
92d7f7b0 8963__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 8964 struct lpfc_iocbq *piocb)
dea3101e 8965{
1c2ba475 8966 lockdep_assert_held(&phba->hbalock);
dea3101e 8967 /* Insert the caller's iocb in the txq tail for later processing. */
8968 list_add_tail(&piocb->list, &pring->txq);
dea3101e 8969}
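
/*
 * Example (sketch): callers must hold hbalock around the txq insert,
 * matching the lockdep assertion above.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */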
8970
e59058c4 8971/**
3621a710 8972 * lpfc_sli_next_iocb - Get the next iocb in the txq
8973 * @phba: Pointer to HBA context object.
8974 * @pring: Pointer to driver SLI ring object.
8975 * @piocb: Pointer to address of newly added command iocb.
8976 *
8977 * This function is called with hbalock held before a new
8978 * iocb is submitted to the firmware. This function checks
8979 * the txq so that iocbs already queued there are flushed to the
8980 * firmware before new iocbs are submitted.
8981 * If there are iocbs in the txq which need to be submitted
8982 * to firmware, lpfc_sli_next_iocb returns the first element
8983 * of the txq after dequeuing it from txq.
8984 * If there is no iocb in the txq then the function will return
8985 * *piocb and *piocb is set to NULL. Caller needs to check
8986 * *piocb to find if there are more commands in the txq.
8987 **/
dea3101e 8988static struct lpfc_iocbq *
8989lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 8990 struct lpfc_iocbq **piocb)
dea3101e 8991{
8992 struct lpfc_iocbq * nextiocb;
8993
8994 lockdep_assert_held(&phba->hbalock);
8995
dea3101e 8996 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8997 if (!nextiocb) {
8998 nextiocb = *piocb;
8999 *piocb = NULL;
9000 }
9001
9002 return nextiocb;
9003}
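
/*
 * Example of the contract described above (sketch, mirroring the drain
 * loop in __lpfc_sli_issue_iocb_s3() below): the txq is emptied ahead
 * of the caller's iocb, and *piocb being NULL afterwards means the
 * caller's own iocb was handed to the firmware.
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *	if (!piocb)
 *		return IOCB_SUCCESS;
 */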
9004
e59058c4 9005/**
3772a991 9006 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 9007 * @phba: Pointer to HBA context object.
3772a991 9008 * @ring_number: SLI ring number to issue iocb on.
9009 * @piocb: Pointer to command iocb.
9010 * @flag: Flag indicating if this command can be put into txq.
9011 *
9012 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9013 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9014 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9015 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9016 * this function allows only iocbs for posting buffers. This function finds
9017 * next available slot in the command ring and posts the command to the
9018 * available slot and writes the port attention register to request HBA start
9019 * processing new iocb. If there is no slot available in the ring and
9020 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9021 * the function returns IOCB_BUSY.
e59058c4 9022 *
9023 * This function is called with hbalock held. The function will return success
9024 * after it successfully submits the iocb to firmware or after adding it to the
9025 * txq.
e59058c4 9026 **/
98c9ea5c 9027static int
3772a991 9028__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 9029 struct lpfc_iocbq *piocb, uint32_t flag)
9030{
9031 struct lpfc_iocbq *nextiocb;
9032 IOCB_t *iocb;
895427bd 9033 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea3101e 9034
9035 lockdep_assert_held(&phba->hbalock);
9036
9037 if (piocb->iocb_cmpl && (!piocb->vport) &&
9038 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9039 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9040 lpfc_printf_log(phba, KERN_ERR,
9041 LOG_SLI | LOG_VPORT,
e8b62011 9042 "1807 IOCB x%x failed. No vport\n",
9043 piocb->iocb.ulpCommand);
9044 dump_stack();
9045 return IOCB_ERROR;
9046 }
9047
9048
9049 /* If the PCI channel is in offline state, do not post iocbs. */
9050 if (unlikely(pci_channel_offline(phba->pcidev)))
9051 return IOCB_ERROR;
9052
9053 /* If HBA has a deferred error attention, fail the iocb. */
9054 if (unlikely(phba->hba_flag & DEFER_ERATT))
9055 return IOCB_ERROR;
9056
dea3101e 9057 /*
9058 * We should never get an IOCB if we are in a < LINK_DOWN state
9059 */
2e0fef85 9060 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 9061 return IOCB_ERROR;
9062
9063 /*
9064 * Check to see if we are blocking IOCB processing because of an
0b727fea 9065 * outstanding event.
dea3101e 9066 */
0b727fea 9067 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 9068 goto iocb_busy;
9069
2e0fef85 9070 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 9071 /*
2680eeaa 9072 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 9073 * can be issued if the link is not up.
9074 */
9075 switch (piocb->iocb.ulpCommand) {
9076 case CMD_GEN_REQUEST64_CR:
9077 case CMD_GEN_REQUEST64_CX:
9078 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9079 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 9080 FC_RCTL_DD_UNSOL_CMD) ||
9081 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9082 MENLO_TRANSPORT_TYPE))
9083
9084 goto iocb_busy;
9085 break;
dea3101e 9086 case CMD_QUE_RING_BUF_CN:
9087 case CMD_QUE_RING_BUF64_CN:
dea3101e 9088 /*
9089 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9090 * completion, iocb_cmpl MUST be 0.
9091 */
9092 if (piocb->iocb_cmpl)
9093 piocb->iocb_cmpl = NULL;
9094 /*FALLTHROUGH*/
9095 case CMD_CREATE_XRI_CR:
9096 case CMD_CLOSE_XRI_CN:
9097 case CMD_CLOSE_XRI_CX:
dea3101e 9098 break;
9099 default:
9100 goto iocb_busy;
9101 }
9102
9103 /*
9104 * For FCP commands, we must be in a state where we can process link
9105 * attention events.
9106 */
895427bd 9107 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
92d7f7b0 9108 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 9109 goto iocb_busy;
92d7f7b0 9110 }
dea3101e 9111
dea3101e 9112 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9113 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9114 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9115
9116 if (iocb)
9117 lpfc_sli_update_ring(phba, pring);
9118 else
9119 lpfc_sli_update_full_ring(phba, pring);
9120
9121 if (!piocb)
9122 return IOCB_SUCCESS;
9123
9124 goto out_busy;
9125
9126 iocb_busy:
9127 pring->stats.iocb_cmd_delay++;
9128
9129 out_busy:
9130
9131 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 9132 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 9133 return IOCB_SUCCESS;
9134 }
9135
9136 return IOCB_BUSY;
9137}
9138
3772a991 9139/**
9140 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9141 * @phba: Pointer to HBA context object.
9142 * @piocb: Pointer to command iocb.
9143 * @sglq: Pointer to the scatter gather queue object.
9144 *
9145 * This routine converts the bpl or bde that is in the IOCB
9146 * to a sgl list for the sli4 hardware. The physical address
9147 * of the bpl/bde is converted back to a virtual address.
9148 * If the IOCB contains a BPL then the list of BDE's is
9149 * converted to sli4_sge's. If the IOCB contains a single
9150 * BDE then it is converted to a single sli_sge.
9151 * The IOCB is still in cpu endianness so the contents of
9152 * the bpl can be used without byte swapping.
9153 *
9154 * Returns valid XRI = Success, NO_XRI = Failure.
9155**/
9156static uint16_t
9157lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9158 struct lpfc_sglq *sglq)
3772a991 9159{
9160 uint16_t xritag = NO_XRI;
9161 struct ulp_bde64 *bpl = NULL;
9162 struct ulp_bde64 bde;
9163 struct sli4_sge *sgl = NULL;
1b51197d 9164 struct lpfc_dmabuf *dmabuf;
9165 IOCB_t *icmd;
9166 int numBdes = 0;
9167 int i = 0;
9168 uint32_t offset = 0; /* accumulated offset in the sg request list */
9169 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 9170
9171 if (!piocbq || !sglq)
9172 return xritag;
9173
9174 sgl = (struct sli4_sge *)sglq->sgl;
9175 icmd = &piocbq->iocb;
9176 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9177 return sglq->sli4_xritag;
9178 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9179 numBdes = icmd->un.genreq64.bdl.bdeSize /
9180 sizeof(struct ulp_bde64);
9181 /* The addrHigh and addrLow fields within the IOCB
9182 * have not been byteswapped yet so there is no
9183 * need to swap them back.
9184 */
9185 if (piocbq->context3)
9186 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9187 else
9188 return xritag;
4f774513 9189
1b51197d 9190 bpl = (struct ulp_bde64 *)dmabuf->virt;
9191 if (!bpl)
9192 return xritag;
9193
9194 for (i = 0; i < numBdes; i++) {
9195 /* Should already be byte swapped. */
9196 sgl->addr_hi = bpl->addrHigh;
9197 sgl->addr_lo = bpl->addrLow;
9198
0558056c 9199 sgl->word2 = le32_to_cpu(sgl->word2);
9200 if ((i+1) == numBdes)
9201 bf_set(lpfc_sli4_sge_last, sgl, 1);
9202 else
9203 bf_set(lpfc_sli4_sge_last, sgl, 0);
9204 /* swap the size field back to the cpu so we
9205 * can assign it to the sgl.
9206 */
9207 bde.tus.w = le32_to_cpu(bpl->tus.w);
9208 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9209 /* The offsets in the sgl need to be accumulated
9210 * separately for the request and reply lists.
9211 * The request is always first, the reply follows.
9212 */
9213 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9214 /* add up the reply sg entries */
9215 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9216 inbound++;
9217 /* first inbound? reset the offset */
9218 if (inbound == 1)
9219 offset = 0;
9220 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9221 bf_set(lpfc_sli4_sge_type, sgl,
9222 LPFC_SGE_TYPE_DATA);
9223 offset += bde.tus.f.bdeSize;
9224 }
546fc854 9225 sgl->word2 = cpu_to_le32(sgl->word2);
9226 bpl++;
9227 sgl++;
9228 }
9229 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9230 /* The addrHigh and addrLow fields of the BDE have not
9231 * been byteswapped yet so they need to be swapped
9232 * before putting them in the sgl.
9233 */
9234 sgl->addr_hi =
9235 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9236 sgl->addr_lo =
9237 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 9238 sgl->word2 = le32_to_cpu(sgl->word2);
9239 bf_set(lpfc_sli4_sge_last, sgl, 1);
9240 sgl->word2 = cpu_to_le32(sgl->word2);
9241 sgl->sge_len =
9242 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9243 }
9244 return sglq->sli4_xritag;
3772a991 9245}
92d7f7b0 9246
e59058c4 9247/**
4f774513 9248 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 9249 * @phba: Pointer to HBA context object.
9250 * @piocb: Pointer to command iocb.
9251 * @wqe: Pointer to the work queue entry.
e59058c4 9252 *
9253 * This routine converts the iocb command to its Work Queue Entry
9254 * equivalent. The wqe pointer should not have any fields set when
9255 * this routine is called because it will memcpy over them.
9256 * This routine does not set the CQ_ID or the WQEC bits in the
9257 * wqe.
e59058c4 9258 *
4f774513 9259 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 9260 **/
cf5bf97e 9261static int
4f774513 9262lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
205e8240 9263 union lpfc_wqe128 *wqe)
cf5bf97e 9264{
5ffc266e 9265 uint32_t xmit_len = 0, total_len = 0;
9266 uint8_t ct = 0;
9267 uint32_t fip;
9268 uint32_t abort_tag;
9269 uint8_t command_type = ELS_COMMAND_NON_FIP;
9270 uint8_t cmnd;
9271 uint16_t xritag;
9272 uint16_t abrt_iotag;
9273 struct lpfc_iocbq *abrtiocbq;
4f774513 9274 struct ulp_bde64 *bpl = NULL;
f0d9bccc 9275 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9276 int numBdes, i;
9277 struct ulp_bde64 bde;
c31098ce 9278 struct lpfc_nodelist *ndlp;
ff78d8f9 9279 uint32_t *pcmd;
1b51197d 9280 uint32_t if_type;
4f774513 9281
45ed1190 9282 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 9283 /* The fcp commands will set command type */
0c287589 9284 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 9285 command_type = FCP_COMMAND;
c868595d 9286 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9287 command_type = ELS_COMMAND_FIP;
9288 else
9289 command_type = ELS_COMMAND_NON_FIP;
9290
9291 if (phba->fcp_embed_io)
9292 memset(wqe, 0, sizeof(union lpfc_wqe128));
4f774513
JS
9293 /* Some of the fields are in the right position already */
9294 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9295 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9296 /* The ct field has moved so reset */
9297 wqe->generic.wqe_com.word7 = 0;
9298 wqe->generic.wqe_com.word10 = 0;
9299 }
9300
9301 abort_tag = (uint32_t) iocbq->iotag;
9302 xritag = iocbq->sli4_xritag;
9303 /* words0-2 bpl convert bde */
9304 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9305 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9306 sizeof(struct ulp_bde64);
9307 bpl = (struct ulp_bde64 *)
9308 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9309 if (!bpl)
9310 return IOCB_ERROR;
cf5bf97e 9311
9312 /* Should already be byte swapped. */
9313 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9314 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9315 /* swap the size field back to the cpu so we
9316 * can assign it to the sgl.
9317 */
9318 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9319 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9320 total_len = 0;
9321 for (i = 0; i < numBdes; i++) {
9322 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9323 total_len += bde.tus.f.bdeSize;
9324 }
4f774513 9325 } else
5ffc266e 9326 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 9327
9328 iocbq->iocb.ulpIoTag = iocbq->iotag;
9329 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 9330
9331 switch (iocbq->iocb.ulpCommand) {
9332 case CMD_ELS_REQUEST64_CR:
9333 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9334 ndlp = iocbq->context_un.ndlp;
9335 else
9336 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9337 if (!iocbq->iocb.ulpLe) {
9338 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9339 "2007 Only Limited Edition cmd Format"
9340 " supported 0x%x\n",
9341 iocbq->iocb.ulpCommand);
9342 return IOCB_ERROR;
9343 }
ff78d8f9 9344
5ffc266e 9345 wqe->els_req.payload_len = xmit_len;
9346 /* Els_request64 has a TMO */
9347 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9348 iocbq->iocb.ulpTimeout);
9349 /* Need a VF for word 4 set the vf bit*/
9350 bf_set(els_req64_vf, &wqe->els_req, 0);
9351 /* And a VFID for word 12 */
9352 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 9353 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9354 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9355 iocbq->iocb.ulpContext);
9356 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9357 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 9358 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 9359 if (command_type == ELS_COMMAND_FIP)
9360 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9361 >> LPFC_FIP_ELS_ID_SHIFT);
9362 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9363 iocbq->context2)->virt);
9364 if_type = bf_get(lpfc_sli_intf_if_type,
9365 &phba->sli4_hba.sli_intf);
27d6ac0a 9366 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 9367 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 9368 *pcmd == ELS_CMD_SCR ||
6b5151fd 9369 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 9370 *pcmd == ELS_CMD_LOGO ||
9371 *pcmd == ELS_CMD_PLOGI)) {
9372 bf_set(els_req64_sp, &wqe->els_req, 1);
9373 bf_set(els_req64_sid, &wqe->els_req,
9374 iocbq->vport->fc_myDID);
9375 if ((*pcmd == ELS_CMD_FLOGI) &&
9376 !(phba->fc_topology ==
9377 LPFC_TOPOLOGY_LOOP))
9378 bf_set(els_req64_sid, &wqe->els_req, 0);
9379 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9380 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 9381 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 9382 } else if (pcmd && iocbq->context1) {
9383 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9384 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9385 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9386 }
c868595d 9387 }
9388 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9389 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9390 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9391 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9392 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9393 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9394 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9395 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
af22741c 9396 wqe->els_req.max_response_payload_len = total_len - xmit_len;
7851fe2c 9397 break;
5ffc266e 9398 case CMD_XMIT_SEQUENCE64_CX:
9399 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9400 iocbq->iocb.un.ulpWord[3]);
9401 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 9402 iocbq->iocb.unsli3.rcvsli3.ox_id);
9403 /* The entire sequence is transmitted for this IOCB */
9404 xmit_len = total_len;
9405 cmnd = CMD_XMIT_SEQUENCE64_CR;
9406 if (phba->link_flag & LS_LOOPBACK_MODE)
9407 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
4f774513 9408 case CMD_XMIT_SEQUENCE64_CR:
9409 /* word3 iocb=io_tag32 wqe=reserved */
9410 wqe->xmit_sequence.rsvd3 = 0;
9411 /* word4 relative_offset memcpy */
9412 /* word5 r_ctl/df_ctl memcpy */
9413 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9414 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9415 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9416 LPFC_WQE_IOD_WRITE);
9417 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9418 LPFC_WQE_LENLOC_WORD12);
9419 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9420 wqe->xmit_sequence.xmit_len = xmit_len;
9421 command_type = OTHER_COMMAND;
7851fe2c 9422 break;
4f774513 9423 case CMD_XMIT_BCAST64_CN:
9424 /* word3 iocb=iotag32 wqe=seq_payload_len */
9425 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9426 /* word4 iocb=rsvd wqe=rsvd */
9427 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9428 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 9429 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 9430 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9431 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9432 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9433 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9434 LPFC_WQE_LENLOC_WORD3);
9435 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 9436 break;
9437 case CMD_FCP_IWRITE64_CR:
9438 command_type = FCP_COMMAND_DATA_OUT;
9439 /* word3 iocb=iotag wqe=payload_offset_len */
9440 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9441 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9442 xmit_len + sizeof(struct fcp_rsp));
9443 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9444 0);
9445 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9446 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9447 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9448 iocbq->iocb.ulpFCP2Rcvy);
9449 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9450 /* Always open the exchange */
9451 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9452 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9453 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9454 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9455 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9456 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9457 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9458 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9459 if (iocbq->priority) {
9460 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9461 (iocbq->priority << 1));
9462 } else {
9463 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9464 (phba->cfg_XLanePriority << 1));
9465 }
9466 }
9467 /* Note, word 10 is already initialized to 0 */
9468
9469 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9470 if (phba->cfg_enable_pbde)
9471 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9472 else
9473 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9474
9475 if (phba->fcp_embed_io) {
9476 struct lpfc_scsi_buf *lpfc_cmd;
9477 struct sli4_sge *sgl;
9478 struct fcp_cmnd *fcp_cmnd;
9479 uint32_t *ptr;
9480
9481 /* 128 byte wqe support here */
9482
9483 lpfc_cmd = iocbq->context1;
9484 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9485 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9486
9487 /* Word 0-2 - FCP_CMND */
205e8240 9488 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9489 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9490 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9491 wqe->generic.bde.addrHigh = 0;
9492 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9493
205e8240
JS
9494 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9495 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
b5c53958
JS
9496
9497 /* Word 22-29 FCP CMND Payload */
205e8240 9498 ptr = &wqe->words[22];
b5c53958
JS
9499 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9500 }
7851fe2c 9501 break;
4f774513 9502 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
9503 /* word3 iocb=iotag wqe=payload_offset_len */
9504 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9505 bf_set(payload_offset_len, &wqe->fcp_iread,
9506 xmit_len + sizeof(struct fcp_rsp));
9507 bf_set(cmd_buff_len, &wqe->fcp_iread,
9508 0);
f0d9bccc
JS
9509 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9510 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9511 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9512 iocbq->iocb.ulpFCP2Rcvy);
9513 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688 9514 /* Always open the exchange */
f0d9bccc
JS
9515 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9516 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9517 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9518 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9519 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
1ba981fd
JS
9520 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9521 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
c92c841c
JS
9522 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9523 if (iocbq->priority) {
9524 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9525 (iocbq->priority << 1));
9526 } else {
1ba981fd
JS
9527 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9528 (phba->cfg_XLanePriority << 1));
9529 }
9530 }
b5c53958
JS
9531 /* Note, word 10 is already initialized to 0 */
9532
414abe0a
JS
9533 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9534 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9535 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9536 else
9537 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9538
b5c53958
JS
9539 if (phba->fcp_embed_io) {
9540 struct lpfc_scsi_buf *lpfc_cmd;
9541 struct sli4_sge *sgl;
b5c53958
JS
9542 struct fcp_cmnd *fcp_cmnd;
9543 uint32_t *ptr;
9544
9545 /* 128 byte wqe support here */
b5c53958
JS
9546
9547 lpfc_cmd = iocbq->context1;
9548 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9549 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9550
9551 /* Word 0-2 - FCP_CMND */
205e8240 9552 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9553 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9554 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9555 wqe->generic.bde.addrHigh = 0;
9556 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9557
205e8240
JS
9558 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9559 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
b5c53958
JS
9560
9561 /* Word 22-29 FCP CMND Payload */
205e8240 9562 ptr = &wqe->words[22];
b5c53958
JS
9563 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9564 }
7851fe2c 9565 break;
4f774513 9566 case CMD_FCP_ICMND64_CR:
0ba4b219
JS
9567 /* word3 iocb=iotag wqe=payload_offset_len */
9568 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9569 bf_set(payload_offset_len, &wqe->fcp_icmd,
9570 xmit_len + sizeof(struct fcp_rsp));
9571 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9572 0);
f0d9bccc 9573 /* word3 iocb=IO_TAG wqe=reserved */
f0d9bccc 9574 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 9575 /* Always open the exchange */
f0d9bccc
JS
9576 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9577 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9578 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9579 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9580 LPFC_WQE_LENLOC_NONE);
2a94aea4
JS
9581 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9582 iocbq->iocb.ulpFCP2Rcvy);
1ba981fd
JS
9583 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9584 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
c92c841c
JS
9585 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9586 if (iocbq->priority) {
9587 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9588 (iocbq->priority << 1));
9589 } else {
1ba981fd
JS
9590 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9591 (phba->cfg_XLanePriority << 1));
9592 }
9593 }
b5c53958
JS
9594 /* Note, word 10 is already initialized to 0 */
9595
9596 if (phba->fcp_embed_io) {
9597 struct lpfc_scsi_buf *lpfc_cmd;
9598 struct sli4_sge *sgl;
b5c53958
JS
9599 struct fcp_cmnd *fcp_cmnd;
9600 uint32_t *ptr;
9601
9602 /* 128 byte wqe support here */
b5c53958
JS
9603
9604 lpfc_cmd = iocbq->context1;
9605 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9606 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9607
9608 /* Word 0-2 - FCP_CMND */
205e8240 9609 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9610 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9611 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9612 wqe->generic.bde.addrHigh = 0;
9613 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9614
205e8240
JS
9615 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9616 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
b5c53958
JS
9617
9618 /* Word 22-29 FCP CMND Payload */
205e8240 9619 ptr = &wqe->words[22];
b5c53958
JS
9620 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9621 }
7851fe2c 9622 break;
4f774513 9623 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
9624 /* For this command calculate the xmit length of the
9625 * request bde.
9626 */
9627 xmit_len = 0;
9628 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9629 sizeof(struct ulp_bde64);
9630 for (i = 0; i < numBdes; i++) {
63e801ce 9631 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
9632 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9633 break;
63e801ce
JS
9634 xmit_len += bde.tus.f.bdeSize;
9635 }
f0d9bccc
JS
9636 /* word3 iocb=IO_TAG wqe=request_payload_len */
9637 wqe->gen_req.request_payload_len = xmit_len;
9638 /* word4 iocb=parameter wqe=relative_offset memcpy */
9639 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
9640 /* word6 context tag copied in memcpy */
9641 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9642 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9644 "2015 Invalid CT %x command 0x%x\n",
9645 ct, iocbq->iocb.ulpCommand);
9646 return IOCB_ERROR;
9647 }
f0d9bccc
JS
9648 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9649 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9650 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9651 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9652 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9653 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9654 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9655 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
af22741c 9656 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
4f774513 9657 command_type = OTHER_COMMAND;
7851fe2c 9658 break;
4f774513 9659 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 9660 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 9661 /* words0-2 BDE memcpy */
f0d9bccc
JS
9662 /* word3 iocb=iotag32 wqe=response_payload_len */
9663 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
9664 /* word4 */
9665 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
9666 /* word5 iocb=rsvd wqe=did */
9667 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
9668 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9669
9670 if_type = bf_get(lpfc_sli_intf_if_type,
9671 &phba->sli4_hba.sli_intf);
27d6ac0a 9672 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
939723a4
JS
9673 if (iocbq->vport->fc_flag & FC_PT2PT) {
9674 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9675 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9676 iocbq->vport->fc_myDID);
9677 if (iocbq->vport->fc_myDID == Fabric_DID) {
9678 bf_set(wqe_els_did,
9679 &wqe->xmit_els_rsp.wqe_dest, 0);
9680 }
9681 }
9682 }
f0d9bccc
JS
9683 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9684 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9685 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9686 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 9687 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 9688 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 9689 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 9690 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
9691 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9692 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9693 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9694 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9695 LPFC_WQE_LENLOC_WORD3);
9696 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
9697 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9698 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
9699 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9700 iocbq->context2)->virt);
9701 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
9702 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9703 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 9704 iocbq->vport->fc_myDID);
939723a4
JS
9705 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9706 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
9707 phba->vpi_ids[phba->pport->vpi]);
9708 }
4f774513 9709 command_type = OTHER_COMMAND;
7851fe2c 9710 break;
4f774513
JS
9711 case CMD_CLOSE_XRI_CN:
9712 case CMD_ABORT_XRI_CN:
9713 case CMD_ABORT_XRI_CX:
9714 /* words 0-2 memcpy should be 0 (reserved) */
9715 /* port will send abts */
dcf2a4e0
JS
9716 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9717 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9718 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9719 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9720 } else
9721 fip = 0;
9722
9723 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 9724 /*
dcf2a4e0
JS
9725 * The link is down, or the command was ELS_FIP
9726 * so the fw does not need to send abts
4f774513
JS
9727 * on the wire.
9728 */
9729 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9730 else
9731 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9732 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
9733 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9734 wqe->abort_cmd.rsrvd5 = 0;
9735 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
9736 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9737 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
9738 /*
9739 * The abort handler will send us CMD_ABORT_XRI_CN or
9740 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9741 */
f0d9bccc
JS
9742 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9743 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9744 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9745 LPFC_WQE_LENLOC_NONE);
4f774513
JS
9746 cmnd = CMD_ABORT_XRI_CX;
9747 command_type = OTHER_COMMAND;
9748 xritag = 0;
7851fe2c 9749 break;
6669f9bb 9750 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 9751 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 9752 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
9753 * we re-construct this WQE here based on information in
9754 * iocbq from scratch.
9755 */
9756 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 9757 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
6669f9bb 9758 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
9759 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9760 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
9761 LPFC_ABTS_UNSOL_INT) {
9762 /* ABTS sent by initiator to CT exchange, the
9763 * RX_ID field will be filled with the newly
9764 * allocated responder XRI.
9765 */
9766 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9767 iocbq->sli4_xritag);
9768 } else {
9769 /* ABTS sent by responder to CT exchange, the
9770 * RX_ID field will be filled with the responder
9771 * RX_ID from ABTS.
9772 */
9773 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 9774 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 9775 }
6669f9bb
JS
9776 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9777 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
9778
9779 /* Use CT=VPI */
9780 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9781 ndlp->nlp_DID);
9782 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9783 iocbq->iocb.ulpContext);
9784 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 9785 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 9786 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
9787 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9788 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9789 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
9790 /* Overwrite the pre-set command type with OTHER_COMMAND */
9791 command_type = OTHER_COMMAND;
546fc854
JS
9792 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9793 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9794 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9795 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9796 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9797 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9798 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9799 }
9800
7851fe2c 9801 break;
ae9e28f3
JS
9802 case CMD_SEND_FRAME:
9803 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9804 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9805 return 0;
4f774513
JS
9806 case CMD_XRI_ABORTED_CX:
9807 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
9808 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9809 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9810 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9811 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9812 default:
9813 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9814 "2014 Invalid command 0x%x\n",
9815 iocbq->iocb.ulpCommand);
9816 return IOCB_ERROR;
7851fe2c 9817 break;
4f774513 9818 }
6d368e53 9819
8012cc38
JS
9820 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9821 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9822 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9823 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9824 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9825 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9826 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9827 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
9828 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9829 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9830 wqe->generic.wqe_com.abort_tag = abort_tag;
9831 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9832 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9833 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9834 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
9835 return 0;
9836}
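/*
 * A note on the bf_set()/bf_get() accessors used throughout the routine
 * above: they are token-pasting shift-and-mask helpers keyed off
 * <field>_SHIFT, <field>_MASK and <field>_WORD definitions in
 * lpfc_hw4.h. A minimal sketch of the idiom, using a hypothetical
 * field name for illustration:
 *
 *	#define wqe_example_SHIFT	24
 *	#define wqe_example_MASK	0x000000ff
 *	#define wqe_example_WORD	word7
 *
 *	bf_set(wqe_example, &wqe->generic.wqe_com, 5);
 *	v = bf_get(wqe_example, &wqe->generic.wqe_com);
 *
 * expands to a read-modify-write of wqe_com.word7 confined to bits
 * 31:24, which is why the switch above can compose a WQE field by
 * field without clobbering neighbouring bits.
 */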
9837
9838/**
9839 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9840 * @phba: Pointer to HBA context object.
9841 * @ring_number: SLI ring number to issue iocb on.
9842 * @piocb: Pointer to command iocb.
9843 * @flag: Flag indicating if this command can be put into txq.
9844 *
9845 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9846 * an iocb command to an HBA with SLI-4 interface spec.
9847 *
9848 * This function is called with hbalock held. The function will return success
9849 * after it successfully submits the iocb to firmware or after adding it to
9850 * txq.
9851 **/
9852static int
9853__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9854 struct lpfc_iocbq *piocb, uint32_t flag)
9855{
9856 struct lpfc_sglq *sglq;
205e8240 9857 union lpfc_wqe128 wqe;
1ba981fd 9858 struct lpfc_queue *wq;
895427bd 9859 struct lpfc_sli_ring *pring;
4f774513 9860
895427bd
JS
9861 /* Get the WQ */
9862 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9863 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9864 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9865 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9866 else
9867 wq = phba->sli4_hba.oas_wq;
9868 } else {
9869 wq = phba->sli4_hba.els_wq;
9870 }
9871
9872 /* Get corresponding ring */
9873 pring = wq->pring;
1c2ba475 9874
b5c53958
JS
9875 /*
9876 * The WQE can be either 64 or 128 bytes.
b5c53958 9877 */
b5c53958 9878
895427bd
JS
9879 lockdep_assert_held(&phba->hbalock);
9880
4f774513
JS
9881 if (piocb->sli4_xritag == NO_XRI) {
9882 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 9883 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
9884 sglq = NULL;
9885 else {
0e9bb8d7 9886 if (!list_empty(&pring->txq)) {
2a9bf3d0
JS
9887 if (!(flag & SLI_IOCB_RET_IOCB)) {
9888 __lpfc_sli_ringtx_put(phba,
9889 pring, piocb);
9890 return IOCB_SUCCESS;
9891 } else {
9892 return IOCB_BUSY;
9893 }
9894 } else {
895427bd 9895 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
2a9bf3d0
JS
9896 if (!sglq) {
9897 if (!(flag & SLI_IOCB_RET_IOCB)) {
9898 __lpfc_sli_ringtx_put(phba,
9899 pring,
9900 piocb);
9901 return IOCB_SUCCESS;
9902 } else
9903 return IOCB_BUSY;
9904 }
9905 }
4f774513 9906 }
2ea259ee 9907 } else if (piocb->iocb_flag & LPFC_IO_FCP)
6d368e53
JS
9908 /* These IO's already have an XRI and a mapped sgl. */
9909 sglq = NULL;
2ea259ee 9910 else {
6d368e53
JS
9911 /*
9912 * This is a continuation of a command (CX), so this
4f774513
JS
9913 * sglq is on the active list
9914 */
edccdc17 9915 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
9916 if (!sglq)
9917 return IOCB_ERROR;
9918 }
9919
9920 if (sglq) {
6d368e53 9921 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 9922 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 9923 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
9924 return IOCB_ERROR;
9925 }
9926
205e8240 9927 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
4f774513
JS
9928 return IOCB_ERROR;
9929
205e8240 9930 if (lpfc_sli4_wq_put(wq, &wqe))
895427bd 9931 return IOCB_ERROR;
4f774513
JS
9932 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9933
9934 return 0;
9935}
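/*
 * Flag semantics sketch (hypothetical caller): when no ELS sglq is
 * available, SLI_IOCB_RET_IOCB decides whether the iocb is parked on
 * the txq or handed back:
 *
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
 *				   SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		the caller still owns piocb and may retry or free it
 *	else if (rc == IOCB_SUCCESS)
 *		piocb was posted to the WQ or queued on the txq
 */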
9936
9937/**
9938 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9939 *
9940 * This routine wraps the actual lockless version for issuing an IOCB,
9941 * dispatching through the function pointer stored in the lpfc_hba struct.
9942 *
9943 * Return codes:
b5c53958
JS
9944 * IOCB_ERROR - Error
9945 * IOCB_SUCCESS - Success
9946 * IOCB_BUSY - Busy
4f774513 9947 **/
2a9bf3d0 9948int
4f774513
JS
9949__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9950 struct lpfc_iocbq *piocb, uint32_t flag)
9951{
9952 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9953}
9954
9955/**
25985edc 9956 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
9957 * @phba: The hba struct for which this call is being executed.
9958 * @dev_grp: The HBA PCI-Device group number.
9959 *
9960 * This routine sets up the SLI interface API function jump table in @phba
9961 * struct.
9962 * Returns: 0 - success, -ENODEV - failure.
9963 **/
9964int
9965lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9966{
9967
9968 switch (dev_grp) {
9969 case LPFC_PCI_DEV_LP:
9970 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9971 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9972 break;
9973 case LPFC_PCI_DEV_OC:
9974 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9975 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9976 break;
9977 default:
9978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9979 "1419 Invalid HBA PCI-device group: 0x%x\n",
9980 dev_grp);
9981 return -ENODEV;
9982 break;
9983 }
9984 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9985 return 0;
9986}
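/*
 * Dispatch sketch (hypothetical probe-time caller): once the jump
 * table is set up, SLI-rev-neutral paths go through the stored
 * pointers rather than branching on sli_rev:
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 */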
9987
a1efe163 9988/**
895427bd 9989 * lpfc_sli4_calc_ring - Calculates which ring to use
a1efe163 9990 * @phba: Pointer to HBA context object.
a1efe163
JS
9991 * @piocb: Pointer to command iocb.
9992 *
895427bd
JS
9993 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9994 * hba_wqidx; thus we need to calculate the corresponding ring.
a1efe163 9995 * Since ABORTS must go on the same WQ of the command they are
895427bd 9996 * aborting, we use command's hba_wqidx.
a1efe163 9997 */
895427bd
JS
9998struct lpfc_sli_ring *
9999lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9bd2bff5 10000{
895427bd 10001 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
8b0dff14 10002 if (!(phba->cfg_fof) ||
895427bd 10003 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
8b0dff14 10004 if (unlikely(!phba->sli4_hba.fcp_wq))
895427bd 10005 return NULL;
8b0dff14 10006 /*
895427bd 10007 * for abort iocb hba_wqidx should already
8b0dff14
JS
10008 * be setup based on what work queue we used.
10009 */
8e036a94 10010 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
895427bd 10011 piocb->hba_wqidx =
8b0dff14
JS
10012 lpfc_sli4_scmd_to_wqidx_distr(phba,
10013 piocb->context1);
8e036a94
DK
10014 piocb->hba_wqidx = piocb->hba_wqidx %
10015 phba->cfg_fcp_io_channel;
10016 }
895427bd 10017 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
8b0dff14
JS
10018 } else {
10019 if (unlikely(!phba->sli4_hba.oas_wq))
895427bd
JS
10020 return NULL;
10021 piocb->hba_wqidx = 0;
10022 return phba->sli4_hba.oas_wq->pring;
9bd2bff5 10023 }
895427bd
JS
10024 } else {
10025 if (unlikely(!phba->sli4_hba.els_wq))
10026 return NULL;
10027 piocb->hba_wqidx = 0;
10028 return phba->sli4_hba.els_wq->pring;
9bd2bff5 10029 }
9bd2bff5
JS
10030}
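/*
 * Usage sketch (hypothetical abort path): deriving the ring from the
 * command iocb guarantees the ABTS is queued on the same WQ as the
 * command it is aborting:
 *
 *	pring = lpfc_sli4_calc_ring(phba, cmdiocb);
 *	if (unlikely(!pring))
 *		return IOCB_ERROR;
 */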
10031
4f774513
JS
10032/**
10033 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10034 * @phba: Pointer to HBA context object.
10035 * @ring_number: SLI ring number to issue iocb on.
10036 * @piocb: Pointer to command iocb.
10037 * @flag: Flag indicating if this command can be put into txq.
10038 *
10039 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10040 * function. This function gets the hbalock and calls
10041 * __lpfc_sli_issue_iocb function and will return the error returned
10042 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10043 * functions which do not hold hbalock.
10044 **/
10045int
10046lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10047 struct lpfc_iocbq *piocb, uint32_t flag)
10048{
895427bd 10049 struct lpfc_hba_eq_hdl *hba_eq_hdl;
2a76a283 10050 struct lpfc_sli_ring *pring;
ba20c853
JS
10051 struct lpfc_queue *fpeq;
10052 struct lpfc_eqe *eqe;
4f774513 10053 unsigned long iflags;
2a76a283 10054 int rc, idx;
4f774513 10055
7e56aa25 10056 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
10057 pring = lpfc_sli4_calc_ring(phba, piocb);
10058 if (unlikely(pring == NULL))
9bd2bff5 10059 return IOCB_ERROR;
ba20c853 10060
9bd2bff5
JS
10061 spin_lock_irqsave(&pring->ring_lock, iflags);
10062 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10063 spin_unlock_irqrestore(&pring->ring_lock, iflags);
ba20c853 10064
9bd2bff5 10065 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
895427bd
JS
10066 idx = piocb->hba_wqidx;
10067 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
4f774513 10068
895427bd 10069 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
ba20c853 10070
9bd2bff5
JS
10071 /* Get associated EQ with this index */
10072 fpeq = phba->sli4_hba.hba_eq[idx];
ba20c853 10073
9bd2bff5 10074 /* Turn off interrupts from this EQ */
b71413dd 10075 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
ba20c853 10076
9bd2bff5
JS
10077 /*
10078 * Process all the events on FCP EQ
10079 */
10080 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
10081 lpfc_sli4_hba_handle_eqe(phba,
10082 eqe, idx);
10083 fpeq->EQ_processed++;
ba20c853 10084 }
ba20c853 10085
9bd2bff5 10086 /* Always clear and re-arm the EQ */
b71413dd 10087 phba->sli4_hba.sli4_eq_release(fpeq,
9bd2bff5
JS
10088 LPFC_QUEUE_REARM);
10089 }
895427bd 10090 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
2a76a283 10091 }
7e56aa25
JS
10092 } else {
10093 /* For now, SLI2/3 will still use hbalock */
10094 spin_lock_irqsave(&phba->hbalock, iflags);
10095 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10096 spin_unlock_irqrestore(&phba->hbalock, iflags);
10097 }
4f774513
JS
10098 return rc;
10099}
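/*
 * Caller triage sketch (hypothetical caller, no locks held):
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				 SLI_IOCB_RET_IOCB);
 *	switch (rc) {
 *	case IOCB_SUCCESS:
 *		break;		the completion handler will run later
 *	case IOCB_BUSY:
 *		...		back off and resubmit
 *	default:
 *		...		IOCB_ERROR: release the iocbq
 *	}
 */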
10100
10101/**
10102 * lpfc_extra_ring_setup - Extra ring setup function
10103 * @phba: Pointer to HBA context object.
10104 *
10105 * This function is called while the driver attaches to the
10106 * HBA to set up the extra ring. The extra ring is used
10107 * only when driver needs to support target mode functionality
10108 * or IP over FC functionalities.
10109 *
895427bd 10110 * This function is called with no lock held. SLI3 only.
4f774513
JS
10111 **/
10112static int
10113lpfc_extra_ring_setup( struct lpfc_hba *phba)
10114{
10115 struct lpfc_sli *psli;
10116 struct lpfc_sli_ring *pring;
10117
10118 psli = &phba->sli;
10119
10120 /* Adjust cmd/rsp ring iocb entries more evenly */
10121
10122 /* Take some away from the FCP ring */
895427bd 10123 pring = &psli->sli3_ring[LPFC_FCP_RING];
7e56aa25
JS
10124 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10125 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10126 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10127 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 10128
a4bc3379 10129 /* and give them to the extra ring */
895427bd 10130 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
a4bc3379 10131
7e56aa25
JS
10132 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10133 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10134 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10135 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
10136
10137 /* Setup default profile for this ring */
10138 pring->iotag_max = 4096;
10139 pring->num_mask = 1;
10140 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
10141 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10142 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
10143 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10144 return 0;
10145}
10146
cb69f7de
JS
10147/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10148 * @phba: Pointer to HBA context object.
10149 * @iocbq: Pointer to iocb object.
10150 *
10151 * The async_event handler calls this routine when it receives
10152 * an ASYNC_STATUS_CN event from the port. The port generates
10153 * this event when an Abort Sequence request to an rport fails
10154 * twice in succession. The abort could be originated by the
10155 * driver or by the port. The ABTS could have been for an ELS
10156 * or FCP IO. The port only generates this event when an ABTS
10157 * fails to complete after one retry.
10158 */
10159static void
10160lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10161 struct lpfc_iocbq *iocbq)
10162{
10163 struct lpfc_nodelist *ndlp = NULL;
10164 uint16_t rpi = 0, vpi = 0;
10165 struct lpfc_vport *vport = NULL;
10166
10167 /* The rpi in the ulpContext is vport-sensitive. */
10168 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10169 rpi = iocbq->iocb.ulpContext;
10170
10171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10172 "3092 Port generated ABTS async event "
10173 "on vpi %d rpi %d status 0x%x\n",
10174 vpi, rpi, iocbq->iocb.ulpStatus);
10175
10176 vport = lpfc_find_vport_by_vpid(phba, vpi);
10177 if (!vport)
10178 goto err_exit;
10179 ndlp = lpfc_findnode_rpi(vport, rpi);
10180 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10181 goto err_exit;
10182
10183 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10184 lpfc_sli_abts_recover_port(vport, ndlp);
10185 return;
10186
10187 err_exit:
10188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10189 "3095 Event Context not found, no "
10190 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10191 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10192 vpi, rpi);
10193}
10194
10195/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10196 * @phba: pointer to HBA context object.
10197 * @ndlp: nodelist pointer for the impacted rport.
10198 * @axri: pointer to the wcqe containing the failed exchange.
10199 *
10200 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10201 * port. The port generates this event when an abort exchange request to an
10202 * rport fails twice in succession with no reply. The abort could be originated
10203 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10204 */
10205void
10206lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10207 struct lpfc_nodelist *ndlp,
10208 struct sli4_wcqe_xri_aborted *axri)
10209{
10210 struct lpfc_vport *vport;
5c1db2ac 10211 uint32_t ext_status = 0;
cb69f7de 10212
6b5151fd 10213 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
10214 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10215 "3115 Node Context not found, driver "
10216 "ignoring abts err event\n");
6b5151fd
JS
10217 return;
10218 }
10219
cb69f7de
JS
10220 vport = ndlp->vport;
10221 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10222 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 10223 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8e668af5 10224 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
cb69f7de 10225 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
10226 bf_get(lpfc_wcqe_xa_status, axri),
10227 axri->parameter);
cb69f7de 10228
5c1db2ac
JS
10229 /*
10230 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10231 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10232 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10233 */
e3d2b802 10234 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
10235 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10236 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
10237 lpfc_sli_abts_recover_port(vport, ndlp);
10238}
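/*
 * The recovery predicate above, tabulated (status from the WCQE,
 * ext_status masked from the parameter word):
 *
 *	IOSTAT_LOCAL_REJECT, ext_status == 0
 *		-> older OCe firmware: recover the rport
 *	IOSTAT_LOCAL_REJECT, ext_status == IOERR_SEQUENCE_TIMEOUT
 *		-> later OCe/LPe firmware: recover the rport
 *	any other combination
 *		-> log only, no recovery action
 */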
10239
e59058c4 10240/**
3621a710 10241 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
10242 * @phba: Pointer to HBA context object.
10243 * @pring: Pointer to driver SLI ring object.
10244 * @iocbq: Pointer to iocb object.
10245 *
10246 * This function is called by the slow ring event handler
10247 * function when there is an ASYNC event iocb in the ring.
10248 * This function is called with no lock held.
10249 * Currently this function handles only temperature related
10250 * ASYNC events. The function decodes the temperature sensor
10251 * event message and posts events for the management applications.
10252 **/
98c9ea5c 10253static void
57127f15
JS
10254lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10255 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10256{
10257 IOCB_t *icmd;
10258 uint16_t evt_code;
57127f15
JS
10259 struct temp_event temp_event_data;
10260 struct Scsi_Host *shost;
a257bf90 10261 uint32_t *iocb_w;
57127f15
JS
10262
10263 icmd = &iocbq->iocb;
10264 evt_code = icmd->un.asyncstat.evt_code;
57127f15 10265
cb69f7de
JS
10266 switch (evt_code) {
10267 case ASYNC_TEMP_WARN:
10268 case ASYNC_TEMP_SAFE:
10269 temp_event_data.data = (uint32_t) icmd->ulpContext;
10270 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10271 if (evt_code == ASYNC_TEMP_WARN) {
10272 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10273 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10274 "0347 Adapter is very hot, please take "
10275 "corrective action. temperature : %d Celsius\n",
10276 (uint32_t) icmd->ulpContext);
10277 } else {
10278 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10279 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10280 "0340 Adapter temperature is OK now. "
10281 "temperature : %d Celsius\n",
10282 (uint32_t) icmd->ulpContext);
10283 }
10284
10285 /* Send temperature change event to applications */
10286 shost = lpfc_shost_from_vport(phba->pport);
10287 fc_host_post_vendor_event(shost, fc_get_event_number(),
10288 sizeof(temp_event_data), (char *) &temp_event_data,
10289 LPFC_NL_VENDOR_ID);
10290 break;
10291 case ASYNC_STATUS_CN:
10292 lpfc_sli_abts_err_handler(phba, iocbq);
10293 break;
10294 default:
a257bf90 10295 iocb_w = (uint32_t *) icmd;
cb69f7de 10296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 10297 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 10298 " evt_code 0x%x\n"
a257bf90
JS
10299 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10300 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10301 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10302 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 10303 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
10304 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10305 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10306 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10307 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10308
cb69f7de 10309 break;
57127f15 10310 }
57127f15
JS
10311}
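/*
 * Payload sketch for the vendor event posted above: management
 * applications receive a struct temp_event (defined in lpfc_nl.h)
 * over the FC transport netlink channel:
 *
 *	struct temp_event {
 *		uint32_t event_type;	FC_REG_TEMPERATURE_EVENT
 *		uint32_t event_code;	LPFC_THRESHOLD_TEMP or
 *					LPFC_NORMAL_TEMP
 *		uint32_t data;		temperature in degrees Celsius
 *	};
 */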
10312
10313
e59058c4 10314/**
895427bd 10315 * lpfc_sli4_setup - SLI ring setup function
e59058c4
JS
10316 * @phba: Pointer to HBA context object.
10317 *
10318 * lpfc_sli4_setup sets up rings of the SLI interface with
10319 * the number of iocbs per ring and iotags. This function is
10320 * called while the driver attaches to the HBA and before the
10321 * interrupts are enabled. So there is no need for locking.
10322 *
10323 * This function always returns 0.
10324 **/
dea3101e 10325int
895427bd
JS
10326lpfc_sli4_setup(struct lpfc_hba *phba)
10327{
10328 struct lpfc_sli_ring *pring;
10329
10330 pring = phba->sli4_hba.els_wq->pring;
10331 pring->num_mask = LPFC_MAX_RING_MASK;
10332 pring->prt[0].profile = 0; /* Mask 0 */
10333 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10334 pring->prt[0].type = FC_TYPE_ELS;
10335 pring->prt[0].lpfc_sli_rcv_unsol_event =
10336 lpfc_els_unsol_event;
10337 pring->prt[1].profile = 0; /* Mask 1 */
10338 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10339 pring->prt[1].type = FC_TYPE_ELS;
10340 pring->prt[1].lpfc_sli_rcv_unsol_event =
10341 lpfc_els_unsol_event;
10342 pring->prt[2].profile = 0; /* Mask 2 */
10343 /* NameServer Inquiry */
10344 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10345 /* NameServer */
10346 pring->prt[2].type = FC_TYPE_CT;
10347 pring->prt[2].lpfc_sli_rcv_unsol_event =
10348 lpfc_ct_unsol_event;
10349 pring->prt[3].profile = 0; /* Mask 3 */
10350 /* NameServer response */
10351 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10352 /* NameServer */
10353 pring->prt[3].type = FC_TYPE_CT;
10354 pring->prt[3].lpfc_sli_rcv_unsol_event =
10355 lpfc_ct_unsol_event;
10356 return 0;
10357}
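/*
 * Matching sketch (simplified from the unsolicited-event path): a
 * received frame's r_ctl/type pair is compared against the prt[]
 * entries registered above to pick a handler:
 *
 *	for (i = 0; i < pring->num_mask; i++)
 *		if (pring->prt[i].rctl == fch_r_ctl &&
 *		    pring->prt[i].type == fch_type)
 *			pring->prt[i].lpfc_sli_rcv_unsol_event(phba,
 *						pring, saveq);
 */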
10358
10359/**
10360 * lpfc_sli_setup - SLI ring setup function
10361 * @phba: Pointer to HBA context object.
10362 *
10363 * lpfc_sli_setup sets up rings of the SLI interface with
10364 * the number of iocbs per ring and iotags. This function is
10365 * called while the driver attaches to the HBA and before the
10366 * interrupts are enabled. So there is no need for locking.
10367 *
10368 * This function always returns 0. SLI3 only.
10369 **/
10370int
dea3101e 10371lpfc_sli_setup(struct lpfc_hba *phba)
10372{
ed957684 10373 int i, totiocbsize = 0;
dea3101e 10374 struct lpfc_sli *psli = &phba->sli;
10375 struct lpfc_sli_ring *pring;
10376
2a76a283 10377 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea3101e 10378 psli->sli_flag = 0;
dea3101e 10379
604a3e30
JB
10380 psli->iocbq_lookup = NULL;
10381 psli->iocbq_lookup_len = 0;
10382 psli->last_iotag = 0;
10383
dea3101e 10384 for (i = 0; i < psli->num_rings; i++) {
895427bd 10385 pring = &psli->sli3_ring[i];
dea3101e 10386 switch (i) {
10387 case LPFC_FCP_RING: /* ring 0 - FCP */
10388 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10389 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10390 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10391 pring->sli.sli3.numCiocb +=
10392 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10393 pring->sli.sli3.numRiocb +=
10394 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10395 pring->sli.sli3.numCiocb +=
10396 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10397 pring->sli.sli3.numRiocb +=
10398 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10399 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10400 SLI3_IOCB_CMD_SIZE :
10401 SLI2_IOCB_CMD_SIZE;
7e56aa25 10402 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10403 SLI3_IOCB_RSP_SIZE :
10404 SLI2_IOCB_RSP_SIZE;
dea3101e 10405 pring->iotag_ctr = 0;
10406 pring->iotag_max =
92d7f7b0 10407 (phba->cfg_hba_queue_depth * 2);
dea3101e 10408 pring->fast_iotag = pring->iotag_max;
10409 pring->num_mask = 0;
10410 break;
a4bc3379 10411 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 10412 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10413 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10414 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10415 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10416 SLI3_IOCB_CMD_SIZE :
10417 SLI2_IOCB_CMD_SIZE;
7e56aa25 10418 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10419 SLI3_IOCB_RSP_SIZE :
10420 SLI2_IOCB_RSP_SIZE;
2e0fef85 10421 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 10422 pring->num_mask = 0;
10423 break;
10424 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10425 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10426 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10427 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10428 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10429 SLI3_IOCB_CMD_SIZE :
10430 SLI2_IOCB_CMD_SIZE;
7e56aa25 10431 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10432 SLI3_IOCB_RSP_SIZE :
10433 SLI2_IOCB_RSP_SIZE;
dea3101e 10434 pring->fast_iotag = 0;
10435 pring->iotag_ctr = 0;
10436 pring->iotag_max = 4096;
57127f15
JS
10437 pring->lpfc_sli_rcv_async_status =
10438 lpfc_sli_async_event_handler;
6669f9bb 10439 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 10440 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
10441 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10442 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 10443 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 10444 lpfc_els_unsol_event;
dea3101e 10445 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
10446 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10447 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 10448 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 10449 lpfc_els_unsol_event;
dea3101e 10450 pring->prt[2].profile = 0; /* Mask 2 */
10451 /* NameServer Inquiry */
6a9c52cf 10452 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 10453 /* NameServer */
6a9c52cf 10454 pring->prt[2].type = FC_TYPE_CT;
dea3101e 10455 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 10456 lpfc_ct_unsol_event;
dea3101e 10457 pring->prt[3].profile = 0; /* Mask 3 */
10458 /* NameServer response */
6a9c52cf 10459 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 10460 /* NameServer */
6a9c52cf 10461 pring->prt[3].type = FC_TYPE_CT;
dea3101e 10462 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 10463 lpfc_ct_unsol_event;
dea3101e 10464 break;
10465 }
7e56aa25
JS
10466 totiocbsize += (pring->sli.sli3.numCiocb *
10467 pring->sli.sli3.sizeCiocb) +
10468 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 10469 }
ed957684 10470 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 10471 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
10472 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10473 "SLI2 SLIM Data: x%x x%lx\n",
10474 phba->brd_no, totiocbsize,
10475 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 10476 }
cf5bf97e
JW
10477 if (phba->cfg_multi_ring_support == 2)
10478 lpfc_extra_ring_setup(phba);
dea3101e 10479
10480 return 0;
10481}
10482
e59058c4 10483/**
895427bd 10484 * lpfc_sli4_queue_init - Queue initialization function
e59058c4
JS
10485 * @phba: Pointer to HBA context object.
10486 *
895427bd 10487 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
e59058c4
JS
10488 * ring. This function also initializes ring indices of each ring.
10491 * This function is called with no lock held.
10491 * This function is called with no lock held and always returns
10492 * 1.
10493 **/
895427bd
JS
10494void
10495lpfc_sli4_queue_init(struct lpfc_hba *phba)
dea3101e 10496{
10497 struct lpfc_sli *psli;
10498 struct lpfc_sli_ring *pring;
604a3e30 10499 int i;
dea3101e 10500
10501 psli = &phba->sli;
2e0fef85 10502 spin_lock_irq(&phba->hbalock);
dea3101e 10503 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10504 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10505 /* Initialize list headers for txq and txcmplq as doubly linked lists */
895427bd
JS
10506 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10507 pring = phba->sli4_hba.fcp_wq[i]->pring;
68e814f5 10508 pring->flag = 0;
895427bd 10509 pring->ringno = LPFC_FCP_RING;
dea3101e 10510 INIT_LIST_HEAD(&pring->txq);
10511 INIT_LIST_HEAD(&pring->txcmplq);
10512 INIT_LIST_HEAD(&pring->iocb_continueq);
7e56aa25 10513 spin_lock_init(&pring->ring_lock);
dea3101e 10514 }
895427bd
JS
10515 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10516 pring = phba->sli4_hba.nvme_wq[i]->pring;
10517 pring->flag = 0;
10518 pring->ringno = LPFC_FCP_RING;
10519 INIT_LIST_HEAD(&pring->txq);
10520 INIT_LIST_HEAD(&pring->txcmplq);
10521 INIT_LIST_HEAD(&pring->iocb_continueq);
10522 spin_lock_init(&pring->ring_lock);
10523 }
10524 pring = phba->sli4_hba.els_wq->pring;
10525 pring->flag = 0;
10526 pring->ringno = LPFC_ELS_RING;
10527 INIT_LIST_HEAD(&pring->txq);
10528 INIT_LIST_HEAD(&pring->txcmplq);
10529 INIT_LIST_HEAD(&pring->iocb_continueq);
10530 spin_lock_init(&pring->ring_lock);
dea3101e 10531
895427bd
JS
10532 if (phba->cfg_nvme_io_channel) {
10533 pring = phba->sli4_hba.nvmels_wq->pring;
10534 pring->flag = 0;
10535 pring->ringno = LPFC_ELS_RING;
10536 INIT_LIST_HEAD(&pring->txq);
10537 INIT_LIST_HEAD(&pring->txcmplq);
10538 INIT_LIST_HEAD(&pring->iocb_continueq);
10539 spin_lock_init(&pring->ring_lock);
10540 }
10541
10542 if (phba->cfg_fof) {
10543 pring = phba->sli4_hba.oas_wq->pring;
10544 pring->flag = 0;
10545 pring->ringno = LPFC_FCP_RING;
10546 INIT_LIST_HEAD(&pring->txq);
10547 INIT_LIST_HEAD(&pring->txcmplq);
10548 INIT_LIST_HEAD(&pring->iocb_continueq);
10549 spin_lock_init(&pring->ring_lock);
10550 }
10551
10552 spin_unlock_irq(&phba->hbalock);
10553}
10554
10555/**
10556 * lpfc_sli_queue_init - Queue initialization function
10557 * @phba: Pointer to HBA context object.
10558 *
10559 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10560 * ring. This function also initializes ring indices of each ring.
10561 * This function is called during the initialization of the SLI
10562 * interface of an HBA.
10563 * This function is called with no lock held.
10565 **/
10566void
10567lpfc_sli_queue_init(struct lpfc_hba *phba)
dea3101e 10568{
10569 struct lpfc_sli *psli;
10570 struct lpfc_sli_ring *pring;
604a3e30 10571 int i;
dea3101e 10572
10573 psli = &phba->sli;
2e0fef85 10574 spin_lock_irq(&phba->hbalock);
dea3101e 10575 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10576 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10577 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10578 for (i = 0; i < psli->num_rings; i++) {
895427bd 10579 pring = &psli->sli3_ring[i];
dea3101e 10580 pring->ringno = i;
7e56aa25
JS
10581 pring->sli.sli3.next_cmdidx = 0;
10582 pring->sli.sli3.local_getidx = 0;
10583 pring->sli.sli3.cmdidx = 0;
dea3101e 10584 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 10585 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 10586 INIT_LIST_HEAD(&pring->postbufq);
895427bd
JS
10587 pring->flag = 0;
10588 INIT_LIST_HEAD(&pring->txq);
10589 INIT_LIST_HEAD(&pring->txcmplq);
7e56aa25 10590 spin_lock_init(&pring->ring_lock);
dea3101e 10591 }
2e0fef85 10592 spin_unlock_irq(&phba->hbalock);
dea3101e 10593}
10594
04c68496
JS
10595/**
10596 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10597 * @phba: Pointer to HBA context object.
10598 *
10599 * This routine flushes the mailbox command subsystem. It will unconditionally
10600 * flush all the mailbox commands in the three possible stages in the mailbox
10601 * command sub-system: pending mailbox command queue; the outstanding mailbox
10602 * command; and completed mailbox command queue. It is caller's responsibility
10603 * to make sure that the driver is in the proper state to flush the mailbox
10604 * command sub-system. Namely, the posting of mailbox commands into the
10605 * pending mailbox command queue from the various clients must be stopped;
10606 * either the HBA is in a state that it will never works on the outstanding
10607 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10608 * mailbox command has been completed.
10609 **/
10610static void
10611lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10612{
10613 LIST_HEAD(completions);
10614 struct lpfc_sli *psli = &phba->sli;
10615 LPFC_MBOXQ_t *pmb;
10616 unsigned long iflag;
10617
523128e5
JS
10618 /* Disable softirqs, including timers from obtaining phba->hbalock */
10619 local_bh_disable();
10620
04c68496
JS
10621 /* Flush all the mailbox commands in the mbox system */
10622 spin_lock_irqsave(&phba->hbalock, iflag);
523128e5 10623
04c68496
JS
10624 /* The pending mailbox command queue */
10625 list_splice_init(&phba->sli.mboxq, &completions);
10626 /* The outstanding active mailbox command */
10627 if (psli->mbox_active) {
10628 list_add_tail(&psli->mbox_active->list, &completions);
10629 psli->mbox_active = NULL;
10630 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10631 }
10632 /* The completed mailbox command queue */
10633 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10634 spin_unlock_irqrestore(&phba->hbalock, iflag);
10635
523128e5
JS
10636 /* Enable softirqs again, done with phba->hbalock */
10637 local_bh_enable();
10638
04c68496
JS
10639 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10640 while (!list_empty(&completions)) {
10641 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10642 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10643 if (pmb->mbox_cmpl)
10644 pmb->mbox_cmpl(phba, pmb);
10645 }
10646}
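/*
 * Completion-side sketch (hypothetical handler): a flushed command
 * reaches its mbox_cmpl with MBX_NOT_FINISHED and must not trust the
 * mailbox payload:
 *
 *	static void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED)
 *			goto out;	flushed, command never executed
 *		... consume pmb->u.mb ...
 *	out:
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 */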
10647
e59058c4 10648/**
3621a710 10649 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
10650 * @vport: Pointer to virtual port object.
10651 *
10652 * lpfc_sli_host_down is called to clean up the resources
10653 * associated with a vport before destroying virtual
10654 * port data structures.
10655 * This function does the following operations:
10656 * - Free discovery resources associated with this virtual
10657 * port.
10658 * - Free iocbs associated with this virtual port in
10659 * the txq.
10660 * - Send abort for all iocb commands associated with this
10661 * vport in txcmplq.
10662 *
10663 * This function is called with no lock held and always returns 1.
10664 **/
92d7f7b0
JS
10665int
10666lpfc_sli_host_down(struct lpfc_vport *vport)
10667{
858c9f6c 10668 LIST_HEAD(completions);
92d7f7b0
JS
10669 struct lpfc_hba *phba = vport->phba;
10670 struct lpfc_sli *psli = &phba->sli;
895427bd 10671 struct lpfc_queue *qp = NULL;
92d7f7b0
JS
10672 struct lpfc_sli_ring *pring;
10673 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
10674 int i;
10675 unsigned long flags = 0;
10676 uint16_t prev_pring_flag;
10677
10678 lpfc_cleanup_discovery_resources(vport);
10679
10680 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0 10681
895427bd
JS
10682 /*
10683 * Error everything on the txq since these iocbs
10684 * have not been given to the FW yet.
10685 * Also issue ABTS for everything on the txcmplq
10686 */
10687 if (phba->sli_rev != LPFC_SLI_REV4) {
10688 for (i = 0; i < psli->num_rings; i++) {
10689 pring = &psli->sli3_ring[i];
10690 prev_pring_flag = pring->flag;
10691 /* Only slow rings */
10692 if (pring->ringno == LPFC_ELS_RING) {
10693 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10694 /* Set the lpfc data pending flag */
10695 set_bit(LPFC_DATA_READY, &phba->data_flags);
10696 }
10697 list_for_each_entry_safe(iocb, next_iocb,
10698 &pring->txq, list) {
10699 if (iocb->vport != vport)
10700 continue;
10701 list_move_tail(&iocb->list, &completions);
10702 }
10703 list_for_each_entry_safe(iocb, next_iocb,
10704 &pring->txcmplq, list) {
10705 if (iocb->vport != vport)
10706 continue;
10707 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10708 }
10709 pring->flag = prev_pring_flag;
10710 }
10711 } else {
10712 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10713 pring = qp->pring;
10714 if (!pring)
92d7f7b0 10715 continue;
895427bd
JS
10716 if (pring == phba->sli4_hba.els_wq->pring) {
10717 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10718 /* Set the lpfc data pending flag */
10719 set_bit(LPFC_DATA_READY, &phba->data_flags);
10720 }
10721 prev_pring_flag = pring->flag;
10722 spin_lock_irq(&pring->ring_lock);
10723 list_for_each_entry_safe(iocb, next_iocb,
10724 &pring->txq, list) {
10725 if (iocb->vport != vport)
10726 continue;
10727 list_move_tail(&iocb->list, &completions);
10728 }
10729 spin_unlock_irq(&pring->ring_lock);
10730 list_for_each_entry_safe(iocb, next_iocb,
10731 &pring->txcmplq, list) {
10732 if (iocb->vport != vport)
10733 continue;
10734 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10735 }
10736 pring->flag = prev_pring_flag;
92d7f7b0 10737 }
92d7f7b0 10738 }
92d7f7b0
JS
10739 spin_unlock_irqrestore(&phba->hbalock, flags);
10740
a257bf90
JS
10741 /* Cancel all the IOCBs from the completions list */
10742 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10743 IOERR_SLI_DOWN);
92d7f7b0
JS
10744 return 1;
10745}
10746
e59058c4 10747/**
3621a710 10748 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
10749 * @phba: Pointer to HBA context object.
10750 *
10751 * This function cleans up all iocb, buffers, mailbox commands
10752 * while shutting down the HBA. This function is called with no
10753 * lock held and always returns 1.
10754 * This function does the following to cleanup driver resources:
10755 * - Free discovery resources for each virtual port
10756 * - Cleanup any pending fabric iocbs
10757 * - Iterate through the iocb txq and free each entry
10758 * in the list.
10759 * - Free up any buffer posted to the HBA
10760 * - Free mailbox commands in the mailbox queue.
10761 **/
dea3101e 10762int
2e0fef85 10763lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 10764{
2534ba75 10765 LIST_HEAD(completions);
2e0fef85 10766 struct lpfc_sli *psli = &phba->sli;
895427bd 10767 struct lpfc_queue *qp = NULL;
dea3101e 10768 struct lpfc_sli_ring *pring;
0ff10d46 10769 struct lpfc_dmabuf *buf_ptr;
dea3101e 10770 unsigned long flags = 0;
04c68496
JS
10771 int i;
10772
10773 /* Shutdown the mailbox command sub-system */
618a5230 10774 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 10775
dea3101e 10776 lpfc_hba_down_prep(phba);
10777
523128e5
JS
10778 /* Disable softirqs, including timers from obtaining phba->hbalock */
10779 local_bh_disable();
10780
92d7f7b0
JS
10781 lpfc_fabric_abort_hba(phba);
10782
2e0fef85 10783 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 10784
895427bd
JS
10785 /*
10786 * Error everything on the txq since these iocbs
10787 * have not been given to the FW yet.
10788 */
10789 if (phba->sli_rev != LPFC_SLI_REV4) {
10790 for (i = 0; i < psli->num_rings; i++) {
10791 pring = &psli->sli3_ring[i];
10792 /* Only slow rings */
10793 if (pring->ringno == LPFC_ELS_RING) {
10794 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10795 /* Set the lpfc data pending flag */
10796 set_bit(LPFC_DATA_READY, &phba->data_flags);
10797 }
10798 list_splice_init(&pring->txq, &completions);
10799 }
10800 } else {
10801 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10802 pring = qp->pring;
10803 if (!pring)
10804 continue;
10805 spin_lock_irq(&pring->ring_lock);
10806 list_splice_init(&pring->txq, &completions);
10807 spin_unlock_irq(&pring->ring_lock);
10808 if (pring == phba->sli4_hba.els_wq->pring) {
10809 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10810 /* Set the lpfc data pending flag */
10811 set_bit(LPFC_DATA_READY, &phba->data_flags);
10812 }
10813 }
2534ba75 10814 }
2e0fef85 10815 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 10816
a257bf90
JS
10817 /* Cancel all the IOCBs from the completions list */
10818 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10819 IOERR_SLI_DOWN);
dea3101e 10820
0ff10d46
JS
10821 spin_lock_irqsave(&phba->hbalock, flags);
10822 list_splice_init(&phba->elsbuf, &completions);
10823 phba->elsbuf_cnt = 0;
10824 phba->elsbuf_prev_cnt = 0;
10825 spin_unlock_irqrestore(&phba->hbalock, flags);
10826
10827 while (!list_empty(&completions)) {
10828 list_remove_head(&completions, buf_ptr,
10829 struct lpfc_dmabuf, list);
10830 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10831 kfree(buf_ptr);
10832 }
10833
523128e5
JS
10834 /* Enable softirqs again, done with phba->hbalock */
10835 local_bh_enable();
10836
dea3101e 10837 /* Return any active mbox cmds */
10838 del_timer_sync(&psli->mbox_tmo);
2e0fef85 10839
da0436e9 10840 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 10841 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 10842 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 10843
da0436e9
JS
10844 return 1;
10845}
10846
e59058c4 10847/**
3621a710 10848 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
10849 * @srcp: Source memory pointer.
10850 * @destp: Destination memory pointer.
10851 * @cnt: Number of words required to be copied.
10852 *
10853 * This function is used for copying data between driver memory
10854 * and the SLI memory. This function also changes the endianness
10855 * of each word if native endianness is different from SLI
10856 * endianness. This function can be called with or without
10857 * lock.
10858 **/
dea3101e 10859void
10860lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10861{
10862 uint32_t *src = srcp;
10863 uint32_t *dest = destp;
10864 uint32_t ldata;
10865 int i;
10866
10867 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10868 ldata = *src;
10869 ldata = le32_to_cpu(ldata);
10870 *dest = ldata;
10871 src++;
10872 dest++;
10873 }
10874}
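/*
 * Behavior sketch: cnt is a byte count, consumed one 32-bit word at a
 * time, and each word is converted from SLI (little-endian) to host
 * byte order -- a byte swap per word on big-endian hosts, a straight
 * copy on little-endian ones. A hypothetical caller copying n_words
 * words out of SLIM (local_copy and n_words are illustrative):
 *
 *	lpfc_sli_pcimem_bcopy(phba->slim2p.virt, &local_copy,
 *			      n_words * sizeof(uint32_t));
 */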
10875
e59058c4 10876
a0c87cbd
JS
10877/**
10878 * lpfc_sli_bemem_bcopy - SLI memory copy function
10879 * @srcp: Source memory pointer.
10880 * @destp: Destination memory pointer.
10881 * @cnt: Number of words required to be copied.
10882 *
10883 * This function is used for copying data between a data structure
10884 * with big endian representation to local endianness.
10885 * This function can be called with or without lock.
10886 **/
10887void
10888lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10889{
10890 uint32_t *src = srcp;
10891 uint32_t *dest = destp;
10892 uint32_t ldata;
10893 int i;
10894
10895 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10896 ldata = *src;
10897 ldata = be32_to_cpu(ldata);
10898 *dest = ldata;
10899 src++;
10900 dest++;
10901 }
10902}
10903
e59058c4 10904/**
3621a710 10905 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
10906 * @phba: Pointer to HBA context object.
10907 * @pring: Pointer to driver SLI ring object.
10908 * @mp: Pointer to driver buffer object.
10909 *
10910 * This function is called with no lock held.
10911 * It always return zero after adding the buffer to the postbufq
10912 * buffer list.
10913 **/
dea3101e 10914int
2e0fef85
JS
10915lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10916 struct lpfc_dmabuf *mp)
dea3101e 10917{
10918 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10919 later */
2e0fef85 10920 spin_lock_irq(&phba->hbalock);
dea3101e 10921 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 10922 pring->postbufq_cnt++;
2e0fef85 10923 spin_unlock_irq(&phba->hbalock);
dea3101e 10924 return 0;
10925}
10926
e59058c4 10927/**
3621a710 10928 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
10929 * @phba: Pointer to HBA context object.
10930 *
10931 * When HBQ is enabled, buffers are searched based on tags. This function
10932 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10933 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10934 * does not conflict with tags of buffer posted for unsolicited events.
10935 * The function returns the allocated tag. The function is called with
10936 * no locks held.
10937 **/
76bb24ef
JS
10938uint32_t
10939lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10940{
10941 spin_lock_irq(&phba->hbalock);
10942 phba->buffer_tag_count++;
10943 /*
10944 * Always set the QUE_BUFTAG_BIT to distinguish this
10945 * tag from a tag assigned by the HBQ.
10946 */
10947 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10948 spin_unlock_irq(&phba->hbalock);
10949 return phba->buffer_tag_count;
10950}
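/*
 * Tag round-trip sketch (hypothetical caller): the returned tag is
 * stored in the buffer and carried by the CMD_QUE_XRI64_CX iocb, then
 * used on completion to look the buffer back up:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	...post CMD_QUE_XRI64_CX carrying mp->buffer_tag...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */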

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then the lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                            uint32_t tag)
{
        struct lpfc_dmabuf *mp, *next_mp;
        struct list_head *slp = &pring->postbufq;

        /* Search postbufq, from the beginning, looking for a match on tag */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                if (mp->buffer_tag == tag) {
                        list_del_init(&mp->list);
                        pring->postbufq_cnt--;
                        spin_unlock_irq(&phba->hbalock);
                        return mp;
                }
        }

        spin_unlock_irq(&phba->hbalock);
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0402 Cannot find virtual addr for buffer tag on "
                        "ring %d Data x%lx x%p x%p x%x\n",
                        pring->ringno, (unsigned long) tag,
                        slp->next, slp->prev, pring->postbufq_cnt);

        return NULL;
}
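
/*
 * Illustrative sketch only -- not part of the driver. It shows the
 * buffer-tag round trip documented above, assuming the caller already owns
 * a DMA-able lpfc_dmabuf: allocate a tag, post the buffer, and later
 * reclaim it by tag when the CMD_IOCB_RET_XRI64_CX completion arrives.
 * The function name is hypothetical and the block is compiled out.
 */
#ifdef LPFC_BUFTAG_EXAMPLE
static struct lpfc_dmabuf *
lpfc_example_buftag_roundtrip(struct lpfc_hba *phba,
                              struct lpfc_sli_ring *pring,
                              struct lpfc_dmabuf *mp)
{
        /* Tag the buffer so it can be found after the HBA returns it */
        mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
        lpfc_sli_ringpostbuf_put(phba, pring, mp);

        /* ... CMD_QUE_XRI64_CX issued, CMD_IOCB_RET_XRI64_CX completes ... */

        return lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
}
#endif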

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of the unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         dma_addr_t phys)
{
        struct lpfc_dmabuf *mp, *next_mp;
        struct list_head *slp = &pring->postbufq;

        /* Search postbufq, from the beginning, looking for a match on phys */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                if (mp->phys == phys) {
                        list_del_init(&mp->list);
                        pring->postbufq_cnt--;
                        spin_unlock_irq(&phba->hbalock);
                        return mp;
                }
        }

        spin_unlock_irq(&phba->hbalock);
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0410 Cannot find virtual addr for mapped buf on "
                        "ring %d Data x%llx x%p x%p x%x\n",
                        pring->ringno, (unsigned long long)phys,
                        slp->next, slp->prev, pring->postbufq_cnt);
        return NULL;
}

/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        struct lpfc_iocbq *rspiocb)
{
        IOCB_t *irsp = &rspiocb->iocb;
        uint16_t abort_iotag, abort_context;
        struct lpfc_iocbq *abort_iocb = NULL;

        if (irsp->ulpStatus) {

                /*
                 * Assume that the port already completed and returned, or
                 * will return the iocb. Just log the message.
                 */
                abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
                abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

                spin_lock_irq(&phba->hbalock);
                if (phba->sli_rev < LPFC_SLI_REV4) {
                        if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
                            irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
                            irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
                                spin_unlock_irq(&phba->hbalock);
                                goto release_iocb;
                        }
                        if (abort_iotag != 0 &&
                            abort_iotag <= phba->sli.last_iotag)
                                abort_iocb =
                                        phba->sli.iocbq_lookup[abort_iotag];
                } else
                        /* For sli4 the abort_tag is the XRI,
                         * so the abort routine puts the iotag of the iocb
                         * being aborted in the context field of the abort
                         * IOCB.
                         */
                        abort_iocb = phba->sli.iocbq_lookup[abort_context];

                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
                                "0327 Cannot abort els iocb %p "
                                "with tag %x context %x, abort status %x, "
                                "abort code %x\n",
                                abort_iocb, abort_iotag, abort_context,
                                irsp->ulpStatus, irsp->un.ulpWord[4]);

                spin_unlock_irq(&phba->hbalock);
        }
release_iocb:
        lpfc_sli_release_iocbq(phba, cmdiocb);
        return;
}

/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
{
        IOCB_t *irsp = &rspiocb->iocb;

        /* ELS cmd tag <ulpIoTag> completes */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0139 Ignoring ELS cmd tag x%x completion Data: "
                        "x%x x%x x%x\n",
                        irsp->ulpIoTag, irsp->ulpStatus,
                        irsp->un.ulpWord[4], irsp->ulpTimeout);
        if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
                lpfc_ct_free_iocb(phba, cmdiocb);
        else
                lpfc_els_free_iocb(phba, cmdiocb);
        return;
}

/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort request,
 * this function issues the abort unconditionally. This function is
 * called with hbalock held. The function returns 0 when it fails due to
 * memory allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        struct lpfc_iocbq *abtsiocbp;
        IOCB_t *icmd = NULL;
        IOCB_t *iabt = NULL;
        int retval;
        unsigned long iflags;
        struct lpfc_nodelist *ndlp;

        lockdep_assert_held(&phba->hbalock);

        /*
         * There are certain command types we don't want to abort. And we
         * don't want to abort commands that are already in the process of
         * being aborted.
         */
        icmd = &cmdiocb->iocb;
        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
                return 0;

        /* issue ABTS for this IOCB based on iotag */
        abtsiocbp = __lpfc_sli_get_iocbq(phba);
        if (abtsiocbp == NULL)
                return 0;

        /* This signals the response to set the correct status
         * before calling the completion handler
         */
        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

        iabt = &abtsiocbp->iocb;
        iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
        iabt->un.acxri.abortContextTag = icmd->ulpContext;
        if (phba->sli_rev == LPFC_SLI_REV4) {
                iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
                iabt->un.acxri.abortContextTag = cmdiocb->iotag;
        } else {
                iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
                if (pring->ringno == LPFC_ELS_RING) {
                        ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
                        iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
                }
        }
        iabt->ulpLe = 1;
        iabt->ulpClass = icmd->ulpClass;

        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
        abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
        if (cmdiocb->iocb_flag & LPFC_IO_FCP)
                abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
        if (cmdiocb->iocb_flag & LPFC_IO_FOF)
                abtsiocbp->iocb_flag |= LPFC_IO_FOF;

        if (phba->link_state >= LPFC_LINK_UP)
                iabt->ulpCommand = CMD_ABORT_XRI_CN;
        else
                iabt->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
        abtsiocbp->vport = vport;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0339 Abort xri x%x, original iotag x%x, "
                         "abort cmd iotag x%x\n",
                         iabt->un.acxri.abortIoTag,
                         iabt->un.acxri.abortContextTag,
                         abtsiocbp->iotag);

        if (phba->sli_rev == LPFC_SLI_REV4) {
                pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
                if (unlikely(pring == NULL))
                        return 0;
                /* Note: both hbalock and ring_lock need to be set here */
                spin_lock_irqsave(&pring->ring_lock, iflags);
                retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
                                               abtsiocbp, 0);
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
        } else {
                retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
                                               abtsiocbp, 0);
        }

        if (retval)
                __lpfc_sli_release_iocbq(phba, abtsiocbp);

        /*
         * Caller to this routine should check for IOCB_ERROR
         * and handle it properly. This routine no longer removes
         * iocb off txcmplq and call compl in case of IOCB_ERROR.
         */
        return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function is changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        int retval = IOCB_ERROR;
        IOCB_t *icmd = NULL;

        lockdep_assert_held(&phba->hbalock);

        /*
         * There are certain command types we don't want to abort. And we
         * don't want to abort commands that are already in the process of
         * being aborted.
         */
        icmd = &cmdiocb->iocb;
        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
                return 0;

        if (!pring) {
                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
                else
                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
                goto abort_iotag_exit;
        }

        /*
         * If we're unloading, don't abort iocb on the ELS ring, but change
         * the callback so that nothing happens when it finishes.
         */
        if ((vport->load_flag & FC_UNLOADING) &&
            (pring->ringno == LPFC_ELS_RING)) {
                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
                else
                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
                goto abort_iotag_exit;
        }

        /* Now, we try to issue the abort to the cmdiocb out */
        retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
        /*
         * Caller to this routine should check for IOCB_ERROR
         * and handle it properly. This routine no longer removes
         * iocb off txcmplq and call compl in case of IOCB_ERROR.
         */
        return retval;
}
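
/*
 * Illustrative sketch only -- not part of the driver. Both abort entry
 * points above assert hbalock ownership via lockdep_assert_held(), so a
 * caller is expected to follow this pattern (the function name is
 * hypothetical and the block is compiled out):
 */
#ifdef LPFC_ABORT_EXAMPLE
static int lpfc_example_abort_one(struct lpfc_hba *phba,
                                  struct lpfc_sli_ring *pring,
                                  struct lpfc_iocbq *cmdiocb)
{
        int ret;

        spin_lock_irq(&phba->hbalock);
        ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
        spin_unlock_irq(&phba->hbalock);

        /* Caller must check for IOCB_ERROR and clean up itself */
        return ret;
}
#endif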

/**
 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort request,
 * this function issues the abort unconditionally. This function is
 * called with hbalock held. The function returns 0 when it fails due to
 * memory allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        struct lpfc_iocbq *abtsiocbp;
        union lpfc_wqe128 *abts_wqe;
        int retval;

        /*
         * There are certain command types we don't want to abort. And we
         * don't want to abort commands that are already in the process of
         * being aborted.
         */
        if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
            cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
                return 0;

        /* issue ABTS for this io based on iotag */
        abtsiocbp = __lpfc_sli_get_iocbq(phba);
        if (abtsiocbp == NULL)
                return 0;

        /* This signals the response to set the correct status
         * before calling the completion handler
         */
        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

        /* Complete prepping the abort wqe and issue to the FW. */
        abts_wqe = &abtsiocbp->wqe;

        /* Clear any stale WQE contents */
        memset(abts_wqe, 0, sizeof(union lpfc_wqe));
        bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

        /* word 7 */
        bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
        bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
               cmdiocb->iocb.ulpClass);

        /* word 8 - tell the FW to abort the IO associated with this
         * outstanding exchange ID.
         */
        abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;

        /* word 9 - this is the iotag for the abts_wqe completion. */
        bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
               abtsiocbp->iotag);

        /* word 10 */
        bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

        /* word 11 */
        bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
        bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
        abtsiocbp->iocb_flag |= LPFC_IO_NVME;
        abtsiocbp->vport = vport;
        abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
        retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
        if (retval) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
                                 "6147 Failed abts issue_wqe with status x%x "
                                 "for oxid x%x\n",
                                 retval, cmdiocb->sli4_xritag);
                lpfc_sli_release_iocbq(phba, abtsiocbp);
                return retval;
        }

        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
                         "6148 Drv Abort NVME Request Issued for "
                         "ox_id x%x on reqtag x%x\n",
                         cmdiocb->sli4_xritag,
                         abtsiocbp->iotag);

        return retval;
}

/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_queue *qp = NULL;
        int i;

        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->sli3_ring[i];
                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
                return;
        }
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
}

/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
                           uint16_t tgt_id, uint64_t lun_id,
                           lpfc_ctx_cmd ctx_cmd)
{
        struct lpfc_scsi_buf *lpfc_cmd;
        int rc = 1;

        if (iocbq->vport != vport)
                return rc;

        if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
            !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
                return rc;

        lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

        if (lpfc_cmd->pCmd == NULL)
                return rc;

        switch (ctx_cmd) {
        case LPFC_CTX_LUN:
                if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
                    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
                        rc = 0;
                break;
        case LPFC_CTX_TGT:
                if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
                        rc = 0;
                break;
        case LPFC_CTX_HOST:
                rc = 0;
                break;
        default:
                printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
                       __func__, ctx_cmd);
                break;
        }

        return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
                  lpfc_ctx_cmd ctx_cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        int sum, i;

        spin_lock_irq(&phba->hbalock);
        for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
                                               ctx_cmd) == 0)
                        sum++;
        }
        spin_unlock_irq(&phba->hbalock);

        return sum;
}
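
/*
 * Illustrative sketch only -- not part of the driver. A typical use of
 * lpfc_sli_sum_iocb() is polling for outstanding FCP commands on one LUN,
 * e.g. while waiting for aborts to drain. The function name and the
 * target/LUN values are hypothetical; the block is compiled out.
 */
#ifdef LPFC_SUM_EXAMPLE
static int lpfc_example_lun_busy(struct lpfc_vport *vport)
{
        /* Nonzero means FCP iocbs are still pending on target 1, LUN 0 */
        return lpfc_sli_sum_iocb(vport, 1, 0, LPFC_CTX_LUN);
}
#endif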

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        struct lpfc_iocbq *rspiocb)
{
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "3096 ABORT_XRI_CN completing on rpi x%x "
                        "original iotag x%x, abort cmd iotag x%x "
                        "status 0x%x, reason 0x%x\n",
                        cmdiocb->iocb.un.acxri.abortContextTag,
                        cmdiocb->iocb.un.acxri.abortIoTag,
                        cmdiocb->iotag, rspiocb->iocb.ulpStatus,
                        rspiocb->iocb.un.ulpWord[4]);
        lpfc_sli_release_iocbq(phba, cmdiocb);
        return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_sli_ring *pring_s4;
        IOCB_t *cmd = NULL;
        int errcnt = 0, ret_val = 0;
        int i;

        /* all I/Os are in process of being flushed */
        if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
                return errcnt;

        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
                                               abort_cmd) != 0)
                        continue;

                /*
                 * If the iocbq is already being aborted, don't take a second
                 * action, but do count it.
                 */
                if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
                        continue;

                /* issue ABTS for this IOCB based on iotag */
                abtsiocb = lpfc_sli_get_iocbq(phba);
                if (abtsiocb == NULL) {
                        errcnt++;
                        continue;
                }

                /* indicate the IO is being aborted by the driver. */
                iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

                cmd = &iocbq->iocb;
                abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
                abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
                else
                        abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
                abtsiocb->iocb.ulpLe = 1;
                abtsiocb->iocb.ulpClass = cmd->ulpClass;
                abtsiocb->vport = vport;

                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
                abtsiocb->hba_wqidx = iocbq->hba_wqidx;
                if (iocbq->iocb_flag & LPFC_IO_FCP)
                        abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
                if (iocbq->iocb_flag & LPFC_IO_FOF)
                        abtsiocb->iocb_flag |= LPFC_IO_FOF;

                if (lpfc_is_link_up(phba))
                        abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
                else
                        abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

                /* Setup callback routine and issue the command. */
                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
                if (phba->sli_rev == LPFC_SLI_REV4) {
                        pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
                        if (!pring_s4)
                                continue;
                        ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
                                                      abtsiocb, 0);
                } else
                        ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
                                                      abtsiocb, 0);
                if (ret_val == IOCB_ERROR) {
                        lpfc_sli_release_iocbq(phba, abtsiocb);
                        errcnt++;
                        continue;
                }
        }

        return errcnt;
}
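
/*
 * Illustrative sketch only -- not part of the driver. It combines
 * lpfc_sli_abort_iocb() with the counting helper in the way the kernel-doc
 * above suggests: fire aborts for everything queued to one SCSI target and
 * report how many aborts could not be issued. Names and the target ID are
 * hypothetical; the block is compiled out.
 */
#ifdef LPFC_ABORT_TGT_EXAMPLE
static int lpfc_example_abort_target(struct lpfc_vport *vport,
                                     struct lpfc_sli_ring *pring,
                                     uint16_t tgt_id)
{
        /* lun_id is ignored for LPFC_CTX_TGT */
        int failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0,
                                         LPFC_CTX_TGT);

        /* Anything we did abort completes through lpfc_sli_abort_fcp_cmpl */
        return failed;
}
#endif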

/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters
 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                        uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *abtsiocbq;
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *iocbq;
        IOCB_t *icmd;
        int sum, i, ret_val;
        unsigned long iflags;
        struct lpfc_sli_ring *pring_s4;

        spin_lock_irqsave(&phba->hbalock, iflags);

        /* all I/Os are in process of being flushed */
        if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                return 0;
        }
        sum = 0;

        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
                                               cmd) != 0)
                        continue;

                /*
                 * If the iocbq is already being aborted, don't take a second
                 * action, but do count it.
                 */
                if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
                        continue;

                /* issue ABTS for this IOCB based on iotag */
                abtsiocbq = __lpfc_sli_get_iocbq(phba);
                if (abtsiocbq == NULL)
                        continue;

                icmd = &iocbq->iocb;
                abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
                abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        abtsiocbq->iocb.un.acxri.abortIoTag =
                                iocbq->sli4_xritag;
                else
                        abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
                abtsiocbq->iocb.ulpLe = 1;
                abtsiocbq->iocb.ulpClass = icmd->ulpClass;
                abtsiocbq->vport = vport;

                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
                abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
                if (iocbq->iocb_flag & LPFC_IO_FCP)
                        abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
                if (iocbq->iocb_flag & LPFC_IO_FOF)
                        abtsiocbq->iocb_flag |= LPFC_IO_FOF;

                lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
                ndlp = lpfc_cmd->rdata->pnode;

                if (lpfc_is_link_up(phba) &&
                    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
                        abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
                else
                        abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

                /* Setup callback routine and issue the command. */
                abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

                /*
                 * Indicate the IO is being aborted by the driver and set
                 * the caller's flag into the aborted IO.
                 */
                iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
                        if (!pring_s4)
                                continue;
                        /* Note: both hbalock and ring_lock must be set here */
                        spin_lock(&pring_s4->ring_lock);
                        ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
                                                        abtsiocbq, 0);
                        spin_unlock(&pring_s4->ring_lock);
                } else {
                        ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
                                                        abtsiocbq, 0);
                }

                if (ret_val == IOCB_ERROR)
                        __lpfc_sli_release_iocbq(phba, abtsiocbq);
                else
                        sum++;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return sum;
}

/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other threads which
 * clean up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        wait_queue_head_t *pdone_q;
        unsigned long iflags;
        struct lpfc_scsi_buf *lpfc_cmd;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

                /*
                 * A time out has occurred for the iocb. If a time out
                 * completion handler has been supplied, call it. Otherwise,
                 * just free the iocbq.
                 */

                spin_unlock_irqrestore(&phba->hbalock, iflags);
                cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
                cmdiocbq->wait_iocb_cmpl = NULL;
                if (cmdiocbq->iocb_cmpl)
                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
                else
                        lpfc_sli_release_iocbq(phba, cmdiocbq);
                return;
        }

        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
        if (cmdiocbq->context2 && rspiocbq)
                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                       &rspiocbq->iocb, sizeof(IOCB_t));

        /* Set the exchange busy flag for task management commands */
        if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
            !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
                                        cur_iocbq);
                lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
        }

        pdone_q = cmdiocbq->context_un.wait_queue;
        if (pdone_q)
                wake_up(pdone_q);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
                  struct lpfc_iocbq *piocbq, uint32_t flag)
{
        unsigned long iflags;
        int ret;

        spin_lock_irqsave(&phba->hbalock, iflags);
        ret = piocbq->iocb_flag & flag;
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which to issue the iocb.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
                         uint32_t ring_number,
                         struct lpfc_iocbq *piocb,
                         struct lpfc_iocbq *prspiocbq,
                         uint32_t timeout)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
        long timeleft, timeout_req = 0;
        int retval = IOCB_SUCCESS;
        uint32_t creg_val;
        struct lpfc_iocbq *iocb;
        int txq_cnt = 0;
        int txcmplq_cnt = 0;
        struct lpfc_sli_ring *pring;
        unsigned long iflags;
        bool iocb_completed = true;

        if (phba->sli_rev >= LPFC_SLI_REV4)
                pring = lpfc_sli4_calc_ring(phba, piocb);
        else
                pring = &phba->sli.sli3_ring[ring_number];
        /*
         * If the caller has provided a response iocbq buffer, then context2
         * is NULL or it's an error.
         */
        if (prspiocbq) {
                if (piocb->context2)
                        return IOCB_ERROR;
                piocb->context2 = prspiocbq;
        }

        piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
        piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
        piocb->context_un.wait_queue = &done_q;
        piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val))
                        return IOCB_ERROR;
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
                                     SLI_IOCB_RET_IOCB);
        if (retval == IOCB_SUCCESS) {
                timeout_req = msecs_to_jiffies(timeout * 1000);
                timeleft = wait_event_timeout(done_q,
                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
                                timeout_req);
                spin_lock_irqsave(&phba->hbalock, iflags);
                if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

                        /*
                         * IOCB timed out. Inform the wake iocb wait
                         * completion function and set local status
                         */

                        iocb_completed = false;
                        piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                if (iocb_completed) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "0331 IOCB wake signaled\n");
                        /* Note: we are not indicating if the IOCB has a
                         * success status or not - that's for the caller to
                         * check. IOCB_SUCCESS means just that the command
                         * was sent and completed. Not that it completed
                         * successfully.
                         */
                } else if (timeleft == 0) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0338 IOCB wait timeout error - no "
                                        "wake response Data x%x\n", timeout);
                        retval = IOCB_TIMEDOUT;
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0330 IOCB wake NOT set, "
                                        "Data x%x x%lx\n",
                                        timeout, (timeleft / jiffies));
                        retval = IOCB_TIMEDOUT;
                }
        } else if (retval == IOCB_BUSY) {
                if (phba->cfg_log_verbose & LOG_SLI) {
                        list_for_each_entry(iocb, &pring->txq, list) {
                                txq_cnt++;
                        }
                        list_for_each_entry(iocb, &pring->txcmplq, list) {
                                txcmplq_cnt++;
                        }
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
                                phba->iocb_cnt, txq_cnt, txcmplq_cnt);
                }
                return retval;
        } else {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "0332 IOCB wait issue failed, Data x%x\n",
                                retval);
                retval = IOCB_ERROR;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val))
                        return IOCB_ERROR;
                creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (prspiocbq)
                piocb->context2 = NULL;

        piocb->context_un.wait_queue = NULL;
        piocb->iocb_cmpl = NULL;
        return retval;
}
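
/*
 * Illustrative sketch only -- not part of the driver. It shows the
 * synchronous calling convention documented above: the caller supplies a
 * response iocbq, checks the return code, and must not free the command
 * iocb itself on IOCB_TIMEDOUT. The helper name and the 30-second timeout
 * are hypothetical; response handling is elided and the block is
 * compiled out.
 */
#ifdef LPFC_IOCB_WAIT_EXAMPLE
static int lpfc_example_sync_iocb(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_iocbq *rspiocb;
        int rc;

        rspiocb = lpfc_sli_get_iocbq(phba);
        if (!rspiocb)
                return IOCB_ERROR;

        rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
                                      rspiocb, 30);

        /* On completion, rspiocb->iocb holds the copied response.
         * On IOCB_TIMEDOUT, cmdiocb is owned by the timeout completion
         * path and must not be freed here.
         */
        lpfc_sli_release_iocbq(phba, rspiocb);
        return rc;
}
#endif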

/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
                         uint32_t timeout)
{
        struct completion mbox_done;
        int retval;
        unsigned long flag;

        pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
        /* setup wake call as IOCB callback */
        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

        /* setup context3 field to pass wait_queue pointer to wake function */
        init_completion(&mbox_done);
        pmboxq->context3 = &mbox_done;
        /* now issue the command */
        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_for_completion_timeout(&mbox_done,
                                            msecs_to_jiffies(timeout * 1000));

                spin_lock_irqsave(&phba->hbalock, flag);
                pmboxq->context3 = NULL;
                /*
                 * if LPFC_MBX_WAKE flag is set the mailbox is completed
                 * else do not free the resources.
                 */
                if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
                        retval = MBX_SUCCESS;
                } else {
                        retval = MBX_TIMEOUT;
                        pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                }
                spin_unlock_irqrestore(&phba->hbalock, flag);
        }
        return retval;
}
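
/*
 * Illustrative sketch only -- not part of the driver. It follows the
 * contract documented above: a mailbox command is allocated from the
 * driver's mempool, issued synchronously, and the resources are freed only
 * when the return code is not MBX_TIMEOUT. The helper name and the choice
 * of lpfc_read_rev() as the prepared command are hypothetical; the block
 * is compiled out.
 */
#ifdef LPFC_MBOX_WAIT_EXAMPLE
static int lpfc_example_sync_mbox(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmboxq;
        int rc;

        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmboxq)
                return -ENOMEM;

        lpfc_read_rev(phba, pmboxq);
        rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

        /* On MBX_TIMEOUT the mailbox still belongs to the firmware path */
        if (rc != MBX_TIMEOUT)
                mempool_free(pmboxq, phba->mbox_mem_pool);

        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif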

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox command sub-system shutdown is due
 * to HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
        struct lpfc_sli *psli = &phba->sli;
        unsigned long timeout;

        if (mbx_action == LPFC_MBX_NO_WAIT) {
                /* delay 100ms for port state */
                msleep(100);
                lpfc_sli_mbox_sys_flush(phba);
                return;
        }
        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

        /* Disable softirqs, including timers from obtaining phba->hbalock */
        local_bh_disable();

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                /* Determine how long we might wait for the active mailbox
                 * command to be gracefully completed by firmware.
                 */
                if (phba->sli.mbox_active)
                        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
                                                phba->sli.mbox_active) *
                                                1000) + jiffies;
                spin_unlock_irq(&phba->hbalock);

                /* Enable softirqs again, done with phba->hbalock */
                local_bh_enable();

                while (phba->sli.mbox_active) {
                        /* Check active mailbox complete status every 2ms */
                        msleep(2);
                        if (time_after(jiffies, timeout))
                                /* Timeout, let the mailbox flush routine
                                 * forcefully release the active mailbox
                                 * command.
                                 */
                                break;
                }
        } else {
                spin_unlock_irq(&phba->hbalock);

                /* Enable softirqs again, done with phba->hbalock */
                local_bh_enable();
        }

        lpfc_sli_mbox_sys_flush(phba);
}

/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
        uint32_t ha_copy;

        /* Read chip Host Attention (HA) register */
        if (lpfc_readl(phba->HAregaddr, &ha_copy))
                goto unplug_err;

        if (ha_copy & HA_ERATT) {
                /* Read host status register to retrieve error event */
                if (lpfc_sli_read_hs(phba))
                        goto unplug_err;

                /* Check if a deferred error condition is active */
                if ((HS_FFER1 & phba->work_hs) &&
                    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
                      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
                        phba->hba_flag |= DEFER_ERATT;
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr);
                }

                /* Set the driver HA work bitmap */
                phba->work_ha |= HA_ERATT;
                /* Indicate polling handles this ERATT */
                phba->hba_flag |= HBA_ERATT_HANDLED;
                return 1;
        }
        return 0;

unplug_err:
        /* Set the driver HS work bitmap */
        phba->work_hs |= UNPLUG_ERR;
        /* Set the driver HA work bitmap */
        phba->work_ha |= HA_ERATT;
        /* Indicate polling handles this ERATT */
        phba->hba_flag |= HBA_ERATT_HANDLED;
        return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
        uint32_t uerr_sta_hi, uerr_sta_lo;
        uint32_t if_type, portsmphr;
        struct lpfc_register portstat_reg;

        /*
         * For now, use the SLI4 device internal unrecoverable error
         * registers for error attention. This can be changed later.
         */
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
        case LPFC_SLI_INTF_IF_TYPE_0:
                if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
                               &uerr_sta_lo) ||
                    lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
                               &uerr_sta_hi)) {
                        phba->work_hs |= UNPLUG_ERR;
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
                    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "1423 HBA Unrecoverable error: "
                                        "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
                                        "ue_mask_lo_reg=0x%x, "
                                        "ue_mask_hi_reg=0x%x\n",
                                        uerr_sta_lo, uerr_sta_hi,
                                        phba->sli4_hba.ue_mask_lo,
                                        phba->sli4_hba.ue_mask_hi);
                        phba->work_status[0] = uerr_sta_lo;
                        phba->work_status[1] = uerr_sta_hi;
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                break;
        case LPFC_SLI_INTF_IF_TYPE_2:
        case LPFC_SLI_INTF_IF_TYPE_6:
                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                               &portstat_reg.word0) ||
                    lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
                               &portsmphr)) {
                        phba->work_hs |= UNPLUG_ERR;
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
                        phba->work_status[0] =
                                readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
                        phba->work_status[1] =
                                readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2885 Port Status Event: "
                                        "port status reg 0x%x, "
                                        "port smphr reg 0x%x, "
                                        "error 1=0x%x, error 2=0x%x\n",
                                        portstat_reg.word0,
                                        portsmphr,
                                        phba->work_status[0],
                                        phba->work_status[1]);
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2886 HBA Error Attention on unsupported "
                                "if type %d.", if_type);
                return 1;
        }

        return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
        uint32_t ha_copy;

        /* If somebody is waiting to handle an eratt, don't process it
         * here. The brdkill function will do this.
         */
        if (phba->link_flag & LS_IGNORE_ERATT)
                return 0;

        /* Check if interrupt handler handles this ERATT */
        spin_lock_irq(&phba->hbalock);
        if (phba->hba_flag & HBA_ERATT_HANDLED) {
                /* Interrupt handler has handled ERATT */
                spin_unlock_irq(&phba->hbalock);
                return 0;
        }

        /*
         * If there is deferred error attention, do not check for error
         * attention
         */
        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
                spin_unlock_irq(&phba->hbalock);
                return 0;
        }

        /* If PCI channel is offline, don't process it */
        if (unlikely(pci_channel_offline(phba->pcidev))) {
                spin_unlock_irq(&phba->hbalock);
                return 0;
        }

        switch (phba->sli_rev) {
        case LPFC_SLI_REV2:
        case LPFC_SLI_REV3:
                /* Read chip Host Attention (HA) register */
                ha_copy = lpfc_sli_eratt_read(phba);
                break;
        case LPFC_SLI_REV4:
                /* Read device Unrecoverable Error (UERR) registers */
                ha_copy = lpfc_sli4_eratt_read(phba);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0299 Invalid SLI revision (%d)\n",
                                phba->sli_rev);
                ha_copy = 0;
                break;
        }
        spin_unlock_irq(&phba->hbalock);

        return ha_copy;
}
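
/*
 * Illustrative sketch only -- not part of the driver. The kernel-doc above
 * says lpfc_sli_check_eratt() runs from timer (soft interrupt) context; a
 * hypothetical poll callback built on it could look like this, assuming the
 * hba's eratt_poll timer and the lpfc_worker_wake_up() helper. The block is
 * compiled out.
 */
#ifdef LPFC_ERATT_POLL_EXAMPLE
static void lpfc_example_eratt_poll(struct timer_list *t)
{
        struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

        if (lpfc_sli_check_eratt(phba))
                /* Error attention seen: kick the worker thread */
                lpfc_worker_wake_up(phba);
}
#endif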

/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
        /* If the pci channel is offline, ignore all the interrupts */
        if (unlikely(pci_channel_offline(phba->pcidev)))
                return -EIO;

        /* Update device level interrupt statistics */
        phba->sli.slistat.sli_intr++;

        /* Ignore all interrupts during initialization. */
        if (unlikely(phba->link_state < LPFC_LINK_DOWN))
                return -EIO;

        return 0;
}
12407
12408/**
3772a991 12409 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
e59058c4
JS
12410 * @irq: Interrupt number.
12411 * @dev_id: The device context pointer.
12412 *
9399627f 12413 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
12414 * service routine when device with SLI-3 interface spec is enabled with
12415 * MSI-X multi-message interrupt mode and there are slow-path events in
12416 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12417 * interrupt mode, this function is called as part of the device-level
12418 * interrupt handler. When the PCI slot is in error recovery or the HBA
12419 * is undergoing initialization, the interrupt handler will not process
12420 * the interrupt. The link attention and ELS ring attention events are
12421 * handled by the worker thread. The interrupt handler signals the worker
12422 * thread and returns for these events. This function is called without
12423 * any lock held. It gets the hbalock to access and update SLI data
9399627f
JS
12424 * structures.
12425 *
12426 * This function returns IRQ_HANDLED when interrupt is handled else it
12427 * returns IRQ_NONE.
e59058c4 12428 **/
dea3101e 12429irqreturn_t
3772a991 12430lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 12431{
2e0fef85 12432 struct lpfc_hba *phba;
a747c9ce 12433 uint32_t ha_copy, hc_copy;
dea3101e 12434 uint32_t work_ha_copy;
12435 unsigned long status;
5b75da2f 12436 unsigned long iflag;
dea3101e 12437 uint32_t control;
12438
92d7f7b0 12439 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
12440 struct lpfc_vport *vport;
12441 struct lpfc_nodelist *ndlp;
12442 struct lpfc_dmabuf *mp;
92d7f7b0
JS
12443 LPFC_MBOXQ_t *pmb;
12444 int rc;
12445
dea3101e 12446 /*
12447 * Get the driver's phba structure from the dev_id and
12448 * assume the HBA is not interrupting.
12449 */
9399627f 12450 phba = (struct lpfc_hba *)dev_id;
dea3101e 12451
12452 if (unlikely(!phba))
12453 return IRQ_NONE;
12454
dea3101e 12455 /*
9399627f
JS
12456 * Stuff needs to be attended to when this function is invoked as an
12457 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 12458 */
9399627f 12459 if (phba->intr_type == MSIX) {
3772a991
JS
12460 /* Check device state for handling interrupt */
12461 if (lpfc_intr_state_check(phba))
9399627f
JS
12462 return IRQ_NONE;
12463 /* Need to read HA REG for slow-path events */
5b75da2f 12464 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12465 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12466 goto unplug_error;
9399627f
JS
12467 /* If somebody is waiting to handle an eratt don't process it
12468 * here. The brdkill function will do this.
12469 */
12470 if (phba->link_flag & LS_IGNORE_ERATT)
12471 ha_copy &= ~HA_ERATT;
12472 /* Check the need for handling ERATT in interrupt handler */
12473 if (ha_copy & HA_ERATT) {
12474 if (phba->hba_flag & HBA_ERATT_HANDLED)
12475 /* ERATT polling has handled ERATT */
12476 ha_copy &= ~HA_ERATT;
12477 else
12478 /* Indicate interrupt handler handles ERATT */
12479 phba->hba_flag |= HBA_ERATT_HANDLED;
12480 }
a257bf90
JS
12481
12482 /*
12483 * If there is deferred error attention, do not check for any
12484 * interrupt.
12485 */
12486 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12487 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12488 return IRQ_NONE;
12489 }
12490
9399627f 12491 /* Clear up only attention source related to slow-path */
9940b97b
JS
12492 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12493 goto unplug_error;
12494
a747c9ce
JS
12495 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12496 HC_LAINT_ENA | HC_ERINT_ENA),
12497 phba->HCregaddr);
9399627f
JS
12498 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12499 phba->HAregaddr);
a747c9ce 12500 writel(hc_copy, phba->HCregaddr);
9399627f 12501 readl(phba->HAregaddr); /* flush */
5b75da2f 12502 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12503 } else
12504 ha_copy = phba->ha_copy;
dea3101e 12505
dea3101e 12506 work_ha_copy = ha_copy & phba->work_ha_mask;
12507
9399627f 12508 if (work_ha_copy) {
dea3101e 12509 if (work_ha_copy & HA_LATT) {
12510 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12511 /*
12512 * Turn off Link Attention interrupts
12513 * until CLEAR_LA done
12514 */
5b75da2f 12515 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12516 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
9940b97b
JS
12517 if (lpfc_readl(phba->HCregaddr, &control))
12518 goto unplug_error;
dea3101e 12519 control &= ~HC_LAINT_ENA;
12520 writel(control, phba->HCregaddr);
12521 readl(phba->HCregaddr); /* flush */
5b75da2f 12522 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 12523 }
12524 else
12525 work_ha_copy &= ~HA_LATT;
12526 }
12527
9399627f 12528 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
858c9f6c
JS
12529 /*
12530 * Turn off slow ring interrupts; LPFC_ELS_RING is
12531 * the only slow ring.
12532 */
12533 status = (work_ha_copy &
12534 (HA_RXMASK << (4*LPFC_ELS_RING)));
12535 status >>= (4*LPFC_ELS_RING);
12536 if (status & HA_RXMASK) {
5b75da2f 12537 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12538 if (lpfc_readl(phba->HCregaddr, &control))
12539 goto unplug_error;
a58cbd52
JS
12540
12541 lpfc_debugfs_slow_ring_trc(phba,
12542 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12543 control, status,
12544 (uint32_t)phba->sli.slistat.sli_intr);
12545
858c9f6c 12546 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
12547 lpfc_debugfs_slow_ring_trc(phba,
12548 "ISR Disable ring:"
12549 "pwork:x%x hawork:x%x wait:x%x",
12550 phba->work_ha, work_ha_copy,
12551 (uint32_t)((unsigned long)
5e9d9b82 12552 &phba->work_waitq));
a58cbd52 12553
858c9f6c
JS
12554 control &=
12555 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e 12556 writel(control, phba->HCregaddr);
12557 readl(phba->HCregaddr); /* flush */
dea3101e 12558 }
a58cbd52
JS
12559 else {
12560 lpfc_debugfs_slow_ring_trc(phba,
12561 "ISR slow ring: pwork:"
12562 "x%x hawork:x%x wait:x%x",
12563 phba->work_ha, work_ha_copy,
12564 (uint32_t)((unsigned long)
5e9d9b82 12565 &phba->work_waitq));
a58cbd52 12566 }
5b75da2f 12567 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 12568 }
12569 }
5b75da2f 12570 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 12571 if (work_ha_copy & HA_ERATT) {
9940b97b
JS
12572 if (lpfc_sli_read_hs(phba))
12573 goto unplug_error;
a257bf90
JS
12574 /*
12575 * Check if there is a deferred error condition
12576 * is active
12577 */
12578 if ((HS_FFER1 & phba->work_hs) &&
12579 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0
JS
12580 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12581 phba->work_hs)) {
a257bf90
JS
12582 phba->hba_flag |= DEFER_ERATT;
12583 /* Clear all interrupt enable conditions */
12584 writel(0, phba->HCregaddr);
12585 readl(phba->HCregaddr);
12586 }
12587 }
12588
9399627f 12589 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 12590 pmb = phba->sli.mbox_active;
04c68496 12591 pmbox = &pmb->u.mb;
34b02dcd 12592 mbox = phba->mbox;
858c9f6c 12593 vport = pmb->vport;
92d7f7b0
JS
12594
12595 /* First check out the status word */
12596 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12597 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 12598 spin_unlock_irqrestore(&phba->hbalock, iflag);
92d7f7b0
JS
12599 /*
12600 * Stray Mailbox Interrupt, mbxCommand <cmd>
12601 * mbxStatus <status>
12602 */
09372820 12603 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 12604 LOG_SLI,
e8b62011 12605 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
12606 "Interrupt mbxCommand x%x "
12607 "mbxStatus x%x\n",
e8b62011 12608 (vport ? vport->vpi : 0),
92d7f7b0
JS
12609 pmbox->mbxCommand,
12610 pmbox->mbxStatus);
09372820
JS
12611 /* clear mailbox attention bit */
12612 work_ha_copy &= ~HA_MBATT;
12613 } else {
97eab634 12614 phba->sli.mbox_active = NULL;
5b75da2f 12615 spin_unlock_irqrestore(&phba->hbalock, iflag);
09372820
JS
12616 phba->last_completion_time = jiffies;
12617 del_timer(&phba->sli.mbox_tmo);
09372820
JS
12618 if (pmb->mbox_cmpl) {
12619 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12620 MAILBOX_CMD_SIZE);
7a470277 12621 if (pmb->out_ext_byte_len &&
3e1f0718 12622 pmb->ctx_buf)
7a470277
JS
12623 lpfc_sli_pcimem_bcopy(
12624 phba->mbox_ext,
3e1f0718 12625 pmb->ctx_buf,
7a470277 12626 pmb->out_ext_byte_len);
09372820
JS
12627 }
12628 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12629 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12630
12631 lpfc_debugfs_disc_trc(vport,
12632 LPFC_DISC_TRC_MBOX_VPORT,
12633 "MBOX dflt rpi: : "
12634 "status:x%x rpi:x%x",
12635 (uint32_t)pmbox->mbxStatus,
12636 pmbox->un.varWords[0], 0);
12637
12638 if (!pmbox->mbxStatus) {
12639 mp = (struct lpfc_dmabuf *)
3e1f0718 12640 (pmb->ctx_buf);
09372820 12641 ndlp = (struct lpfc_nodelist *)
3e1f0718 12642 pmb->ctx_ndlp;
09372820
JS
12643
12644 /* Reg_LOGIN of dflt RPI was
12645 * successful. Now let's get
12646 * rid of the RPI using the
12647 * same mbox buffer.
12648 */
12649 lpfc_unreg_login(phba,
12650 vport->vpi,
12651 pmbox->un.varWords[0],
12652 pmb);
12653 pmb->mbox_cmpl =
12654 lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
12655 pmb->ctx_buf = mp;
12656 pmb->ctx_ndlp = ndlp;
09372820 12657 pmb->vport = vport;
58da1ffb
JS
12658 rc = lpfc_sli_issue_mbox(phba,
12659 pmb,
12660 MBX_NOWAIT);
12661 if (rc != MBX_BUSY)
12662 lpfc_printf_log(phba,
12663 KERN_ERR,
12664 LOG_MBOX | LOG_SLI,
d7c255b2 12665 "0350 rc should have"
6a9c52cf 12666 "been MBX_BUSY\n");
3772a991
JS
12667 if (rc != MBX_NOT_FINISHED)
12668 goto send_current_mbox;
09372820 12669 }
858c9f6c 12670 }
5b75da2f
JS
12671 spin_lock_irqsave(
12672 &phba->pport->work_port_lock,
12673 iflag);
09372820
JS
12674 phba->pport->work_port_events &=
12675 ~WORKER_MBOX_TMO;
5b75da2f
JS
12676 spin_unlock_irqrestore(
12677 &phba->pport->work_port_lock,
12678 iflag);
09372820 12679 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 12680 }
97eab634 12681 } else
5b75da2f 12682 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 12683
92d7f7b0
JS
12684 if ((work_ha_copy & HA_MBATT) &&
12685 (phba->sli.mbox_active == NULL)) {
858c9f6c 12686send_current_mbox:
92d7f7b0 12687 /* Process next mailbox command if there is one */
58da1ffb
JS
12688 do {
12689 rc = lpfc_sli_issue_mbox(phba, NULL,
12690 MBX_NOWAIT);
12691 } while (rc == MBX_NOT_FINISHED);
12692 if (rc != MBX_SUCCESS)
12693 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12694 LOG_SLI, "0349 rc should be "
6a9c52cf 12695 "MBX_SUCCESS\n");
92d7f7b0
JS
12696 }
12697
5b75da2f 12698 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12699 phba->work_ha |= work_ha_copy;
5b75da2f 12700 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 12701 lpfc_worker_wake_up(phba);
dea3101e 12702 }
9399627f 12703 return IRQ_HANDLED;
9940b97b
JS
12704unplug_error:
12705 spin_unlock_irqrestore(&phba->hbalock, iflag);
12706 return IRQ_HANDLED;
dea3101e 12707
3772a991 12708} /* lpfc_sli_sp_intr_handler */
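/*
 * Illustrative sketch (hypothetical and simplified; the real wiring lives
 * in the driver's MSI-X setup code, and the vector indices and name
 * strings here are examples only): in MSI-X mode this slow-path handler
 * gets its own vector, with the fast-path handler on the next one:
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 &lpfc_sli_sp_intr_handler, 0, "lpfc-sp", phba);
 *	if (!rc)
 *		rc = request_irq(pci_irq_vector(phba->pcidev, 1),
 *				 &lpfc_sli_fp_intr_handler, 0, "lpfc-fp", phba);
 */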
9399627f
JS
12709
12710/**
3772a991 12711 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
9399627f
JS
12712 * @irq: Interrupt number.
12713 * @dev_id: The device context pointer.
12714 *
12715 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
12716 * service routine when device with SLI-3 interface spec is enabled with
12717 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12718 * ring event in the HBA. However, when the device is enabled with either
12719 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12720 * device-level interrupt handler. When the PCI slot is in error recovery
12721 * or the HBA is undergoing initialization, the interrupt handler will not
12722 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12723 * the interrupt context. This function is called without any lock held.
12724 * It gets the hbalock to access and update SLI data structures.
9399627f
JS
12725 *
12726 * This function returns IRQ_HANDLED when interrupt is handled else it
12727 * returns IRQ_NONE.
12728 **/
12729irqreturn_t
3772a991 12730lpfc_sli_fp_intr_handler(int irq, void *dev_id)
9399627f
JS
12731{
12732 struct lpfc_hba *phba;
12733 uint32_t ha_copy;
12734 unsigned long status;
5b75da2f 12735 unsigned long iflag;
895427bd 12736 struct lpfc_sli_ring *pring;
9399627f
JS
12737
12738 /* Get the driver's phba structure from the dev_id and
12739 * assume the HBA is not interrupting.
12740 */
12741 phba = (struct lpfc_hba *) dev_id;
12742
12743 if (unlikely(!phba))
12744 return IRQ_NONE;
12745
12746 /*
12747 * Stuff needs to be attended to when this function is invoked as an
12748 * individual interrupt handler in MSI-X multi-message interrupt mode
12749 */
12750 if (phba->intr_type == MSIX) {
3772a991
JS
12751 /* Check device state for handling interrupt */
12752 if (lpfc_intr_state_check(phba))
9399627f
JS
12753 return IRQ_NONE;
12754 /* Need to read HA REG for FCP ring and other ring events */
9940b97b
JS
12755 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12756 return IRQ_HANDLED;
9399627f 12757 /* Clear up only attention source related to fast-path */
5b75da2f 12758 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90
JS
12759 /*
12760 * If there is deferred error attention, do not check for
12761 * any interrupt.
12762 */
12763 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12764 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12765 return IRQ_NONE;
12766 }
9399627f
JS
12767 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12768 phba->HAregaddr);
12769 readl(phba->HAregaddr); /* flush */
5b75da2f 12770 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12771 } else
12772 ha_copy = phba->ha_copy;
dea3101e 12773
12774 /*
9399627f 12775 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 12776 */
9399627f
JS
12777 ha_copy &= ~(phba->work_ha_mask);
12778
12779 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 12780 status >>= (4*LPFC_FCP_RING);
895427bd 12781 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
858c9f6c 12782 if (status & HA_RXMASK)
895427bd 12783 lpfc_sli_handle_fast_ring_event(phba, pring, status);
a4bc3379
JS
12784
12785 if (phba->cfg_multi_ring_support == 2) {
12786 /*
9399627f
JS
12787 * Process all events on extra ring. Take the optimized path
12788 * for extra ring IO.
a4bc3379 12789 */
9399627f 12790 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 12791 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 12792 if (status & HA_RXMASK) {
a4bc3379 12793 lpfc_sli_handle_fast_ring_event(phba,
895427bd 12794 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
a4bc3379
JS
12795 status);
12796 }
12797 }
dea3101e 12798 return IRQ_HANDLED;
3772a991 12799} /* lpfc_sli_fp_intr_handler */
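/*
 * Worked example of the HA-register lane math used by both SLI-3 ISRs:
 * each ring owns a 4-bit attention nibble in HA at bit offset 4*ring, so
 *
 *	status = (ha_copy & (HA_RXMASK << (4 * LPFC_FCP_RING)));
 *	status >>= (4 * LPFC_FCP_RING);
 *
 * isolates the FCP ring's nibble in the low bits, and (status & HA_RXMASK)
 * then tests that ring independently of every other ring.
 */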
9399627f
JS
12800
12801/**
3772a991 12802 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
9399627f
JS
12803 * @irq: Interrupt number.
12804 * @dev_id: The device context pointer.
12805 *
3772a991
JS
12806 * This function is the HBA device-level interrupt handler to device with
12807 * SLI-3 interface spec, called from the PCI layer when either MSI or
12808 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12809 * requires driver attention. This function invokes the slow-path interrupt
12810 * attention handling function and fast-path interrupt attention handling
12811 * function in turn to process the relevant HBA attention events. This
12812 * function is called without any lock held. It gets the hbalock to access
12813 * and update SLI data structures.
9399627f
JS
12814 *
12815 * This function returns IRQ_HANDLED when interrupt is handled, else it
12816 * returns IRQ_NONE.
12817 **/
12818irqreturn_t
3772a991 12819lpfc_sli_intr_handler(int irq, void *dev_id)
9399627f
JS
12820{
12821 struct lpfc_hba *phba;
12822 irqreturn_t sp_irq_rc, fp_irq_rc;
12823 unsigned long status1, status2;
a747c9ce 12824 uint32_t hc_copy;
9399627f
JS
12825
12826 /*
12827 * Get the driver's phba structure from the dev_id and
12828 * assume the HBA is not interrupting.
12829 */
12830 phba = (struct lpfc_hba *) dev_id;
12831
12832 if (unlikely(!phba))
12833 return IRQ_NONE;
12834
3772a991
JS
12835 /* Check device state for handling interrupt */
12836 if (lpfc_intr_state_check(phba))
9399627f
JS
12837 return IRQ_NONE;
12838
12839 spin_lock(&phba->hbalock);
9940b97b
JS
12840 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12841 spin_unlock(&phba->hbalock);
12842 return IRQ_HANDLED;
12843 }
12844
9399627f
JS
12845 if (unlikely(!phba->ha_copy)) {
12846 spin_unlock(&phba->hbalock);
12847 return IRQ_NONE;
12848 } else if (phba->ha_copy & HA_ERATT) {
12849 if (phba->hba_flag & HBA_ERATT_HANDLED)
12850 /* ERATT polling has handled ERATT */
12851 phba->ha_copy &= ~HA_ERATT;
12852 else
12853 /* Indicate interrupt handler handles ERATT */
12854 phba->hba_flag |= HBA_ERATT_HANDLED;
12855 }
12856
a257bf90
JS
12857 /*
12858 * If there is deferred error attention, do not check for any interrupt.
12859 */
12860 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
ec21b3b0 12861 spin_unlock(&phba->hbalock);
a257bf90
JS
12862 return IRQ_NONE;
12863 }
12864
9399627f 12865 /* Clear attention sources except link and error attentions */
9940b97b
JS
12866 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12867 spin_unlock(&phba->hbalock);
12868 return IRQ_HANDLED;
12869 }
a747c9ce
JS
12870 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12871 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12872 phba->HCregaddr);
9399627f 12873 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
a747c9ce 12874 writel(hc_copy, phba->HCregaddr);
9399627f
JS
12875 readl(phba->HAregaddr); /* flush */
12876 spin_unlock(&phba->hbalock);
12877
12878 /*
12879 * Invokes slow-path host attention interrupt handling as appropriate.
12880 */
12881
12882 /* status of events with mailbox and link attention */
12883 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12884
12885 /* status of events with ELS ring */
12886 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12887 status2 >>= (4*LPFC_ELS_RING);
12888
12889 if (status1 || (status2 & HA_RXMASK))
3772a991 12890 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
9399627f
JS
12891 else
12892 sp_irq_rc = IRQ_NONE;
12893
12894 /*
12895 * Invoke fast-path host attention interrupt handling as appropriate.
12896 */
12897
12898 /* status of events with FCP ring */
12899 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12900 status1 >>= (4*LPFC_FCP_RING);
12901
12902 /* status of events with extra ring */
12903 if (phba->cfg_multi_ring_support == 2) {
12904 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12905 status2 >>= (4*LPFC_EXTRA_RING);
12906 } else
12907 status2 = 0;
12908
12909 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
3772a991 12910 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
9399627f
JS
12911 else
12912 fp_irq_rc = IRQ_NONE;
dea3101e 12913
9399627f
JS
12914 /* Return device-level interrupt handling status */
12915 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
3772a991 12916} /* lpfc_sli_intr_handler */
4f774513
JS
12917
12918/**
12919 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12920 * @phba: pointer to lpfc hba data structure.
12921 *
12922 * This routine is invoked by the worker thread to process all the pending
12923 * SLI4 FCP abort XRI events.
12924 **/
12925void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12926{
12927 struct lpfc_cq_event *cq_event;
12928
12929 /* First, declare the fcp xri abort event has been handled */
12930 spin_lock_irq(&phba->hbalock);
12931 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12932 spin_unlock_irq(&phba->hbalock);
12933 /* Now, handle all the fcp xri abort events */
12934 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12935 /* Get the first event from the head of the event queue */
12936 spin_lock_irq(&phba->hbalock);
12937 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12938 cq_event, struct lpfc_cq_event, list);
12939 spin_unlock_irq(&phba->hbalock);
12940 /* Notify aborted XRI for FCP work queue */
12941 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12942 /* Free the event processed back to the free pool */
12943 lpfc_sli4_cq_event_release(phba, cq_event);
12944 }
12945}
12946
12947/**
12948 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12949 * @phba: pointer to lpfc hba data structure.
12950 *
12951 * This routine is invoked by the worker thread to process all the pending
12952 * SLI4 els abort xri events.
12953 **/
12954void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12955{
12956 struct lpfc_cq_event *cq_event;
12957
12958 /* First, declare the els xri abort event has been handled */
12959 spin_lock_irq(&phba->hbalock);
12960 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12961 spin_unlock_irq(&phba->hbalock);
12962 /* Now, handle all the els xri abort events */
12963 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12964 /* Get the first event from the head of the event queue */
12965 spin_lock_irq(&phba->hbalock);
12966 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12967 cq_event, struct lpfc_cq_event, list);
12968 spin_unlock_irq(&phba->hbalock);
12969 /* Notify aborted XRI for ELS work queue */
12970 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12971 /* Free the event processed back to the free pool */
12972 lpfc_sli4_cq_event_release(phba, cq_event);
12973 }
12974}
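/*
 * Illustrative sketch (hypothetical, mirroring the worker-thread dispatch
 * in lpfc_work_done()): the two *_event_proc routines above run in process
 * context off the hba_flag bits set by the CQ handlers at interrupt time:
 *
 *	if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *		lpfc_sli4_fcp_xri_abort_event_proc(phba);
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);
 */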
12975
341af102
JS
12976/**
12977 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12978 * @phba: pointer to lpfc hba data structure
12979 * @pIocbIn: pointer to the rspiocbq
12980 * @pIocbOut: pointer to the cmdiocbq
12981 * @wcqe: pointer to the complete wcqe
12982 *
12983 * This routine transfers the fields of a command iocbq to a response iocbq
12984 * by copying all the IOCB fields from command iocbq and transferring the
12985 * completion status information from the complete wcqe.
12986 **/
4f774513 12987static void
341af102
JS
12988lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12989 struct lpfc_iocbq *pIocbIn,
4f774513
JS
12990 struct lpfc_iocbq *pIocbOut,
12991 struct lpfc_wcqe_complete *wcqe)
12992{
af22741c 12993 int numBdes, i;
341af102 12994 unsigned long iflags;
af22741c
JS
12995 uint32_t status, max_response;
12996 struct lpfc_dmabuf *dmabuf;
12997 struct ulp_bde64 *bpl, bde;
4f774513
JS
12998 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12999
13000 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13001 sizeof(struct lpfc_iocbq) - offset);
4f774513 13002 /* Map WCQE parameters into irspiocb parameters */
acd6859b
JS
13003 status = bf_get(lpfc_wcqe_c_status, wcqe);
13004 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
4f774513
JS
13005 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13006 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13007 pIocbIn->iocb.un.fcpi.fcpi_parm =
13008 pIocbOut->iocb.un.fcpi.fcpi_parm -
13009 wcqe->total_data_placed;
13010 else
13011 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e 13012 else {
4f774513 13013 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
af22741c
JS
13014 switch (pIocbOut->iocb.ulpCommand) {
13015 case CMD_ELS_REQUEST64_CR:
13016 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13017 bpl = (struct ulp_bde64 *)dmabuf->virt;
13018 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13019 max_response = bde.tus.f.bdeSize;
13020 break;
13021 case CMD_GEN_REQUEST64_CR:
13022 max_response = 0;
13023 if (!pIocbOut->context3)
13024 break;
13025 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13026 sizeof(struct ulp_bde64);
13027 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13028 bpl = (struct ulp_bde64 *)dmabuf->virt;
13029 for (i = 0; i < numBdes; i++) {
13030 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13031 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13032 max_response += bde.tus.f.bdeSize;
13033 }
13034 break;
13035 default:
13036 max_response = wcqe->total_data_placed;
13037 break;
13038 }
13039 if (max_response < wcqe->total_data_placed)
13040 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13041 else
13042 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13043 wcqe->total_data_placed;
695a814e 13044 }
341af102 13045
acd6859b
JS
13046 /* Convert BG errors for completion status */
13047 if (status == CQE_STATUS_DI_ERROR) {
13048 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13049
13050 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13051 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13052 else
13053 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13054
13055 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13056 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13057 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13058 BGS_GUARD_ERR_MASK;
13059 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13060 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13061 BGS_APPTAG_ERR_MASK;
13062 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13063 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13064 BGS_REFTAG_ERR_MASK;
13065
13066 /* Check to see if there was any good data before the error */
13067 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13068 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13069 BGS_HI_WATER_MARK_PRESENT_MASK;
13070 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13071 wcqe->total_data_placed;
13072 }
13073
13074 /*
13075 * Set ALL the error bits to indicate we don't know what
13076 * type of error it is.
13077 */
13078 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13079 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13080 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13081 BGS_GUARD_ERR_MASK);
13082 }
13083
341af102
JS
13084 /* Pick up HBA exchange busy condition */
13085 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13086 spin_lock_irqsave(&phba->hbalock, iflags);
13087 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13088 spin_unlock_irqrestore(&phba->hbalock, iflags);
13089 }
4f774513
JS
13090}
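/*
 * Worked example of the sizing logic above: max_response is the room the
 * command actually provided for a response (the reply BDE of an
 * ELS_REQUEST64_CR, or the summed response-side BDEs of a
 * GEN_REQUEST64_CR BPL), and the reported length is effectively
 *
 *	bdeSize = min(max_response, wcqe->total_data_placed);
 *
 * so a WCQE claiming more data than the buffers could hold never leaks an
 * oversized length to the upper layer.
 */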
13091
45ed1190
JS
13092/**
13093 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13094 * @phba: Pointer to HBA context object.
13095 * @wcqe: Pointer to work-queue completion queue entry.
13096 *
13097 * This routine handles an ELS work-queue completion event and constructs
13098 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13099 * discovery engine to handle.
13100 *
13101 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13102 **/
13103static struct lpfc_iocbq *
13104lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13105 struct lpfc_iocbq *irspiocbq)
13106{
895427bd 13107 struct lpfc_sli_ring *pring;
45ed1190
JS
13108 struct lpfc_iocbq *cmdiocbq;
13109 struct lpfc_wcqe_complete *wcqe;
13110 unsigned long iflags;
13111
895427bd 13112 pring = lpfc_phba_elsring(phba);
1234a6d5
DK
13113 if (unlikely(!pring))
13114 return NULL;
895427bd 13115
45ed1190 13116 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
7e56aa25 13117 spin_lock_irqsave(&pring->ring_lock, iflags);
45ed1190
JS
13118 pring->stats.iocb_event++;
13119 /* Look up the ELS command IOCB and create pseudo response IOCB */
13120 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13121 bf_get(lpfc_wcqe_c_request_tag, wcqe));
45ed1190 13122 if (unlikely(!cmdiocbq)) {
401bb416 13123 spin_unlock_irqrestore(&pring->ring_lock, iflags);
45ed1190
JS
13124 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13125 "0386 ELS complete with no corresponding "
401bb416
DK
13126 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13127 wcqe->word0, wcqe->total_data_placed,
13128 wcqe->parameter, wcqe->word3);
45ed1190
JS
13129 lpfc_sli_release_iocbq(phba, irspiocbq);
13130 return NULL;
13131 }
13132
401bb416
DK
13133 /* Put the iocb back on the txcmplq */
13134 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13135 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13136
45ed1190 13137 /* Fake the irspiocbq and copy necessary response information */
341af102 13138 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
45ed1190
JS
13139
13140 return irspiocbq;
13141}
13142
8a5ca109
JS
13143inline struct lpfc_cq_event *
13144lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13145{
13146 struct lpfc_cq_event *cq_event;
13147
13148 /* Allocate a new internal CQ_EVENT entry */
13149 cq_event = lpfc_sli4_cq_event_alloc(phba);
13150 if (!cq_event) {
13151 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13152 "0602 Failed to alloc CQ_EVENT entry\n");
13153 return NULL;
13154 }
13155
13156 /* Move the CQE into the event */
13157 memcpy(&cq_event->cqe, entry, size);
13158 return cq_event;
13159}
13160
04c68496
JS
13161/**
13162 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13163 * @phba: Pointer to HBA context object.
13164 * @cqe: Pointer to mailbox completion queue entry.
13165 *
13166 * This routine processes a mailbox completion queue entry with an
13167 * asynchronous event.
13168 *
13169 * Return: true if work posted to worker thread, otherwise false.
13170 **/
13171static bool
13172lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13173{
13174 struct lpfc_cq_event *cq_event;
13175 unsigned long iflags;
13176
13177 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13178 "0392 Async Event: word0:x%x, word1:x%x, "
13179 "word2:x%x, word3:x%x\n", mcqe->word0,
13180 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13181
8a5ca109
JS
13182 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13183 if (!cq_event)
04c68496 13184 return false;
04c68496
JS
13185 spin_lock_irqsave(&phba->hbalock, iflags);
13186 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13187 /* Set the async event flag */
13188 phba->hba_flag |= ASYNC_EVENT;
13189 spin_unlock_irqrestore(&phba->hbalock, iflags);
13190
13191 return true;
13192}
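/*
 * Illustrative sketch (hypothetical consumer side): the worker thread
 * later drains sp_asynce_work_queue under hbalock with the same pattern
 * used by the xri-abort event procs earlier in this file, roughly:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
 *			 cq_event, struct lpfc_cq_event, list);
 *	spin_unlock_irq(&phba->hbalock);
 *	(dispatch on the event's trailer code, then)
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */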
13193
13194/**
13195 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13196 * @phba: Pointer to HBA context object.
13197 * @cqe: Pointer to mailbox completion queue entry.
13198 *
13199 * This routine processes a mailbox completion queue entry with a mailbox
13200 * completion event.
13201 *
13202 * Return: true if work posted to worker thread, otherwise false.
13203 **/
13204static bool
13205lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13206{
13207 uint32_t mcqe_status;
13208 MAILBOX_t *mbox, *pmbox;
13209 struct lpfc_mqe *mqe;
13210 struct lpfc_vport *vport;
13211 struct lpfc_nodelist *ndlp;
13212 struct lpfc_dmabuf *mp;
13213 unsigned long iflags;
13214 LPFC_MBOXQ_t *pmb;
13215 bool workposted = false;
13216 int rc;
13217
13218 /* If not a mailbox-complete MCQE, bail out after checking mailbox consume */
13219 if (!bf_get(lpfc_trailer_completed, mcqe))
13220 goto out_no_mqe_complete;
13221
13222 /* Get the reference to the active mbox command */
13223 spin_lock_irqsave(&phba->hbalock, iflags);
13224 pmb = phba->sli.mbox_active;
13225 if (unlikely(!pmb)) {
13226 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13227 "1832 No pending MBOX command to handle\n");
13228 spin_unlock_irqrestore(&phba->hbalock, iflags);
13229 goto out_no_mqe_complete;
13230 }
13231 spin_unlock_irqrestore(&phba->hbalock, iflags);
13232 mqe = &pmb->u.mqe;
13233 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13234 mbox = phba->mbox;
13235 vport = pmb->vport;
13236
13237 /* Reset heartbeat timer */
13238 phba->last_completion_time = jiffies;
13239 del_timer(&phba->sli.mbox_tmo);
13240
13241 /* Move mbox data to caller's mailbox region, do endian swapping */
13242 if (pmb->mbox_cmpl && mbox)
48f8fdb4 13243 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
04c68496 13244
73d91e50
JS
13245 /*
13246 * For mcqe errors, conditionally move a modified error code to
13247 * the mbox so that the error will not be missed.
13248 */
13249 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13250 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13251 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13252 bf_set(lpfc_mqe_status, mqe,
13253 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13254 }
04c68496
JS
13255 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13256 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13257 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13258 "MBOX dflt rpi: status:x%x rpi:x%x",
13259 mcqe_status,
13260 pmbox->un.varWords[0], 0);
13261 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
3e1f0718
JS
13262 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13263 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
04c68496
JS
13264 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13265 * rid of the RPI using the same mbox buffer.
13266 */
13267 lpfc_unreg_login(phba, vport->vpi,
13268 pmbox->un.varWords[0], pmb);
13269 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
13270 pmb->ctx_buf = mp;
13271 pmb->ctx_ndlp = ndlp;
04c68496
JS
13272 pmb->vport = vport;
13273 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13274 if (rc != MBX_BUSY)
13275 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13276 LOG_SLI, "0385 rc should "
13277 "have been MBX_BUSY\n");
13278 if (rc != MBX_NOT_FINISHED)
13279 goto send_current_mbox;
13280 }
13281 }
13282 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13283 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13284 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13285
13286 /* There is mailbox completion work to do */
13287 spin_lock_irqsave(&phba->hbalock, iflags);
13288 __lpfc_mbox_cmpl_put(phba, pmb);
13289 phba->work_ha |= HA_MBATT;
13290 spin_unlock_irqrestore(&phba->hbalock, iflags);
13291 workposted = true;
13292
13293send_current_mbox:
13294 spin_lock_irqsave(&phba->hbalock, iflags);
13295 /* Release the mailbox command posting token */
13296 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13297 /* Setting active mailbox pointer needs to be in sync with flag clear */
13298 phba->sli.mbox_active = NULL;
13299 spin_unlock_irqrestore(&phba->hbalock, iflags);
13300 /* Wake up worker thread to post the next pending mailbox command */
13301 lpfc_worker_wake_up(phba);
13302out_no_mqe_complete:
13303 if (bf_get(lpfc_trailer_consumed, mcqe))
13304 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13305 return workposted;
13306}
13307
13308/**
13309 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13310 * @phba: Pointer to HBA context object.
13311 * @cqe: Pointer to mailbox completion queue entry.
13312 *
13313 * This routine processes a mailbox completion queue entry; it invokes the
13314 * proper mailbox completion handling or asynchronous event handling routine
13315 * according to the MCQE's async bit.
13316 *
13317 * Return: true if work posted to worker thread, otherwise false.
13318 **/
13319static bool
13320lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
13321{
13322 struct lpfc_mcqe mcqe;
13323 bool workposted;
13324
13325 /* Copy the mailbox MCQE and convert endian order as needed */
48f8fdb4 13326 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
04c68496
JS
13327
13328 /* Invoke the proper event handling routine */
13329 if (!bf_get(lpfc_trailer_async, &mcqe))
13330 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13331 else
13332 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13333 return workposted;
13334}
13335
4f774513
JS
13336/**
13337 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13338 * @phba: Pointer to HBA context object.
2a76a283 13339 * @cq: Pointer to associated CQ
4f774513
JS
13340 * @wcqe: Pointer to work-queue completion queue entry.
13341 *
13342 * This routine handles an ELS work-queue completion event.
13343 *
13344 * Return: true if work posted to worker thread, otherwise false.
13345 **/
13346static bool
2a76a283 13347lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13348 struct lpfc_wcqe_complete *wcqe)
13349{
4f774513
JS
13350 struct lpfc_iocbq *irspiocbq;
13351 unsigned long iflags;
2a76a283 13352 struct lpfc_sli_ring *pring = cq->pring;
0e9bb8d7
JS
13353 int txq_cnt = 0;
13354 int txcmplq_cnt = 0;
13355 int fcp_txcmplq_cnt = 0;
4f774513 13356
11f0e34f
JS
13357 /* Check for response status */
13358 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13359 /* Log the error status */
13360 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13361 "0357 ELS CQE error: status=x%x: "
13362 "CQE: %08x %08x %08x %08x\n",
13363 bf_get(lpfc_wcqe_c_status, wcqe),
13364 wcqe->word0, wcqe->total_data_placed,
13365 wcqe->parameter, wcqe->word3);
13366 }
13367
45ed1190 13368 /* Get an irspiocbq for later ELS response processing use */
4f774513
JS
13369 irspiocbq = lpfc_sli_get_iocbq(phba);
13370 if (!irspiocbq) {
0e9bb8d7
JS
13371 if (!list_empty(&pring->txq))
13372 txq_cnt++;
13373 if (!list_empty(&pring->txcmplq))
13374 txcmplq_cnt++;
4f774513 13375 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2a9bf3d0
JS
13376 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13377 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
0e9bb8d7
JS
13378 txq_cnt, phba->iocb_cnt,
13379 fcp_txcmplq_cnt,
13380 txcmplq_cnt);
45ed1190 13381 return false;
4f774513 13382 }
4f774513 13383
45ed1190
JS
13384 /* Save off the slow-path queue event for work thread to process */
13385 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
4f774513 13386 spin_lock_irqsave(&phba->hbalock, iflags);
4d9ab994 13387 list_add_tail(&irspiocbq->cq_event.list,
45ed1190
JS
13388 &phba->sli4_hba.sp_queue_event);
13389 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513 13390 spin_unlock_irqrestore(&phba->hbalock, iflags);
4f774513 13391
45ed1190 13392 return true;
4f774513
JS
13393}
13394
13395/**
13396 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13397 * @phba: Pointer to HBA context object.
13398 * @wcqe: Pointer to work-queue completion queue entry.
13399 *
3f8b6fb7 13400 * This routine handles a slow-path WQ entry consumed event by invoking the
4f774513
JS
13401 * proper WQ release routine to the slow-path WQ.
13402 **/
13403static void
13404lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13405 struct lpfc_wcqe_release *wcqe)
13406{
2e90f4b5
JS
13407 /* sanity check on queue memory */
13408 if (unlikely(!phba->sli4_hba.els_wq))
13409 return;
4f774513
JS
13410 /* Check for the slow-path ELS work queue */
13411 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13412 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13413 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13414 else
13415 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13416 "2579 Slow-path wqe consume event carries "
13417 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13418 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13419 phba->sli4_hba.els_wq->queue_id);
13420}
13421
13422/**
13423 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13424 * @phba: Pointer to HBA context object.
13425 * @cq: Pointer to a WQ completion queue.
13426 * @wcqe: Pointer to work-queue completion queue entry.
13427 *
13428 * This routine handles an XRI abort event.
13429 *
13430 * Return: true if work posted to worker thread, otherwise false.
13431 **/
13432static bool
13433lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13434 struct lpfc_queue *cq,
13435 struct sli4_wcqe_xri_aborted *wcqe)
13436{
13437 bool workposted = false;
13438 struct lpfc_cq_event *cq_event;
13439 unsigned long iflags;
13440
4f774513
JS
13441 switch (cq->subtype) {
13442 case LPFC_FCP:
8a5ca109
JS
13443 cq_event = lpfc_cq_event_setup(
13444 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13445 if (!cq_event)
13446 return false;
4f774513
JS
13447 spin_lock_irqsave(&phba->hbalock, iflags);
13448 list_add_tail(&cq_event->list,
13449 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
13450 /* Set the fcp xri abort event flag */
13451 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
13452 spin_unlock_irqrestore(&phba->hbalock, iflags);
13453 workposted = true;
13454 break;
422c4cb7 13455 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
4f774513 13456 case LPFC_ELS:
8a5ca109
JS
13457 cq_event = lpfc_cq_event_setup(
13458 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13459 if (!cq_event)
13460 return false;
4f774513
JS
13461 spin_lock_irqsave(&phba->hbalock, iflags);
13462 list_add_tail(&cq_event->list,
13463 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13464 /* Set the els xri abort event flag */
13465 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13466 spin_unlock_irqrestore(&phba->hbalock, iflags);
13467 workposted = true;
13468 break;
318083ad 13469 case LPFC_NVME:
8a5ca109
JS
13470 /* Notify aborted XRI for NVME work queue */
13471 if (phba->nvmet_support)
13472 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13473 else
13474 lpfc_sli4_nvme_xri_aborted(phba, wcqe);
13475
13476 workposted = false;
318083ad 13477 break;
4f774513
JS
13478 default:
13479 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
318083ad
JS
13480 "0603 Invalid CQ subtype %d: "
13481 "%08x %08x %08x %08x\n",
13482 cq->subtype, wcqe->word0, wcqe->parameter,
13483 wcqe->word2, wcqe->word3);
4f774513
JS
13484 workposted = false;
13485 break;
13486 }
13487 return workposted;
13488}
13489
e817e5d7
JS
13490#define FC_RCTL_MDS_DIAGS 0xF4
13491
4f774513
JS
13492/**
13493 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13494 * @phba: Pointer to HBA context object.
13495 * @rcqe: Pointer to receive-queue completion queue entry.
13496 *
13497 * This routine processes a receive-queue completion queue entry.
13498 *
13499 * Return: true if work posted to worker thread, otherwise false.
13500 **/
13501static bool
4d9ab994 13502lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 13503{
4f774513 13504 bool workposted = false;
e817e5d7 13505 struct fc_frame_header *fc_hdr;
4f774513
JS
13506 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13507 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
547077a4 13508 struct lpfc_nvmet_tgtport *tgtp;
4f774513 13509 struct hbq_dmabuf *dma_buf;
7851fe2c 13510 uint32_t status, rq_id;
4f774513
JS
13511 unsigned long iflags;
13512
2e90f4b5
JS
13513 /* sanity check on queue memory */
13514 if (unlikely(!hrq) || unlikely(!drq))
13515 return workposted;
13516
7851fe2c
JS
13517 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13518 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13519 else
13520 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13521 if (rq_id != hrq->queue_id)
4f774513
JS
13522 goto out;
13523
4d9ab994 13524 status = bf_get(lpfc_rcqe_status, rcqe);
4f774513
JS
13525 switch (status) {
13526 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13527 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13528 "2537 Receive Frame Truncated!!\n");
13529 case FC_STATUS_RQ_SUCCESS:
13530 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 13531 lpfc_sli4_rq_release(hrq, drq);
4f774513
JS
13532 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13533 if (!dma_buf) {
b84daac9 13534 hrq->RQ_no_buf_found++;
4f774513
JS
13535 spin_unlock_irqrestore(&phba->hbalock, iflags);
13536 goto out;
13537 }
b84daac9 13538 hrq->RQ_rcv_buf++;
547077a4 13539 hrq->RQ_buf_posted--;
4d9ab994 13540 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
895427bd 13541
e817e5d7
JS
13542 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13543
13544 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13545 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13546 spin_unlock_irqrestore(&phba->hbalock, iflags);
13547 /* Handle MDS Loopback frames */
13548 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13549 break;
13550 }
13551
13552 /* save off the frame for the work thread to process */
4d9ab994 13553 list_add_tail(&dma_buf->cq_event.list,
45ed1190 13554 &phba->sli4_hba.sp_queue_event);
4f774513 13555 /* Frame received */
45ed1190 13556 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513
JS
13557 spin_unlock_irqrestore(&phba->hbalock, iflags);
13558 workposted = true;
13559 break;
4f774513 13560 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
13561 if (phba->nvmet_support) {
13562 tgtp = phba->targetport->private;
13563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13564 "6402 RQE Error x%x, posted %d err_cnt "
13565 "%d: %x %x %x\n",
13566 status, hrq->RQ_buf_posted,
13567 hrq->RQ_no_posted_buf,
13568 atomic_read(&tgtp->rcv_fcp_cmd_in),
13569 atomic_read(&tgtp->rcv_fcp_cmd_out),
13570 atomic_read(&tgtp->xmt_fcp_release));
13571 }
13572 /* fallthrough */
13573
13574 case FC_STATUS_INSUFF_BUF_NEED_BUF:
b84daac9 13575 hrq->RQ_no_posted_buf++;
4f774513
JS
13576 /* Post more buffers if possible */
13577 spin_lock_irqsave(&phba->hbalock, iflags);
13578 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13579 spin_unlock_irqrestore(&phba->hbalock, iflags);
13580 workposted = true;
13581 break;
13582 }
13583out:
13584 return workposted;
4f774513
JS
13585}
13586
4d9ab994
JS
13587/**
13588 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13589 * @phba: Pointer to HBA context object.
13590 * @cq: Pointer to the completion queue.
13591 * @wcqe: Pointer to a completion queue entry.
13592 *
25985edc 13593 * This routine processes a slow-path work-queue or receive-queue completion queue
4d9ab994
JS
13594 * entry.
13595 *
13596 * Return: true if work posted to worker thread, otherwise false.
13597 **/
13598static bool
13599lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13600 struct lpfc_cqe *cqe)
13601{
45ed1190 13602 struct lpfc_cqe cqevt;
4d9ab994
JS
13603 bool workposted = false;
13604
13605 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 13606 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
4d9ab994
JS
13607
13608 /* Check and process for different type of WCQE and dispatch */
45ed1190 13609 switch (bf_get(lpfc_cqe_code, &cqevt)) {
4d9ab994 13610 case CQE_CODE_COMPL_WQE:
45ed1190 13611 /* Process the WQ/RQ complete event */
bc73905a 13612 phba->last_completion_time = jiffies;
2a76a283 13613 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
45ed1190 13614 (struct lpfc_wcqe_complete *)&cqevt);
4d9ab994
JS
13615 break;
13616 case CQE_CODE_RELEASE_WQE:
13617 /* Process the WQ release event */
13618 lpfc_sli4_sp_handle_rel_wcqe(phba,
45ed1190 13619 (struct lpfc_wcqe_release *)&cqevt);
4d9ab994
JS
13620 break;
13621 case CQE_CODE_XRI_ABORTED:
13622 /* Process the WQ XRI abort event */
bc73905a 13623 phba->last_completion_time = jiffies;
4d9ab994 13624 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
45ed1190 13625 (struct sli4_wcqe_xri_aborted *)&cqevt);
4d9ab994
JS
13626 break;
13627 case CQE_CODE_RECEIVE:
7851fe2c 13628 case CQE_CODE_RECEIVE_V1:
4d9ab994 13629 /* Process the RQ event */
bc73905a 13630 phba->last_completion_time = jiffies;
4d9ab994 13631 workposted = lpfc_sli4_sp_handle_rcqe(phba,
45ed1190 13632 (struct lpfc_rcqe *)&cqevt);
4d9ab994
JS
13633 break;
13634 default:
13635 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13636 "0388 Not a valid WCQE code: x%x\n",
45ed1190 13637 bf_get(lpfc_cqe_code, &cqevt));
4d9ab994
JS
13638 break;
13639 }
13640 return workposted;
13641}
13642
4f774513
JS
13643/**
13644 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13645 * @phba: Pointer to HBA context object.
13646 * @eqe: Pointer to fast-path event queue entry.
13647 *
13648 * This routine processes an event queue entry from the slow-path event queue.
13649 * It looks up the completion queue indicated by the entry's resource id; if
13650 * no such queue exists, an error is logged and the routine just returns.
13651 * Otherwise, it schedules the slow-path work handler,
13652 * lpfc_sli4_sp_process_cq(), to consume all the entries on that completion
13653 * queue and rearm it.
13654 *
13655 **/
f485c18d 13656static void
67d12733
JS
13657lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13658 struct lpfc_queue *speq)
4f774513 13659{
67d12733 13660 struct lpfc_queue *cq = NULL, *childq;
4f774513
JS
13661 uint16_t cqid;
13662
4f774513 13663 /* Get the reference to the corresponding CQ */
cb5172ea 13664 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
4f774513 13665
4f774513
JS
13666 list_for_each_entry(childq, &speq->child_list, list) {
13667 if (childq->queue_id == cqid) {
13668 cq = childq;
13669 break;
13670 }
13671 }
13672 if (unlikely(!cq)) {
75baf696
JS
13673 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13674 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13675 "0365 Slow-path CQ identifier "
13676 "(%d) does not exist\n", cqid);
f485c18d 13677 return;
4f774513
JS
13678 }
13679
895427bd
JS
13680 /* Save EQ associated with this CQ */
13681 cq->assoc_qp = speq;
13682
f485c18d
DK
13683 if (!queue_work(phba->wq, &cq->spwork))
13684 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13685 "0390 Cannot schedule soft IRQ "
13686 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13687 cqid, cq->queue_id, smp_processor_id());
13688}
13689
13690/**
13691 * lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13692 * @work: Pointer to the work_struct embedded in the completion queue.
13693 *
13694 * This routine runs from the driver work queue to service a slow-path
13695 * completion queue. It dispatches each entry to the proper mailbox,
13696 * work-queue or receive-queue completion handler, bounded by the queue's
13697 * entry_repost budget, tracks the largest batch processed, and then
13698 * rearms the completion queue before waking the worker thread if any
13699 * handler posted work.
13700 *
13701 **/
13702static void
13703lpfc_sli4_sp_process_cq(struct work_struct *work)
13704{
13705 struct lpfc_queue *cq =
13706 container_of(work, struct lpfc_queue, spwork);
13707 struct lpfc_hba *phba = cq->phba;
13708 struct lpfc_cqe *cqe;
13709 bool workposted = false;
13710 int ccount = 0;
13711
4f774513
JS
13712 /* Process all the entries to the CQ */
13713 switch (cq->type) {
13714 case LPFC_MCQ:
13715 while ((cqe = lpfc_sli4_cq_get(cq))) {
13716 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
f485c18d 13717 if (!(++ccount % cq->entry_repost))
7869da18 13718 break;
b84daac9 13719 cq->CQ_mbox++;
4f774513
JS
13720 }
13721 break;
13722 case LPFC_WCQ:
13723 while ((cqe = lpfc_sli4_cq_get(cq))) {
c8a4ce0b
DK
13724 if (cq->subtype == LPFC_FCP ||
13725 cq->subtype == LPFC_NVME) {
13726#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13727 if (phba->ktime_on)
13728 cq->isr_timestamp = ktime_get_ns();
13729 else
13730 cq->isr_timestamp = 0;
13731#endif
895427bd 13732 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
0558056c 13733 cqe);
c8a4ce0b 13734 } else {
0558056c
JS
13735 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13736 cqe);
c8a4ce0b 13737 }
f485c18d 13738 if (!(++ccount % cq->entry_repost))
7869da18 13739 break;
4f774513 13740 }
b84daac9
JS
13741
13742 /* Track the max number of CQEs processed in 1 EQ */
f485c18d
DK
13743 if (ccount > cq->CQ_max_cqe)
13744 cq->CQ_max_cqe = ccount;
4f774513
JS
13745 break;
13746 default:
13747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13748 "0370 Invalid completion queue type (%d)\n",
13749 cq->type);
f485c18d 13750 return;
4f774513
JS
13751 }
13752
13753 /* Catch the no cq entry condition, log an error */
f485c18d 13754 if (unlikely(ccount == 0))
4f774513
JS
13755 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13756 "0371 No entry from the CQ: identifier "
13757 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13758
13759 /* In any case, flush and re-arm the CQ */
b71413dd 13760 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
4f774513
JS
13761
13762 /* wake up worker thread if there are works to be done */
13763 if (workposted)
13764 lpfc_worker_wake_up(phba);
13765}
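/*
 * Worked example of the budget check above: if, say, cq->entry_repost is
 * 64 (an assumed value; it is sized per queue at setup time), then
 * (++ccount % cq->entry_repost) hits zero after 64 consumed CQEs and the
 * loop breaks, so one invocation services at most entry_repost entries;
 * anything still pending simply raises the rearmed CQ again.
 */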
13766
13767/**
13768 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
2a76a283
JS
13769 * @phba: Pointer to HBA context object.
13770 * @cq: Pointer to associated CQ
13771 * @wcqe: Pointer to work-queue completion queue entry.
4f774513
JS
13772 *
13773 * This routine processes a fast-path work queue completion entry from the
13774 * fast-path event queue for FCP command response completion.
13775 **/
13776static void
2a76a283 13777lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13778 struct lpfc_wcqe_complete *wcqe)
13779{
2a76a283 13780 struct lpfc_sli_ring *pring = cq->pring;
4f774513
JS
13781 struct lpfc_iocbq *cmdiocbq;
13782 struct lpfc_iocbq irspiocbq;
13783 unsigned long iflags;
13784
4f774513
JS
13785 /* Check for response status */
13786 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13787 /* If resource errors reported from HBA, reduce queue
13788 * depth of the SCSI device.
13789 */
e3d2b802
JS
13790 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13791 IOSTAT_LOCAL_REJECT)) &&
13792 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13793 IOERR_NO_RESOURCES))
4f774513 13794 phba->lpfc_rampdown_queue_depth(phba);
e3d2b802 13795
4f774513 13796 /* Log the error status */
11f0e34f
JS
13797 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13798 "0373 FCP CQE error: status=x%x: "
13799 "CQE: %08x %08x %08x %08x\n",
4f774513 13800 bf_get(lpfc_wcqe_c_status, wcqe),
11f0e34f
JS
13801 wcqe->word0, wcqe->total_data_placed,
13802 wcqe->parameter, wcqe->word3);
4f774513
JS
13803 }
13804
13805 /* Look up the FCP command IOCB and create pseudo response IOCB */
7e56aa25
JS
13806 spin_lock_irqsave(&pring->ring_lock, iflags);
13807 pring->stats.iocb_event++;
4f774513
JS
13808 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13809 bf_get(lpfc_wcqe_c_request_tag, wcqe));
7e56aa25 13810 spin_unlock_irqrestore(&pring->ring_lock, iflags);
4f774513
JS
13811 if (unlikely(!cmdiocbq)) {
13812 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13813 "0374 FCP complete with no corresponding "
13814 "cmdiocb: iotag (%d)\n",
13815 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13816 return;
13817 }
c8a4ce0b
DK
13818#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13819 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13820#endif
895427bd
JS
13821 if (cmdiocbq->iocb_cmpl == NULL) {
13822 if (cmdiocbq->wqe_cmpl) {
13823 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13824 spin_lock_irqsave(&phba->hbalock, iflags);
13825 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13826 spin_unlock_irqrestore(&phba->hbalock, iflags);
13827 }
13828
13829 /* Pass the cmd_iocb and the wcqe to the upper layer */
13830 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13831 return;
13832 }
4f774513
JS
13833 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13834 "0375 FCP cmdiocb not callback function "
13835 "iotag: (%d)\n",
13836 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13837 return;
13838 }
13839
13840 /* Fake the irspiocb and copy necessary response information */
341af102 13841 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
4f774513 13842
0f65ff68
JS
13843 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13844 spin_lock_irqsave(&phba->hbalock, iflags);
13845 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13846 spin_unlock_irqrestore(&phba->hbalock, iflags);
13847 }
13848
4f774513
JS
13849 /* Pass the cmd_iocb and the rsp state to the upper layer */
13850 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13851}
13852
13853/**
13854 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13855 * @phba: Pointer to HBA context object.
13856 * @cq: Pointer to completion queue.
13857 * @wcqe: Pointer to work-queue completion queue entry.
13858 *
3f8b6fb7 13859 * This routine handles an fast-path WQ entry consumed event by invoking the
4f774513
JS
13860 * proper WQ release routine to the slow-path WQ.
13861 **/
13862static void
13863lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13864 struct lpfc_wcqe_release *wcqe)
13865{
13866 struct lpfc_queue *childwq;
13867 bool wqid_matched = false;
895427bd 13868 uint16_t hba_wqid;
4f774513
JS
13869
13870 /* Check for fast-path FCP work queue release */
895427bd 13871 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
4f774513 13872 list_for_each_entry(childwq, &cq->child_list, list) {
895427bd 13873 if (childwq->queue_id == hba_wqid) {
4f774513
JS
13874 lpfc_sli4_wq_release(childwq,
13875 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
6e8e1c14
JS
13876 if (childwq->q_flag & HBA_NVMET_WQFULL)
13877 lpfc_nvmet_wqfull_process(phba, childwq);
4f774513
JS
13878 wqid_matched = true;
13879 break;
13880 }
13881 }
13882 /* Report warning log message if no match found */
13883 if (wqid_matched != true)
13884 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13885 "2580 Fast-path wqe consume event carries "
895427bd 13886 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
4f774513
JS
13887}
13888
13889/**
2d7dbc4c
JS
13890 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13891 * @phba: Pointer to HBA context object.
13892 * @rcqe: Pointer to receive-queue completion queue entry.
4f774513 13893 *
2d7dbc4c
JS
13894 * This routine processes a receive-queue completion queue entry.
13895 *
13896 * Return: true if work posted to worker thread, otherwise false.
13897 **/
13898static bool
13899lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13900 struct lpfc_rcqe *rcqe)
13901{
13902 bool workposted = false;
13903 struct lpfc_queue *hrq;
13904 struct lpfc_queue *drq;
13905 struct rqb_dmabuf *dma_buf;
13906 struct fc_frame_header *fc_hdr;
547077a4 13907 struct lpfc_nvmet_tgtport *tgtp;
2d7dbc4c
JS
13908 uint32_t status, rq_id;
13909 unsigned long iflags;
13910 uint32_t fctl, idx;
13911
13912 if ((phba->nvmet_support == 0) ||
13913 (phba->sli4_hba.nvmet_cqset == NULL))
13914 return workposted;
13915
13916 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13917 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13918 drq = phba->sli4_hba.nvmet_mrq_data[idx];
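	/*
	 * The NVMET CQ set is created with consecutive queue ids (see
	 * lpfc_cq_create_set), so the offset from the first CQ id selects
	 * the matching MRQ header/data receive queue pair.
	 */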
13919
13920 /* sanity check on queue memory */
13921 if (unlikely(!hrq) || unlikely(!drq))
13922 return workposted;
13923
13924 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13925 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13926 else
13927 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
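	/* V1 receive CQEs carry the RQ id in a different field than the
	 * original CQE layout, hence the version check above.
	 */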
13928
13929 if ((phba->nvmet_support == 0) ||
13930 (rq_id != hrq->queue_id))
13931 return workposted;
13932
13933 status = bf_get(lpfc_rcqe_status, rcqe);
13934 switch (status) {
13935 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13937 "6126 Receive Frame Truncated!!\n");
78e1d200 13938 /* Drop thru */
2d7dbc4c 13939 case FC_STATUS_RQ_SUCCESS:
2d7dbc4c 13940 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 13941 lpfc_sli4_rq_release(hrq, drq);
2d7dbc4c
JS
13942 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13943 if (!dma_buf) {
13944 hrq->RQ_no_buf_found++;
13945 spin_unlock_irqrestore(&phba->hbalock, iflags);
13946 goto out;
13947 }
13948 spin_unlock_irqrestore(&phba->hbalock, iflags);
13949 hrq->RQ_rcv_buf++;
547077a4 13950 hrq->RQ_buf_posted--;
2d7dbc4c
JS
13951 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13952
13953 /* Just some basic sanity checks on FCP Command frame */
13954 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13955 fc_hdr->fh_f_ctl[1] << 8 |
13956 fc_hdr->fh_f_ctl[2]);
13957 if (((fctl &
13958 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13959 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13960 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13961 goto drop;
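	/*
	 * An FCP command arrives as a single-frame sequence, so FIRST_SEQ,
	 * END_SEQ and SEQ_INIT must all be set and the sequence count must
	 * be zero; anything else is not a valid command frame and is dropped.
	 */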
13962
13963 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13964 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
d613b6a7 13965 lpfc_nvmet_unsol_fcp_event(
66d7ce93 13966 phba, idx, dma_buf,
c8a4ce0b 13967 cq->isr_timestamp);
2d7dbc4c
JS
13968 return false;
13969 }
13970drop:
13971 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13972 break;
2d7dbc4c 13973 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
13974 if (phba->nvmet_support) {
13975 tgtp = phba->targetport->private;
13976 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13977 "6401 RQE Error x%x, posted %d err_cnt "
13978 "%d: %x %x %x\n",
13979 status, hrq->RQ_buf_posted,
13980 hrq->RQ_no_posted_buf,
13981 atomic_read(&tgtp->rcv_fcp_cmd_in),
13982 atomic_read(&tgtp->rcv_fcp_cmd_out),
13983 atomic_read(&tgtp->xmt_fcp_release));
13984 }
13985 /* fallthrough */
13986
13987 case FC_STATUS_INSUFF_BUF_NEED_BUF:
2d7dbc4c
JS
13988 hrq->RQ_no_posted_buf++;
13989 /* Post more buffers if possible */
2d7dbc4c
JS
13990 break;
13991 }
13992out:
13993 return workposted;
13994}
13995
4f774513 13996/**
895427bd 13997 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
4f774513
JS
13998 * @cq: Pointer to the completion queue.
 13999 * @cqe: Pointer to fast-path completion queue entry.
14000 *
 14001 * This routine processes a fast-path work queue completion entry from the
 14002 * fast-path event queue for FCP command response completion.
14003 **/
14004static int
895427bd 14005lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
14006 struct lpfc_cqe *cqe)
14007{
14008 struct lpfc_wcqe_release wcqe;
14009 bool workposted = false;
14010
14011 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 14012 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
4f774513
JS
14013
14014 /* Check and process for different type of WCQE and dispatch */
14015 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14016 case CQE_CODE_COMPL_WQE:
895427bd 14017 case CQE_CODE_NVME_ERSP:
b84daac9 14018 cq->CQ_wq++;
4f774513 14019 /* Process the WQ complete event */
98fc5dd9 14020 phba->last_completion_time = jiffies;
895427bd
JS
14021 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
14022 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14023 (struct lpfc_wcqe_complete *)&wcqe);
14024 if (cq->subtype == LPFC_NVME_LS)
14025 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
4f774513
JS
14026 (struct lpfc_wcqe_complete *)&wcqe);
14027 break;
14028 case CQE_CODE_RELEASE_WQE:
b84daac9 14029 cq->CQ_release_wqe++;
4f774513
JS
14030 /* Process the WQ release event */
14031 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14032 (struct lpfc_wcqe_release *)&wcqe);
14033 break;
14034 case CQE_CODE_XRI_ABORTED:
b84daac9 14035 cq->CQ_xri_aborted++;
4f774513 14036 /* Process the WQ XRI abort event */
bc73905a 14037 phba->last_completion_time = jiffies;
4f774513
JS
14038 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14039 (struct sli4_wcqe_xri_aborted *)&wcqe);
14040 break;
895427bd
JS
14041 case CQE_CODE_RECEIVE_V1:
14042 case CQE_CODE_RECEIVE:
14043 phba->last_completion_time = jiffies;
2d7dbc4c
JS
14044 if (cq->subtype == LPFC_NVMET) {
14045 workposted = lpfc_sli4_nvmet_handle_rcqe(
14046 phba, cq, (struct lpfc_rcqe *)&wcqe);
14047 }
895427bd 14048 break;
4f774513
JS
14049 default:
14050 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 14051 "0144 Not a valid CQE code: x%x\n",
4f774513
JS
14052 bf_get(lpfc_wcqe_c_code, &wcqe));
14053 break;
14054 }
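	/*
	 * Only the XRI-abort and NVMET receive paths defer work to the
	 * worker thread; WQ completion and release events are handled
	 * entirely in this context.
	 */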
14055 return workposted;
14056}
14057
14058/**
67d12733 14059 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
4f774513
JS
14060 * @phba: Pointer to HBA context object.
14061 * @eqe: Pointer to fast-path event queue entry.
14062 *
 14063 * This routine processes an event queue entry from the fast-path event queue.
 14064 * It checks the MajorCode and MinorCode to determine whether this is a
 14065 * completion event on a completion queue; if not, an error is logged and the
 14066 * routine returns. Otherwise, it looks up the corresponding completion queue
 14067 * and queues work to process all the entries on the completion queue and
 14068 * re-arm it.
14069 **/
f485c18d 14070static void
67d12733
JS
14071lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14072 uint32_t qidx)
4f774513 14073{
895427bd 14074 struct lpfc_queue *cq = NULL;
2d7dbc4c 14075 uint16_t cqid, id;
4f774513 14076
cb5172ea 14077 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
4f774513 14078 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
67d12733 14079 "0366 Not a valid completion "
4f774513 14080 "event: majorcode=x%x, minorcode=x%x\n",
cb5172ea
JS
14081 bf_get_le32(lpfc_eqe_major_code, eqe),
14082 bf_get_le32(lpfc_eqe_minor_code, eqe));
f485c18d 14083 return;
4f774513
JS
14084 }
14085
67d12733
JS
14086 /* Get the reference to the corresponding CQ */
14087 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14088
2d7dbc4c
JS
14089 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14090 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14091 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14092 /* Process NVMET unsol rcv */
14093 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14094 goto process_cq;
14095 }
67d12733
JS
14096 }
14097
895427bd
JS
14098 if (phba->sli4_hba.nvme_cq_map &&
14099 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
f358dd0c 14100 /* Process NVME / NVMET command completion */
895427bd
JS
14101 cq = phba->sli4_hba.nvme_cq[qidx];
14102 goto process_cq;
2e90f4b5 14103 }
67d12733 14104
895427bd
JS
14105 if (phba->sli4_hba.fcp_cq_map &&
14106 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
14107 /* Process FCP command completion */
14108 cq = phba->sli4_hba.fcp_cq[qidx];
14109 goto process_cq;
2e90f4b5 14110 }
895427bd
JS
14111
14112 if (phba->sli4_hba.nvmels_cq &&
14113 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14114 /* Process NVME unsol rcv */
14115 cq = phba->sli4_hba.nvmels_cq;
14116 }
14117
14118 /* Otherwise this is a Slow path event */
14119 if (cq == NULL) {
f485c18d
DK
14120 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
14121 return;
4f774513
JS
14122 }
14123
895427bd 14124process_cq:
4f774513
JS
14125 if (unlikely(cqid != cq->queue_id)) {
14126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14127 "0368 Miss-matched fast-path completion "
14128 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14129 cqid, cq->queue_id);
f485c18d 14130 return;
4f774513
JS
14131 }
14132
895427bd
JS
14133 /* Save EQ associated with this CQ */
14134 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
14135
f485c18d
DK
14136 if (!queue_work(phba->wq, &cq->irqwork))
14137 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14138 "0363 Cannot schedule soft IRQ "
14139 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14140 cqid, cq->queue_id, smp_processor_id());
14141}
14142
14143/**
 14144 * lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 14145 * @work: Pointer to the work_struct embedded in the completion queue.
 14146 *
 14147 * This routine runs from the driver workqueue. It walks the entries on the
 14148 * completion queue, dispatching each CQE through lpfc_sli4_fp_handle_cqe()
 14149 * until the queue is empty or the entry_repost batch limit is reached.
 14150 * It then records the CQE count statistics, re-arms the completion queue,
 14151 * and wakes the worker thread if any of the handlers posted deferred work.
 14152 *
 14153 * Called for CQ work items queued by lpfc_sli4_hba_handle_eqe().
14154 **/
14155static void
14156lpfc_sli4_hba_process_cq(struct work_struct *work)
14157{
14158 struct lpfc_queue *cq =
14159 container_of(work, struct lpfc_queue, irqwork);
14160 struct lpfc_hba *phba = cq->phba;
14161 struct lpfc_cqe *cqe;
14162 bool workposted = false;
14163 int ccount = 0;
14164
4f774513
JS
14165 /* Process all the entries to the CQ */
14166 while ((cqe = lpfc_sli4_cq_get(cq))) {
c8a4ce0b
DK
14167#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14168 if (phba->ktime_on)
14169 cq->isr_timestamp = ktime_get_ns();
14170 else
14171 cq->isr_timestamp = 0;
14172#endif
895427bd 14173 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
f485c18d 14174 if (!(++ccount % cq->entry_repost))
7869da18 14175 break;
4f774513
JS
14176 }
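	/*
	 * The loop above breaks after entry_repost CQEs so that one busy
	 * CQ cannot monopolize the work item; the re-arm below lets the
	 * hardware signal again for any entries still outstanding.
	 */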
14177
b84daac9 14178 /* Track the max number of CQEs processed in 1 EQ */
f485c18d
DK
14179 if (ccount > cq->CQ_max_cqe)
14180 cq->CQ_max_cqe = ccount;
14181 cq->assoc_qp->EQ_cqe_cnt += ccount;
b84daac9 14182
4f774513 14183 /* Catch the no cq entry condition */
f485c18d 14184 if (unlikely(ccount == 0))
4f774513
JS
14185 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14186 "0369 No entry from fast-path completion "
14187 "queue fcpcqid=%d\n", cq->queue_id);
14188
 14189 /* In any case, flush and re-arm the CQ */
b71413dd 14190 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
4f774513
JS
14191
14192 /* wake up worker thread if there are works to be done */
14193 if (workposted)
14194 lpfc_worker_wake_up(phba);
14195}
14196
14197static void
14198lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
14199{
14200 struct lpfc_eqe *eqe;
14201
14202 /* walk all the EQ entries and drop on the floor */
14203 while ((eqe = lpfc_sli4_eq_get(eq)))
14204 ;
14205
14206 /* Clear and re-arm the EQ */
b71413dd 14207 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
4f774513
JS
14208}
14209
1ba981fd
JS
14210
14211/**
14212 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
14213 * entry
14214 * @phba: Pointer to HBA context object.
14215 * @eqe: Pointer to fast-path event queue entry.
14216 *
 14217 * This routine processes an event queue entry from the Flash Optimized Fabric
 14218 * event queue. It checks the MajorCode and MinorCode to determine whether this
 14219 * is a completion event on a completion queue; if not, an error is logged and
 14220 * the routine returns. Otherwise, it looks up the corresponding completion
 14221 * queue and queues work to process all the entries on the completion queue
 14222 * and re-arm it.
14223 **/
14224static void
14225lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
14226{
14227 struct lpfc_queue *cq;
1ba981fd 14228 uint16_t cqid;
1ba981fd
JS
14229
14230 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14231 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14232 "9147 Not a valid completion "
14233 "event: majorcode=x%x, minorcode=x%x\n",
14234 bf_get_le32(lpfc_eqe_major_code, eqe),
14235 bf_get_le32(lpfc_eqe_minor_code, eqe));
14236 return;
14237 }
14238
14239 /* Get the reference to the corresponding CQ */
14240 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14241
14242 /* Next check for OAS */
14243 cq = phba->sli4_hba.oas_cq;
14244 if (unlikely(!cq)) {
14245 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14247 "9148 OAS completion queue "
14248 "does not exist\n");
14249 return;
14250 }
14251
14252 if (unlikely(cqid != cq->queue_id)) {
14253 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14254 "9149 Miss-matched fast-path compl "
14255 "queue id: eqcqid=%d, fcpcqid=%d\n",
14256 cqid, cq->queue_id);
14257 return;
14258 }
14259
d41b65bc
JS
14260 /* Save EQ associated with this CQ */
14261 cq->assoc_qp = phba->sli4_hba.fof_eq;
14262
f485c18d
DK
14263 /* CQ work will be processed on CPU affinitized to this IRQ */
14264 if (!queue_work(phba->wq, &cq->irqwork))
1ba981fd 14265 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
f485c18d
DK
14266 "0367 Cannot schedule soft IRQ "
14267 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14268 cqid, cq->queue_id, smp_processor_id());
1ba981fd
JS
14269}
14270
14271/**
14272 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
14273 * @irq: Interrupt number.
14274 * @dev_id: The device context pointer.
14275 *
14276 * This function is directly called from the PCI layer as an interrupt
14277 * service routine when device with SLI-4 interface spec is enabled with
14278 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
14279 * IOCB ring event in the HBA. However, when the device is enabled with either
14280 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14281 * device-level interrupt handler. When the PCI slot is in error recovery
14282 * or the HBA is undergoing initialization, the interrupt handler will not
 14283 * process the interrupt. The Flash Optimized Fabric ring events are handled in
 14284 * the interrupt context. This function is called without any lock held.
14285 * It gets the hbalock to access and update SLI data structures. Note that,
14286 * the EQ to CQ are one-to-one map such that the EQ index is
14287 * equal to that of CQ index.
14288 *
14289 * This function returns IRQ_HANDLED when interrupt is handled else it
14290 * returns IRQ_NONE.
14291 **/
14292irqreturn_t
14293lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
14294{
14295 struct lpfc_hba *phba;
895427bd 14296 struct lpfc_hba_eq_hdl *hba_eq_hdl;
1ba981fd
JS
14297 struct lpfc_queue *eq;
14298 struct lpfc_eqe *eqe;
14299 unsigned long iflag;
14300 int ecount = 0;
1ba981fd
JS
14301
14302 /* Get the driver's phba structure from the dev_id */
895427bd
JS
14303 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14304 phba = hba_eq_hdl->phba;
1ba981fd
JS
14305
14306 if (unlikely(!phba))
14307 return IRQ_NONE;
14308
14309 /* Get to the EQ struct associated with this vector */
14310 eq = phba->sli4_hba.fof_eq;
14311 if (unlikely(!eq))
14312 return IRQ_NONE;
14313
14314 /* Check device state for handling interrupt */
14315 if (unlikely(lpfc_intr_state_check(phba))) {
1ba981fd
JS
14316 /* Check again for link_state with lock held */
14317 spin_lock_irqsave(&phba->hbalock, iflag);
14318 if (phba->link_state < LPFC_LINK_DOWN)
14319 /* Flush, clear interrupt, and rearm the EQ */
14320 lpfc_sli4_eq_flush(phba, eq);
14321 spin_unlock_irqrestore(&phba->hbalock, iflag);
14322 return IRQ_NONE;
14323 }
14324
14325 /*
14326 * Process all the event on FCP fast-path EQ
14327 */
14328 while ((eqe = lpfc_sli4_eq_get(eq))) {
14329 lpfc_sli4_fof_handle_eqe(phba, eqe);
14330 if (!(++ecount % eq->entry_repost))
7869da18 14331 break;
1ba981fd
JS
14332 eq->EQ_processed++;
14333 }
14334
14335 /* Track the max number of EQEs processed in 1 intr */
14336 if (ecount > eq->EQ_max_eqe)
14337 eq->EQ_max_eqe = ecount;
14338
14339
14340 if (unlikely(ecount == 0)) {
14341 eq->EQ_no_entry++;
14342
14343 if (phba->intr_type == MSIX)
14344 /* MSI-X treated interrupt served as no EQ share INT */
14345 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14346 "9145 MSI-X interrupt with no EQE\n");
14347 else {
14348 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14349 "9146 ISR interrupt with no EQE\n");
14350 /* Non MSI-X treated on interrupt as EQ share INT */
14351 return IRQ_NONE;
14352 }
14353 }
14354 /* Always clear and re-arm the fast-path EQ */
b71413dd 14355 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
1ba981fd
JS
14356 return IRQ_HANDLED;
14357}
14358
4f774513 14359/**
67d12733 14360 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
4f774513
JS
14361 * @irq: Interrupt number.
14362 * @dev_id: The device context pointer.
14363 *
14364 * This function is directly called from the PCI layer as an interrupt
14365 * service routine when device with SLI-4 interface spec is enabled with
14366 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14367 * ring event in the HBA. However, when the device is enabled with either
14368 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14369 * device-level interrupt handler. When the PCI slot is in error recovery
14370 * or the HBA is undergoing initialization, the interrupt handler will not
 14371 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 14372 * the interrupt context. This function is called without any lock held.
14373 * It gets the hbalock to access and update SLI data structures. Note that,
14374 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14375 * equal to that of FCP CQ index.
14376 *
67d12733
JS
14377 * The link attention and ELS ring attention events are handled
14378 * by the worker thread. The interrupt handler signals the worker thread
14379 * and returns for these events. This function is called without any lock
14380 * held. It gets the hbalock to access and update SLI data structures.
14381 *
4f774513
JS
14382 * This function returns IRQ_HANDLED when interrupt is handled else it
14383 * returns IRQ_NONE.
14384 **/
14385irqreturn_t
67d12733 14386lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
4f774513
JS
14387{
14388 struct lpfc_hba *phba;
895427bd 14389 struct lpfc_hba_eq_hdl *hba_eq_hdl;
4f774513
JS
14390 struct lpfc_queue *fpeq;
14391 struct lpfc_eqe *eqe;
14392 unsigned long iflag;
14393 int ecount = 0;
895427bd 14394 int hba_eqidx;
4f774513
JS
14395
14396 /* Get the driver's phba structure from the dev_id */
895427bd
JS
14397 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14398 phba = hba_eq_hdl->phba;
14399 hba_eqidx = hba_eq_hdl->idx;
4f774513
JS
14400
14401 if (unlikely(!phba))
14402 return IRQ_NONE;
67d12733 14403 if (unlikely(!phba->sli4_hba.hba_eq))
5350d872 14404 return IRQ_NONE;
4f774513
JS
14405
14406 /* Get to the EQ struct associated with this vector */
895427bd 14407 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
2e90f4b5
JS
14408 if (unlikely(!fpeq))
14409 return IRQ_NONE;
4f774513 14410
ba20c853 14411 if (lpfc_fcp_look_ahead) {
895427bd 14412 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
b71413dd 14413 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
ba20c853 14414 else {
895427bd 14415 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
ba20c853
JS
14416 return IRQ_NONE;
14417 }
14418 }
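	/*
	 * With look-ahead enabled, hba_eq_in_use acts as a per-EQ claim
	 * token: the CPU that decrements it to zero masks the EQ interrupt
	 * and services the EQ; any other CPU restores the count and backs
	 * off with IRQ_NONE.
	 */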
14419
4f774513
JS
14420 /* Check device state for handling interrupt */
14421 if (unlikely(lpfc_intr_state_check(phba))) {
14422 /* Check again for link_state with lock held */
14423 spin_lock_irqsave(&phba->hbalock, iflag);
14424 if (phba->link_state < LPFC_LINK_DOWN)
14425 /* Flush, clear interrupt, and rearm the EQ */
14426 lpfc_sli4_eq_flush(phba, fpeq);
14427 spin_unlock_irqrestore(&phba->hbalock, iflag);
ba20c853 14428 if (lpfc_fcp_look_ahead)
895427bd 14429 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
4f774513
JS
14430 return IRQ_NONE;
14431 }
14432
14433 /*
14434 * Process all the event on FCP fast-path EQ
14435 */
14436 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
f485c18d
DK
14437 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
14438 if (!(++ecount % fpeq->entry_repost))
7869da18 14439 break;
b84daac9 14440 fpeq->EQ_processed++;
4f774513
JS
14441 }
14442
b84daac9
JS
14443 /* Track the max number of EQEs processed in 1 intr */
14444 if (ecount > fpeq->EQ_max_eqe)
14445 fpeq->EQ_max_eqe = ecount;
14446
4f774513 14447 /* Always clear and re-arm the fast-path EQ */
b71413dd 14448 phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
4f774513
JS
14449
14450 if (unlikely(ecount == 0)) {
b84daac9 14451 fpeq->EQ_no_entry++;
ba20c853
JS
14452
14453 if (lpfc_fcp_look_ahead) {
895427bd 14454 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
ba20c853
JS
14455 return IRQ_NONE;
14456 }
14457
4f774513
JS
14458 if (phba->intr_type == MSIX)
14459 /* MSI-X treated interrupt served as no EQ share INT */
14460 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14461 "0358 MSI-X interrupt with no EQE\n");
14462 else
14463 /* Non MSI-X treated on interrupt as EQ share INT */
14464 return IRQ_NONE;
14465 }
14466
ba20c853 14467 if (lpfc_fcp_look_ahead)
895427bd
JS
14468 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14469
4f774513
JS
14470 return IRQ_HANDLED;
14471} /* lpfc_sli4_fp_intr_handler */
14472
14473/**
14474 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14475 * @irq: Interrupt number.
14476 * @dev_id: The device context pointer.
14477 *
14478 * This function is the device-level interrupt handler to device with SLI-4
14479 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14480 * interrupt mode is enabled and there is an event in the HBA which requires
14481 * driver attention. This function invokes the slow-path interrupt attention
14482 * handling function and fast-path interrupt attention handling function in
14483 * turn to process the relevant HBA attention events. This function is called
14484 * without any lock held. It gets the hbalock to access and update SLI data
14485 * structures.
14486 *
14487 * This function returns IRQ_HANDLED when interrupt is handled, else it
14488 * returns IRQ_NONE.
14489 **/
14490irqreturn_t
14491lpfc_sli4_intr_handler(int irq, void *dev_id)
14492{
14493 struct lpfc_hba *phba;
67d12733
JS
14494 irqreturn_t hba_irq_rc;
14495 bool hba_handled = false;
895427bd 14496 int qidx;
4f774513
JS
14497
14498 /* Get the driver's phba structure from the dev_id */
14499 phba = (struct lpfc_hba *)dev_id;
14500
14501 if (unlikely(!phba))
14502 return IRQ_NONE;
14503
4f774513
JS
14504 /*
14505 * Invoke fast-path host attention interrupt handling as appropriate.
14506 */
895427bd 14507 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
67d12733 14508 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
895427bd 14509 &phba->sli4_hba.hba_eq_hdl[qidx]);
67d12733
JS
14510 if (hba_irq_rc == IRQ_HANDLED)
14511 hba_handled |= true;
4f774513
JS
14512 }
14513
1ba981fd
JS
14514 if (phba->cfg_fof) {
14515 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
895427bd 14516 &phba->sli4_hba.hba_eq_hdl[qidx]);
1ba981fd
JS
14517 if (hba_irq_rc == IRQ_HANDLED)
14518 hba_handled |= true;
14519 }
14520
67d12733 14521 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
4f774513
JS
14522} /* lpfc_sli4_intr_handler */
14523
14524/**
14525 * lpfc_sli4_queue_free - free a queue structure and associated memory
14526 * @queue: The queue structure to free.
14527 *
b595076a 14528 * This function frees a queue structure and the DMAable memory used for
4f774513
JS
14529 * the host resident queue. This function must be called after destroying the
14530 * queue on the HBA.
14531 **/
14532void
14533lpfc_sli4_queue_free(struct lpfc_queue *queue)
14534{
14535 struct lpfc_dmabuf *dmabuf;
14536
14537 if (!queue)
14538 return;
14539
14540 while (!list_empty(&queue->page_list)) {
14541 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14542 list);
81b96eda 14543 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
4f774513
JS
14544 dmabuf->virt, dmabuf->phys);
14545 kfree(dmabuf);
14546 }
895427bd
JS
14547 if (queue->rqbp) {
14548 lpfc_free_rq_buffer(queue->phba, queue);
14549 kfree(queue->rqbp);
14550 }
d1f525aa
JS
14551
14552 if (!list_empty(&queue->wq_list))
14553 list_del(&queue->wq_list);
14554
4f774513
JS
14555 kfree(queue);
14556 return;
14557}
14558
14559/**
14560 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14561 * @phba: The HBA that this queue is being created on.
81b96eda 14562 * @page_size: The size of a queue page
4f774513
JS
14563 * @entry_size: The size of each queue entry for this queue.
 14564 * @entry_count: The number of entries that this queue will handle.
14565 *
14566 * This function allocates a queue structure and the DMAable memory used for
14567 * the host resident queue. This function must be called before creating the
14568 * queue on the HBA.
14569 **/
14570struct lpfc_queue *
81b96eda
JS
14571lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14572 uint32_t entry_size, uint32_t entry_count)
4f774513
JS
14573{
14574 struct lpfc_queue *queue;
14575 struct lpfc_dmabuf *dmabuf;
14576 int x, total_qe_count;
14577 void *dma_pointer;
cb5172ea 14578 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
4f774513 14579
cb5172ea 14580 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 14581 hw_page_size = page_size;
cb5172ea 14582
4f774513
JS
14583 queue = kzalloc(sizeof(struct lpfc_queue) +
14584 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
14585 if (!queue)
14586 return NULL;
cb5172ea
JS
14587 queue->page_count = (ALIGN(entry_size * entry_count,
14588 hw_page_size))/hw_page_size;
895427bd
JS
14589
14590 /* If needed, Adjust page count to match the max the adapter supports */
4e87eb2f
EM
14591 if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
14592 (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
895427bd
JS
14593 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
14594
4f774513 14595 INIT_LIST_HEAD(&queue->list);
895427bd 14596 INIT_LIST_HEAD(&queue->wq_list);
6e8e1c14 14597 INIT_LIST_HEAD(&queue->wqfull_list);
4f774513
JS
14598 INIT_LIST_HEAD(&queue->page_list);
14599 INIT_LIST_HEAD(&queue->child_list);
81b96eda
JS
14600
14601 /* Set queue parameters now. If the system cannot provide memory
14602 * resources, the free routine needs to know what was allocated.
14603 */
14604 queue->entry_size = entry_size;
14605 queue->entry_count = entry_count;
14606 queue->page_size = hw_page_size;
14607 queue->phba = phba;
14608
4f774513
JS
14609 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
14610 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14611 if (!dmabuf)
14612 goto out_fail;
1aee383d
JP
14613 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
14614 hw_page_size, &dmabuf->phys,
14615 GFP_KERNEL);
4f774513
JS
14616 if (!dmabuf->virt) {
14617 kfree(dmabuf);
14618 goto out_fail;
14619 }
14620 dmabuf->buffer_tag = x;
14621 list_add_tail(&dmabuf->list, &queue->page_list);
14622 /* initialize queue's entry array */
14623 dma_pointer = dmabuf->virt;
14624 for (; total_qe_count < entry_count &&
cb5172ea 14625 dma_pointer < (hw_page_size + dmabuf->virt);
4f774513
JS
14626 total_qe_count++, dma_pointer += entry_size) {
14627 queue->qe[total_qe_count].address = dma_pointer;
14628 }
14629 }
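	/*
	 * Each DMA page is carved into entry_size slices and every entry's
	 * virtual address is recorded in qe[], so queue accessors can index
	 * entries directly without per-page arithmetic.
	 */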
f485c18d
DK
14630 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14631 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
4f774513 14632
64eb4dcb
JS
14633 /* entry_repost will be set during q creation */
14634
4f774513
JS
14635 return queue;
14636out_fail:
14637 lpfc_sli4_queue_free(queue);
14638 return NULL;
14639}
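/*
 * Typical usage, as a sketch (the queue sizes and the LPFC_DEFAULT_PAGE_SIZE,
 * eq_esize and eq_ecount names are assumed from the driver of this era):
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount);
 *	if (eq && lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
 *		lpfc_sli4_queue_free(eq);
 */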
14640
962bc51b
JS
14641/**
14642 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14643 * @phba: HBA structure that indicates port to create a queue on.
14644 * @pci_barset: PCI BAR set flag.
14645 *
 14646 * This function returns the host memory address to which the specified PCI
 14647 * BAR set has been iomapped, if any. The returned host
14648 * memory address can be NULL.
14649 */
14650static void __iomem *
14651lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14652{
962bc51b
JS
14653 if (!phba->pcidev)
14654 return NULL;
962bc51b
JS
14655
14656 switch (pci_barset) {
14657 case WQ_PCI_BAR_0_AND_1:
962bc51b
JS
14658 return phba->pci_bar0_memmap_p;
14659 case WQ_PCI_BAR_2_AND_3:
962bc51b
JS
14660 return phba->pci_bar2_memmap_p;
14661 case WQ_PCI_BAR_4_AND_5:
962bc51b
JS
14662 return phba->pci_bar4_memmap_p;
14663 default:
14664 break;
14665 }
14666 return NULL;
14667}
14668
173edbb2 14669/**
895427bd 14670 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
173edbb2
JS
14671 * @phba: HBA structure that indicates port to create a queue on.
14672 * @startq: The starting FCP EQ to modify
14673 *
14674 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
43140ca6
JS
14675 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
14676 * updated in one mailbox command.
173edbb2
JS
14677 *
14678 * The @phba struct is used to send mailbox command to HBA. The @startq
14679 * is used to get the starting FCP EQ to change.
14680 * This function is asynchronous and will wait for the mailbox
14681 * command to finish before continuing.
14682 *
14683 * On success this function will return a zero. If unable to allocate enough
14684 * memory this function will return -ENOMEM. If the queue create mailbox command
14685 * fails this function will return -ENXIO.
14686 **/
a2fc4aef 14687int
0cf07f84
JS
14688lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14689 uint32_t numq, uint32_t imax)
173edbb2
JS
14690{
14691 struct lpfc_mbx_modify_eq_delay *eq_delay;
14692 LPFC_MBOXQ_t *mbox;
14693 struct lpfc_queue *eq;
14694 int cnt, rc, length, status = 0;
14695 uint32_t shdr_status, shdr_add_status;
0cf07f84 14696 uint32_t result, val;
895427bd 14697 int qidx;
173edbb2
JS
14698 union lpfc_sli4_cfg_shdr *shdr;
14699 uint16_t dmult;
14700
895427bd 14701 if (startq >= phba->io_channel_irqs)
173edbb2
JS
14702 return 0;
14703
14704 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14705 if (!mbox)
14706 return -ENOMEM;
14707 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14708 sizeof(struct lpfc_sli4_cfg_mhdr));
14709 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14710 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14711 length, LPFC_SLI4_MBX_EMBED);
14712 eq_delay = &mbox->u.mqe.un.eq_delay;
14713
 14714 /* Calculate delay multiplier from maximum interrupts per second */
0cf07f84 14715 result = imax / phba->io_channel_irqs;
895427bd 14716 if (result > LPFC_DMULT_CONST || result == 0)
ee02006b
JS
14717 dmult = 0;
14718 else
14719 dmult = LPFC_DMULT_CONST/result - 1;
0cf07f84
JS
14720 if (dmult > LPFC_DMULT_MAX)
14721 dmult = LPFC_DMULT_MAX;
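	/*
	 * result is the target interrupt rate per EQ; dmult converts it to
	 * the delay multiplier the MODIFY_EQ_DELAY command expects, where 0
	 * means no interrupt coalescing.
	 */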
173edbb2
JS
14722
14723 cnt = 0;
895427bd
JS
14724 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14725 eq = phba->sli4_hba.hba_eq[qidx];
173edbb2
JS
14726 if (!eq)
14727 continue;
0cf07f84 14728 eq->q_mode = imax;
173edbb2
JS
14729 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14730 eq_delay->u.request.eq[cnt].phase = 0;
14731 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14732 cnt++;
0cf07f84
JS
14733
14734 /* q_mode is only used for auto_imax */
14735 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14736 /* Use EQ Delay Register method for q_mode */
14737
14738 /* Convert for EQ Delay register */
14739 val = phba->cfg_fcp_imax;
14740 if (val) {
14741 /* First, interrupts per sec per EQ */
14742 val = phba->cfg_fcp_imax /
14743 phba->io_channel_irqs;
14744
14745 /* us delay between each interrupt */
14746 val = LPFC_SEC_TO_USEC / val;
14747 }
14748 eq->q_mode = val;
14749 } else {
14750 eq->q_mode = imax;
14751 }
14752
14753 if (cnt >= numq)
173edbb2
JS
14754 break;
14755 }
14756 eq_delay->u.request.num_eq = cnt;
14757
14758 mbox->vport = phba->pport;
14759 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14760 mbox->ctx_buf = NULL;
14761 mbox->ctx_ndlp = NULL;
173edbb2
JS
14762 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14763 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14764 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14765 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14766 if (shdr_status || shdr_add_status || rc) {
14767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14768 "2512 MODIFY_EQ_DELAY mailbox failed with "
14769 "status x%x add_status x%x, mbx status x%x\n",
14770 shdr_status, shdr_add_status, rc);
14771 status = -ENXIO;
14772 }
14773 mempool_free(mbox, phba->mbox_mem_pool);
14774 return status;
14775}
14776
4f774513
JS
14777/**
14778 * lpfc_eq_create - Create an Event Queue on the HBA
14779 * @phba: HBA structure that indicates port to create a queue on.
14780 * @eq: The queue structure to use to create the event queue.
14781 * @imax: The maximum interrupt per second limit.
14782 *
14783 * This function creates an event queue, as detailed in @eq, on a port,
14784 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14785 *
14786 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14787 * is used to get the entry count and entry size that are necessary to
14788 * determine the number of pages to allocate and use for this queue. This
14789 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14790 * event queue. This function is asynchronous and will wait for the mailbox
14791 * command to finish before continuing.
14792 *
14793 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14794 * memory this function will return -ENOMEM. If the queue create mailbox command
14795 * fails this function will return -ENXIO.
4f774513 14796 **/
a2fc4aef 14797int
ee02006b 14798lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
4f774513
JS
14799{
14800 struct lpfc_mbx_eq_create *eq_create;
14801 LPFC_MBOXQ_t *mbox;
14802 int rc, length, status = 0;
14803 struct lpfc_dmabuf *dmabuf;
14804 uint32_t shdr_status, shdr_add_status;
14805 union lpfc_sli4_cfg_shdr *shdr;
14806 uint16_t dmult;
49198b37
JS
14807 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14808
2e90f4b5
JS
14809 /* sanity check on queue memory */
14810 if (!eq)
14811 return -ENODEV;
49198b37
JS
14812 if (!phba->sli4_hba.pc_sli4_params.supported)
14813 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
14814
14815 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14816 if (!mbox)
14817 return -ENOMEM;
14818 length = (sizeof(struct lpfc_mbx_eq_create) -
14819 sizeof(struct lpfc_sli4_cfg_mhdr));
14820 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14821 LPFC_MBOX_OPCODE_EQ_CREATE,
14822 length, LPFC_SLI4_MBX_EMBED);
14823 eq_create = &mbox->u.mqe.un.eq_create;
7365f6fd 14824 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
4f774513
JS
14825 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14826 eq->page_count);
14827 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14828 LPFC_EQE_SIZE);
14829 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
7365f6fd
JS
14830
14831 /* Use version 2 of CREATE_EQ if eqav is set */
14832 if (phba->sli4_hba.pc_sli4_params.eqav) {
14833 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14834 LPFC_Q_CREATE_VERSION_2);
14835 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14836 phba->sli4_hba.pc_sli4_params.eqav);
14837 }
14838
2c9c5a00
JS
14839 /* don't setup delay multiplier using EQ_CREATE */
14840 dmult = 0;
4f774513
JS
14841 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14842 dmult);
14843 switch (eq->entry_count) {
14844 default:
14845 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14846 "0360 Unsupported EQ count. (%d)\n",
14847 eq->entry_count);
14848 if (eq->entry_count < 256)
14849 return -EINVAL;
14850 /* otherwise default to smallest count (drop through) */
14851 case 256:
14852 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14853 LPFC_EQ_CNT_256);
14854 break;
14855 case 512:
14856 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14857 LPFC_EQ_CNT_512);
14858 break;
14859 case 1024:
14860 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14861 LPFC_EQ_CNT_1024);
14862 break;
14863 case 2048:
14864 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14865 LPFC_EQ_CNT_2048);
14866 break;
14867 case 4096:
14868 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14869 LPFC_EQ_CNT_4096);
14870 break;
14871 }
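	/*
	 * Note: only 256, 512, 1024, 2048 and 4096 entry EQs are encodable;
	 * other counts of at least 256 fall back to the smallest size, and
	 * anything smaller is rejected with -EINVAL above.
	 */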
14872 list_for_each_entry(dmabuf, &eq->page_list, list) {
49198b37 14873 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
14874 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14875 putPaddrLow(dmabuf->phys);
14876 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14877 putPaddrHigh(dmabuf->phys);
14878 }
14879 mbox->vport = phba->pport;
14880 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14881 mbox->ctx_buf = NULL;
14882 mbox->ctx_ndlp = NULL;
4f774513 14883 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4f774513
JS
14884 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14885 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14886 if (shdr_status || shdr_add_status || rc) {
14887 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14888 "2500 EQ_CREATE mailbox failed with "
14889 "status x%x add_status x%x, mbx status x%x\n",
14890 shdr_status, shdr_add_status, rc);
14891 status = -ENXIO;
14892 }
14893 eq->type = LPFC_EQ;
14894 eq->subtype = LPFC_NONE;
14895 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14896 if (eq->queue_id == 0xFFFF)
14897 status = -ENXIO;
14898 eq->host_index = 0;
14899 eq->hba_index = 0;
64eb4dcb 14900 eq->entry_repost = LPFC_EQ_REPOST;
4f774513 14901
8fa38513 14902 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
14903 return status;
14904}
14905
14906/**
14907 * lpfc_cq_create - Create a Completion Queue on the HBA
14908 * @phba: HBA structure that indicates port to create a queue on.
14909 * @cq: The queue structure to use to create the completion queue.
14910 * @eq: The event queue to bind this completion queue to.
14911 *
 14912 * This function creates a completion queue, as detailed in @cq, on a port,
14913 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14914 *
14915 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14916 * is used to get the entry count and entry size that are necessary to
14917 * determine the number of pages to allocate and use for this queue. The @eq
14918 * is used to indicate which event queue to bind this completion queue to. This
14919 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14920 * completion queue. This function is asynchronous and will wait for the mailbox
14921 * command to finish before continuing.
14922 *
14923 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14924 * memory this function will return -ENOMEM. If the queue create mailbox command
14925 * fails this function will return -ENXIO.
4f774513 14926 **/
a2fc4aef 14927int
4f774513
JS
14928lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14929 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14930{
14931 struct lpfc_mbx_cq_create *cq_create;
14932 struct lpfc_dmabuf *dmabuf;
14933 LPFC_MBOXQ_t *mbox;
14934 int rc, length, status = 0;
14935 uint32_t shdr_status, shdr_add_status;
14936 union lpfc_sli4_cfg_shdr *shdr;
49198b37 14937
2e90f4b5
JS
14938 /* sanity check on queue memory */
14939 if (!cq || !eq)
14940 return -ENODEV;
49198b37 14941
4f774513
JS
14942 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14943 if (!mbox)
14944 return -ENOMEM;
14945 length = (sizeof(struct lpfc_mbx_cq_create) -
14946 sizeof(struct lpfc_sli4_cfg_mhdr));
14947 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14948 LPFC_MBOX_OPCODE_CQ_CREATE,
14949 length, LPFC_SLI4_MBX_EMBED);
14950 cq_create = &mbox->u.mqe.un.cq_create;
5a6f133e 14951 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
4f774513
JS
14952 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14953 cq->page_count);
14954 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14955 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
5a6f133e
JS
14956 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14957 phba->sli4_hba.pc_sli4_params.cqv);
14958 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
81b96eda
JS
14959 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14960 (cq->page_size / SLI4_PAGE_SIZE));
5a6f133e
JS
14961 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14962 eq->queue_id);
7365f6fd
JS
14963 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14964 phba->sli4_hba.pc_sli4_params.cqav);
5a6f133e
JS
14965 } else {
14966 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14967 eq->queue_id);
14968 }
4f774513 14969 switch (cq->entry_count) {
81b96eda
JS
14970 case 2048:
14971 case 4096:
14972 if (phba->sli4_hba.pc_sli4_params.cqv ==
14973 LPFC_Q_CREATE_VERSION_2) {
14974 cq_create->u.request.context.lpfc_cq_context_count =
14975 cq->entry_count;
14976 bf_set(lpfc_cq_context_count,
14977 &cq_create->u.request.context,
14978 LPFC_CQ_CNT_WORD7);
14979 break;
14980 }
14981 /* Fall Thru */
4f774513
JS
14982 default:
14983 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2ea259ee 14984 "0361 Unsupported CQ count: "
64eb4dcb 14985 "entry cnt %d sz %d pg cnt %d\n",
2ea259ee 14986 cq->entry_count, cq->entry_size,
64eb4dcb 14987 cq->page_count);
4f4c1863
JS
14988 if (cq->entry_count < 256) {
14989 status = -EINVAL;
14990 goto out;
14991 }
4f774513
JS
14992 /* otherwise default to smallest count (drop through) */
14993 case 256:
14994 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14995 LPFC_CQ_CNT_256);
14996 break;
14997 case 512:
14998 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14999 LPFC_CQ_CNT_512);
15000 break;
15001 case 1024:
15002 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15003 LPFC_CQ_CNT_1024);
15004 break;
15005 }
15006 list_for_each_entry(dmabuf, &cq->page_list, list) {
81b96eda 15007 memset(dmabuf->virt, 0, cq->page_size);
4f774513
JS
15008 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15009 putPaddrLow(dmabuf->phys);
15010 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15011 putPaddrHigh(dmabuf->phys);
15012 }
15013 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15014
15015 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
15016 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15017 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15018 if (shdr_status || shdr_add_status || rc) {
15019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15020 "2501 CQ_CREATE mailbox failed with "
15021 "status x%x add_status x%x, mbx status x%x\n",
15022 shdr_status, shdr_add_status, rc);
15023 status = -ENXIO;
15024 goto out;
15025 }
15026 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15027 if (cq->queue_id == 0xFFFF) {
15028 status = -ENXIO;
15029 goto out;
15030 }
15031 /* link the cq onto the parent eq child list */
15032 list_add_tail(&cq->list, &eq->child_list);
15033 /* Set up completion queue's type and subtype */
15034 cq->type = type;
15035 cq->subtype = subtype;
15036 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
2a622bfb 15037 cq->assoc_qid = eq->queue_id;
4f774513
JS
15038 cq->host_index = 0;
15039 cq->hba_index = 0;
64eb4dcb 15040 cq->entry_repost = LPFC_CQ_REPOST;
4f774513 15041
8fa38513
JS
15042out:
15043 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
15044 return status;
15045}
15046
2d7dbc4c
JS
15047/**
15048 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15049 * @phba: HBA structure that indicates port to create a queue on.
15050 * @cqp: The queue structure array to use to create the completion queues.
15051 * @eqp: The event queue array to bind these completion queues to.
15052 *
 15053 * This function creates a set of completion queues to support MRQ,
 15054 * as detailed in @cqp, on a port,
15055 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15056 *
 15057 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 15058 * is used to get the entry count and entry size that are necessary to
 15059 * determine the number of pages to allocate and use for these queues. The @eqp
 15060 * array indicates which event queue to bind each completion queue to. This
15061 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15062 * completion queue. This function is asynchronous and will wait for the mailbox
15063 * command to finish before continuing.
15064 *
15065 * On success this function will return a zero. If unable to allocate enough
15066 * memory this function will return -ENOMEM. If the queue create mailbox command
15067 * fails this function will return -ENXIO.
15068 **/
15069int
15070lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15071 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
15072{
15073 struct lpfc_queue *cq;
15074 struct lpfc_queue *eq;
15075 struct lpfc_mbx_cq_create_set *cq_set;
15076 struct lpfc_dmabuf *dmabuf;
15077 LPFC_MBOXQ_t *mbox;
15078 int rc, length, alloclen, status = 0;
15079 int cnt, idx, numcq, page_idx = 0;
15080 uint32_t shdr_status, shdr_add_status;
15081 union lpfc_sli4_cfg_shdr *shdr;
15082 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15083
15084 /* sanity check on queue memory */
15085 numcq = phba->cfg_nvmet_mrq;
15086 if (!cqp || !eqp || !numcq)
15087 return -ENODEV;
2d7dbc4c
JS
15088
15089 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15090 if (!mbox)
15091 return -ENOMEM;
15092
15093 length = sizeof(struct lpfc_mbx_cq_create_set);
15094 length += ((numcq * cqp[0]->page_count) *
15095 sizeof(struct dma_address));
15096 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15097 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15098 LPFC_SLI4_MBX_NEMBED);
15099 if (alloclen < length) {
15100 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15101 "3098 Allocated DMA memory size (%d) is "
15102 "less than the requested DMA memory size "
15103 "(%d)\n", alloclen, length);
15104 status = -ENOMEM;
15105 goto out;
15106 }
15107 cq_set = mbox->sge_array->addr[0];
15108 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15109 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15110
15111 for (idx = 0; idx < numcq; idx++) {
15112 cq = cqp[idx];
15113 eq = eqp[idx];
15114 if (!cq || !eq) {
15115 status = -ENOMEM;
15116 goto out;
15117 }
81b96eda
JS
15118 if (!phba->sli4_hba.pc_sli4_params.supported)
15119 hw_page_size = cq->page_size;
2d7dbc4c
JS
15120
15121 switch (idx) {
15122 case 0:
15123 bf_set(lpfc_mbx_cq_create_set_page_size,
15124 &cq_set->u.request,
15125 (hw_page_size / SLI4_PAGE_SIZE));
15126 bf_set(lpfc_mbx_cq_create_set_num_pages,
15127 &cq_set->u.request, cq->page_count);
15128 bf_set(lpfc_mbx_cq_create_set_evt,
15129 &cq_set->u.request, 1);
15130 bf_set(lpfc_mbx_cq_create_set_valid,
15131 &cq_set->u.request, 1);
15132 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15133 &cq_set->u.request, 0);
15134 bf_set(lpfc_mbx_cq_create_set_num_cq,
15135 &cq_set->u.request, numcq);
7365f6fd
JS
15136 bf_set(lpfc_mbx_cq_create_set_autovalid,
15137 &cq_set->u.request,
15138 phba->sli4_hba.pc_sli4_params.cqav);
2d7dbc4c 15139 switch (cq->entry_count) {
81b96eda
JS
15140 case 2048:
15141 case 4096:
15142 if (phba->sli4_hba.pc_sli4_params.cqv ==
15143 LPFC_Q_CREATE_VERSION_2) {
15144 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15145 &cq_set->u.request,
15146 cq->entry_count);
15147 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15148 &cq_set->u.request,
15149 LPFC_CQ_CNT_WORD7);
15150 break;
15151 }
15152 /* Fall Thru */
2d7dbc4c
JS
15153 default:
15154 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15155 "3118 Bad CQ count. (%d)\n",
15156 cq->entry_count);
15157 if (cq->entry_count < 256) {
15158 status = -EINVAL;
15159 goto out;
15160 }
15161 /* otherwise default to smallest (drop thru) */
15162 case 256:
15163 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15164 &cq_set->u.request, LPFC_CQ_CNT_256);
15165 break;
15166 case 512:
15167 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15168 &cq_set->u.request, LPFC_CQ_CNT_512);
15169 break;
15170 case 1024:
15171 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15172 &cq_set->u.request, LPFC_CQ_CNT_1024);
15173 break;
15174 }
15175 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15176 &cq_set->u.request, eq->queue_id);
15177 break;
15178 case 1:
15179 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15180 &cq_set->u.request, eq->queue_id);
15181 break;
15182 case 2:
15183 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15184 &cq_set->u.request, eq->queue_id);
15185 break;
15186 case 3:
15187 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15188 &cq_set->u.request, eq->queue_id);
15189 break;
15190 case 4:
15191 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15192 &cq_set->u.request, eq->queue_id);
15193 break;
15194 case 5:
15195 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15196 &cq_set->u.request, eq->queue_id);
15197 break;
15198 case 6:
15199 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15200 &cq_set->u.request, eq->queue_id);
15201 break;
15202 case 7:
15203 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15204 &cq_set->u.request, eq->queue_id);
15205 break;
15206 case 8:
15207 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15208 &cq_set->u.request, eq->queue_id);
15209 break;
15210 case 9:
15211 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15212 &cq_set->u.request, eq->queue_id);
15213 break;
15214 case 10:
15215 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15216 &cq_set->u.request, eq->queue_id);
15217 break;
15218 case 11:
15219 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15220 &cq_set->u.request, eq->queue_id);
15221 break;
15222 case 12:
15223 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15224 &cq_set->u.request, eq->queue_id);
15225 break;
15226 case 13:
15227 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15228 &cq_set->u.request, eq->queue_id);
15229 break;
15230 case 14:
15231 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15232 &cq_set->u.request, eq->queue_id);
15233 break;
15234 case 15:
15235 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15236 &cq_set->u.request, eq->queue_id);
15237 break;
15238 }
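		/*
		 * The CREATE_CQ_SET request carries a discrete eq_id field
		 * for each member CQ (up to 16), hence the per-index cases
		 * above; every CQ in the set binds 1:1 to its own EQ.
		 */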
15239
15240 /* link the cq onto the parent eq child list */
15241 list_add_tail(&cq->list, &eq->child_list);
15242 /* Set up completion queue's type and subtype */
15243 cq->type = type;
15244 cq->subtype = subtype;
15245 cq->assoc_qid = eq->queue_id;
15246 cq->host_index = 0;
15247 cq->hba_index = 0;
64eb4dcb 15248 cq->entry_repost = LPFC_CQ_REPOST;
81b96eda 15249 cq->chann = idx;
2d7dbc4c
JS
15250
15251 rc = 0;
15252 list_for_each_entry(dmabuf, &cq->page_list, list) {
15253 memset(dmabuf->virt, 0, hw_page_size);
15254 cnt = page_idx + dmabuf->buffer_tag;
15255 cq_set->u.request.page[cnt].addr_lo =
15256 putPaddrLow(dmabuf->phys);
15257 cq_set->u.request.page[cnt].addr_hi =
15258 putPaddrHigh(dmabuf->phys);
15259 rc++;
15260 }
15261 page_idx += rc;
15262 }
15263
15264 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15265
15266 /* The IOCTL status is embedded in the mailbox subheader. */
15267 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15268 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15269 if (shdr_status || shdr_add_status || rc) {
15270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15271 "3119 CQ_CREATE_SET mailbox failed with "
15272 "status x%x add_status x%x, mbx status x%x\n",
15273 shdr_status, shdr_add_status, rc);
15274 status = -ENXIO;
15275 goto out;
15276 }
15277 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15278 if (rc == 0xFFFF) {
15279 status = -ENXIO;
15280 goto out;
15281 }
15282
15283 for (idx = 0; idx < numcq; idx++) {
15284 cq = cqp[idx];
15285 cq->queue_id = rc + idx;
15286 }
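	/*
	 * The firmware returns only the base queue id for the set; member
	 * CQs are numbered consecutively from it, which is what makes the
	 * idx-based lookup in lpfc_sli4_nvmet_handle_rcqe() work.
	 */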
15287
15288out:
15289 lpfc_sli4_mbox_cmd_free(phba, mbox);
15290 return status;
15291}
15292
b19a061a
JS
15293/**
15294 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15295 * @phba: HBA structure that indicates port to create a queue on.
15296 * @mq: The queue structure to use to create the mailbox queue.
15297 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 15298 * @cq: The completion queue to associate with this mailbox queue.
15299 *
15300 * This function provides failback (fb) functionality when the
 15301 * mq_create_ext fails on older FW generations. Its purpose is identical
15302 * to mq_create_ext otherwise.
15303 *
15304 * This routine cannot fail as all attributes were previously accessed and
15305 * initialized in mq_create_ext.
15306 **/
15307static void
15308lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15309 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15310{
15311 struct lpfc_mbx_mq_create *mq_create;
15312 struct lpfc_dmabuf *dmabuf;
15313 int length;
15314
15315 length = (sizeof(struct lpfc_mbx_mq_create) -
15316 sizeof(struct lpfc_sli4_cfg_mhdr));
15317 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15318 LPFC_MBOX_OPCODE_MQ_CREATE,
15319 length, LPFC_SLI4_MBX_EMBED);
15320 mq_create = &mbox->u.mqe.un.mq_create;
15321 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15322 mq->page_count);
15323 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15324 cq->queue_id);
15325 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15326 switch (mq->entry_count) {
15327 case 16:
5a6f133e
JS
15328 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15329 LPFC_MQ_RING_SIZE_16);
b19a061a
JS
15330 break;
15331 case 32:
5a6f133e
JS
15332 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15333 LPFC_MQ_RING_SIZE_32);
b19a061a
JS
15334 break;
15335 case 64:
5a6f133e
JS
15336 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15337 LPFC_MQ_RING_SIZE_64);
b19a061a
JS
15338 break;
15339 case 128:
5a6f133e
JS
15340 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15341 LPFC_MQ_RING_SIZE_128);
b19a061a
JS
15342 break;
15343 }
15344 list_for_each_entry(dmabuf, &mq->page_list, list) {
15345 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15346 putPaddrLow(dmabuf->phys);
15347 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15348 putPaddrHigh(dmabuf->phys);
15349 }
15350}
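/*
 * lpfc_mq_create_fb_init() rebuilds the command in the caller's mailbox
 * buffer as a plain MQ_CREATE, so lpfc_mq_create() can simply re-issue
 * the same mailbox when MQ_CREATE_EXT is rejected by older firmware.
 */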
15351
04c68496
JS
15352/**
15353 * lpfc_mq_create - Create a mailbox Queue on the HBA
15354 * @phba: HBA structure that indicates port to create a queue on.
15355 * @mq: The queue structure to use to create the mailbox queue.
b19a061a
JS
 15356 * @cq: The completion queue to associate with this mailbox queue.
15357 * @subtype: The queue's subtype.
04c68496
JS
15358 *
15359 * This function creates a mailbox queue, as detailed in @mq, on a port,
15360 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15361 *
 15362 * The @phba struct is used to send mailbox command to HBA. The @mq struct
15363 * is used to get the entry count and entry size that are necessary to
15364 * determine the number of pages to allocate and use for this queue. This
15365 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15366 * mailbox queue. This function is asynchronous and will wait for the mailbox
15367 * command to finish before continuing.
15368 *
15369 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15370 * memory this function will return -ENOMEM. If the queue create mailbox command
15371 * fails this function will return -ENXIO.
04c68496 15372 **/
b19a061a 15373int32_t
04c68496
JS
15374lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15375 struct lpfc_queue *cq, uint32_t subtype)
15376{
15377 struct lpfc_mbx_mq_create *mq_create;
b19a061a 15378 struct lpfc_mbx_mq_create_ext *mq_create_ext;
04c68496
JS
15379 struct lpfc_dmabuf *dmabuf;
15380 LPFC_MBOXQ_t *mbox;
15381 int rc, length, status = 0;
15382 uint32_t shdr_status, shdr_add_status;
15383 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15384 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 15385
2e90f4b5
JS
15386 /* sanity check on queue memory */
15387 if (!mq || !cq)
15388 return -ENODEV;
15389 if (!phba->sli4_hba.pc_sli4_params.supported)
15390 hw_page_size = SLI4_PAGE_SIZE;
15391
15392 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15393 if (!mbox)
15394 return -ENOMEM;
15395 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15396 sizeof(struct lpfc_sli4_cfg_mhdr));
15397 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15398 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15399 length, LPFC_SLI4_MBX_EMBED);
15400
15401 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15402 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15403 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15404 &mq_create_ext->u.request, mq->page_count);
15405 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15406 &mq_create_ext->u.request, 1);
15407 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15408 &mq_create_ext->u.request, 1);
15409 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15410 &mq_create_ext->u.request, 1);
15411 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15412 &mq_create_ext->u.request, 1);
15413 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15414 &mq_create_ext->u.request, 1);
15415 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15416 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15417 phba->sli4_hba.pc_sli4_params.mqv);
15418 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15419 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15420 cq->queue_id);
15421 else
15422 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15423 cq->queue_id);
15424 switch (mq->entry_count) {
15425 default:
15426 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15427 "0362 Unsupported MQ count. (%d)\n",
15428 mq->entry_count);
15429 if (mq->entry_count < 16) {
15430 status = -EINVAL;
15431 goto out;
15432 }
15433 /* otherwise default to smallest count (fall through) */
15434 case 16:
15435 bf_set(lpfc_mq_context_ring_size,
15436 &mq_create_ext->u.request.context,
15437 LPFC_MQ_RING_SIZE_16);
15438 break;
15439 case 32:
15440 bf_set(lpfc_mq_context_ring_size,
15441 &mq_create_ext->u.request.context,
15442 LPFC_MQ_RING_SIZE_32);
15443 break;
15444 case 64:
15445 bf_set(lpfc_mq_context_ring_size,
15446 &mq_create_ext->u.request.context,
15447 LPFC_MQ_RING_SIZE_64);
15448 break;
15449 case 128:
15450 bf_set(lpfc_mq_context_ring_size,
15451 &mq_create_ext->u.request.context,
15452 LPFC_MQ_RING_SIZE_128);
15453 break;
15454 }
15455 list_for_each_entry(dmabuf, &mq->page_list, list) {
15456 memset(dmabuf->virt, 0, hw_page_size);
15457 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15458 putPaddrLow(dmabuf->phys);
15459 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15460 putPaddrHigh(dmabuf->phys);
15461 }
15462 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15463 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15464 &mq_create_ext->u.response);
15465 if (rc != MBX_SUCCESS) {
15466 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15467 "2795 MQ_CREATE_EXT failed with "
15468 "status x%x. Falling back to MQ_CREATE.\n",
15469 rc);
15470 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15471 mq_create = &mbox->u.mqe.un.mq_create;
15472 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15473 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15474 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15475 &mq_create->u.response);
15476 }
15477
15478 /* The IOCTL status is embedded in the mailbox subheader. */
15479 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15480 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15481 if (shdr_status || shdr_add_status || rc) {
15482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15483 "2502 MQ_CREATE mailbox failed with "
15484 "status x%x add_status x%x, mbx status x%x\n",
15485 shdr_status, shdr_add_status, rc);
15486 status = -ENXIO;
15487 goto out;
15488 }
15489 if (mq->queue_id == 0xFFFF) {
15490 status = -ENXIO;
15491 goto out;
15492 }
15493 mq->type = LPFC_MQ;
15494 mq->assoc_qid = cq->queue_id;
15495 mq->subtype = subtype;
15496 mq->host_index = 0;
15497 mq->hba_index = 0;
15498 mq->entry_repost = LPFC_MQ_REPOST;
15499
15500 /* link the mq onto the parent cq child list */
15501 list_add_tail(&mq->list, &cq->child_list);
15502 out:
15503 mempool_free(mbox, phba->mbox_mem_pool);
15504 return status;
15505 }
15506
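/*
 * Illustrative sketch (editorial addition, not part of the driver): a
 * typical caller hands lpfc_mq_create() an MQ and its parent CQ that
 * were already allocated by the driver's queue allocator, and relies
 * on the routine's internal fallback from MQ_CREATE_EXT to MQ_CREATE
 * on older firmware. The helper name is hypothetical.
 */
static int lpfc_example_setup_mq(struct lpfc_hba *phba,
				 struct lpfc_queue *mq,
				 struct lpfc_queue *cq)
{
	/* On success the MQ has been linked onto cq->child_list */
	return lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
}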
15507 /**
15508 * lpfc_wq_create - Create a Work Queue on the HBA
15509 * @phba: HBA structure that indicates port to create a queue on.
15510 * @wq: The queue structure to use to create the work queue.
15511 * @cq: The completion queue to bind this work queue to.
15512 * @subtype: The subtype of the work queue indicating its functionality.
15513 *
15514 * This function creates a work queue, as detailed in @wq, on a port, described
15515 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15516 *
15517 * The @phba struct is used to send the mailbox command to the HBA. The @wq
15518 * struct is used to get the entry count and entry size that are necessary to
15519 * determine the number of pages to allocate and use for this queue. The @cq
15520 * is used to indicate which completion queue to bind this work queue to. This
15521 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15522 * work queue. This function is synchronous and will wait for the mailbox
15523 * command to finish before continuing.
15524 *
15525 * On success this function will return a zero. If unable to allocate enough
15526 * memory this function will return -ENOMEM. If the queue create mailbox command
15527 * fails this function will return -ENXIO.
15528 **/
15529 int
15530 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15531 struct lpfc_queue *cq, uint32_t subtype)
15532 {
15533 struct lpfc_mbx_wq_create *wq_create;
15534 struct lpfc_dmabuf *dmabuf;
15535 LPFC_MBOXQ_t *mbox;
15536 int rc, length, status = 0;
15537 uint32_t shdr_status, shdr_add_status;
15538 union lpfc_sli4_cfg_shdr *shdr;
15539 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15540 struct dma_address *page;
15541 void __iomem *bar_memmap_p;
15542 uint32_t db_offset;
15543 uint16_t pci_barset;
15544 uint8_t dpp_barset;
15545 uint32_t dpp_offset;
15546 unsigned long pg_addr;
15547 uint8_t wq_create_version;
15548
15549 /* sanity check on queue memory */
15550 if (!wq || !cq)
15551 return -ENODEV;
15552 if (!phba->sli4_hba.pc_sli4_params.supported)
15553 hw_page_size = wq->page_size;
15554
15555 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15556 if (!mbox)
15557 return -ENOMEM;
15558 length = (sizeof(struct lpfc_mbx_wq_create) -
15559 sizeof(struct lpfc_sli4_cfg_mhdr));
15560 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15561 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15562 length, LPFC_SLI4_MBX_EMBED);
15563 wq_create = &mbox->u.mqe.un.wq_create;
15564 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15565 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15566 wq->page_count);
15567 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15568 cq->queue_id);
15569
15570 /* wqv is the earliest version supported, NOT the latest */
15571 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15572 phba->sli4_hba.pc_sli4_params.wqv);
15573
15574 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15575 (wq->page_size > SLI4_PAGE_SIZE))
15576 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15577 else
15578 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15579
15586 switch (wq_create_version) {
15587 case LPFC_Q_CREATE_VERSION_1:
15588 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15589 wq->entry_count);
15590 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15591 LPFC_Q_CREATE_VERSION_1);
15592
15593 switch (wq->entry_size) {
15594 default:
15595 case 64:
15596 bf_set(lpfc_mbx_wq_create_wqe_size,
15597 &wq_create->u.request_1,
15598 LPFC_WQ_WQE_SIZE_64);
15599 break;
15600 case 128:
15601 bf_set(lpfc_mbx_wq_create_wqe_size,
15602 &wq_create->u.request_1,
15603 LPFC_WQ_WQE_SIZE_128);
15604 break;
15605 }
15606 /* Request DPP by default */
15607 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15608 bf_set(lpfc_mbx_wq_create_page_size,
15609 &wq_create->u.request_1,
15610 (wq->page_size / SLI4_PAGE_SIZE));
15611 page = wq_create->u.request_1.page;
15612 break;
15613 default:
15614 page = wq_create->u.request.page;
15615 break;
15616 }
15617
15618 list_for_each_entry(dmabuf, &wq->page_list, list) {
15619 memset(dmabuf->virt, 0, hw_page_size);
15620 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15621 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15622 }
15623
15624 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15625 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15626
15627 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15628 /* The IOCTL status is embedded in the mailbox subheader. */
15629 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15630 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15631 if (shdr_status || shdr_add_status || rc) {
15632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15633 "2503 WQ_CREATE mailbox failed with "
15634 "status x%x add_status x%x, mbx status x%x\n",
15635 shdr_status, shdr_add_status, rc);
15636 status = -ENXIO;
15637 goto out;
15638 }
15639
15640 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15641 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15642 &wq_create->u.response);
15643 else
15644 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15645 &wq_create->u.response_1);
15646
15647 if (wq->queue_id == 0xFFFF) {
15648 status = -ENXIO;
15649 goto out;
15650 }
15651
15652 wq->db_format = LPFC_DB_LIST_FORMAT;
15653 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15654 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15655 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15656 &wq_create->u.response);
15657 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15658 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15660 "3265 WQ[%d] doorbell format "
15661 "not supported: x%x\n",
15662 wq->queue_id, wq->db_format);
15663 status = -EINVAL;
15664 goto out;
15665 }
15666 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15667 &wq_create->u.response);
15668 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15669 pci_barset);
15670 if (!bar_memmap_p) {
15671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15672 "3263 WQ[%d] failed to memmap "
15673 "pci barset:x%x\n",
15674 wq->queue_id, pci_barset);
15675 status = -ENOMEM;
15676 goto out;
15677 }
15678 db_offset = wq_create->u.response.doorbell_offset;
15679 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15680 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15682 "3252 WQ[%d] doorbell offset "
15683 "not supported: x%x\n",
15684 wq->queue_id, db_offset);
15685 status = -EINVAL;
15686 goto out;
15687 }
15688 wq->db_regaddr = bar_memmap_p + db_offset;
15689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15690 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15691 "format:x%x\n", wq->queue_id,
15692 pci_barset, db_offset, wq->db_format);
15693 } else
15694 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15695 } else {
15696 /* Check if DPP was honored by the firmware */
15697 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15698 &wq_create->u.response_1);
15699 if (wq->dpp_enable) {
15700 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15701 &wq_create->u.response_1);
15702 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15703 pci_barset);
15704 if (!bar_memmap_p) {
15705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15706 "3267 WQ[%d] failed to memmap "
15707 "pci barset:x%x\n",
15708 wq->queue_id, pci_barset);
15709 status = -ENOMEM;
15710 goto out;
15711 }
15712 db_offset = wq_create->u.response_1.doorbell_offset;
15713 wq->db_regaddr = bar_memmap_p + db_offset;
15714 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15715 &wq_create->u.response_1);
15716 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15717 &wq_create->u.response_1);
15718 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15719 dpp_barset);
15720 if (!bar_memmap_p) {
15721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15722 "3268 WQ[%d] failed to memmap "
15723 "pci barset:x%x\n",
15724 wq->queue_id, dpp_barset);
15725 status = -ENOMEM;
15726 goto out;
15727 }
15728 dpp_offset = wq_create->u.response_1.dpp_offset;
15729 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15730 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15731 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15732 "dpp_id:x%x dpp_barset:x%x "
15733 "dpp_offset:x%x\n",
15734 wq->queue_id, pci_barset, db_offset,
15735 wq->dpp_id, dpp_barset, dpp_offset);
15736
15737 /* Enable combined writes for DPP aperture */
15738 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15739#ifdef CONFIG_X86
15740 rc = set_memory_wc(pg_addr, 1);
15741 if (rc) {
15742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15743 "3272 Cannot setup Combined "
15744 "Write on WQ[%d] - disable DPP\n",
15745 wq->queue_id);
15746 phba->cfg_enable_dpp = 0;
15747 }
15748#else
15749 phba->cfg_enable_dpp = 0;
15750#endif
15751 } else
15752 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15753 }
15754 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15755 if (wq->pring == NULL) {
15756 status = -ENOMEM;
15757 goto out;
15758 }
15759 wq->type = LPFC_WQ;
15760 wq->assoc_qid = cq->queue_id;
15761 wq->subtype = subtype;
15762 wq->host_index = 0;
15763 wq->hba_index = 0;
15764 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15765
15766 /* link the wq onto the parent cq child list */
15767 list_add_tail(&wq->list, &cq->child_list);
15768out:
15769 mempool_free(mbox, phba->mbox_mem_pool);
15770 return status;
15771}
15772
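/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * binding a work queue to its parent CQ. Whether the doorbell ends up
 * in the regular WQ doorbell register or a DPP aperture is decided
 * inside lpfc_wq_create() from the firmware response; the caller only
 * supplies the queues and a functional subtype such as LPFC_FCP.
 * The helper name is hypothetical.
 */
static int lpfc_example_setup_wq(struct lpfc_hba *phba,
				 struct lpfc_queue *wq,
				 struct lpfc_queue *cq)
{
	/* wq->db_regaddr and wq->pring are valid only after this succeeds */
	return lpfc_wq_create(phba, wq, cq, LPFC_FCP);
}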
15773 /**
15774 * lpfc_rq_create - Create a Receive Queue on the HBA
15775 * @phba: HBA structure that indicates port to create a queue on.
15776 * @hrq: The queue structure to use to create the header receive queue.
15777 * @drq: The queue structure to use to create the data receive queue.
15778 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The queues' subtype.
15779 *
15780 * This function creates a receive buffer queue pair, as detailed in @hrq and
15781 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15782 * to the HBA.
15783 *
15784 * The @phba struct is used to send the mailbox command to the HBA. The @drq
15785 * and @hrq structs are used to get the entry count that is necessary to
15786 * determine the number of pages to use for this queue. The @cq is used to
15787 * indicate which completion queue to bind received buffers that are posted
15788 * to these queues to. This function will send the RQ_CREATE mailbox command
15789 * to the HBA to setup the receive queue pair. This function is synchronous
15790 * and will wait for the mailbox command to finish before continuing.
15791 *
15792 * On success this function will return a zero. If unable to allocate enough
15793 * memory this function will return -ENOMEM. If the queue create mailbox command
15794 * fails this function will return -ENXIO.
15795 **/
15796 int
15797 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15798 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15799 {
15800 struct lpfc_mbx_rq_create *rq_create;
15801 struct lpfc_dmabuf *dmabuf;
15802 LPFC_MBOXQ_t *mbox;
15803 int rc, length, status = 0;
15804 uint32_t shdr_status, shdr_add_status;
15805 union lpfc_sli4_cfg_shdr *shdr;
15806 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15807 void __iomem *bar_memmap_p;
15808 uint32_t db_offset;
15809 uint16_t pci_barset;
15810
15811 /* sanity check on queue memory */
15812 if (!hrq || !drq || !cq)
15813 return -ENODEV;
15814 if (!phba->sli4_hba.pc_sli4_params.supported)
15815 hw_page_size = SLI4_PAGE_SIZE;
15816
15817 if (hrq->entry_count != drq->entry_count)
15818 return -EINVAL;
15819 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15820 if (!mbox)
15821 return -ENOMEM;
15822 length = (sizeof(struct lpfc_mbx_rq_create) -
15823 sizeof(struct lpfc_sli4_cfg_mhdr));
15824 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15825 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15826 length, LPFC_SLI4_MBX_EMBED);
15827 rq_create = &mbox->u.mqe.un.rq_create;
15828 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15829 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15830 phba->sli4_hba.pc_sli4_params.rqv);
15831 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15832 bf_set(lpfc_rq_context_rqe_count_1,
15833 &rq_create->u.request.context,
15834 hrq->entry_count);
15835 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15836 bf_set(lpfc_rq_context_rqe_size,
15837 &rq_create->u.request.context,
15838 LPFC_RQE_SIZE_8);
15839 bf_set(lpfc_rq_context_page_size,
15840 &rq_create->u.request.context,
15841 LPFC_RQ_PAGE_SIZE_4096);
15842 } else {
15843 switch (hrq->entry_count) {
15844 default:
15845 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15846 "2535 Unsupported RQ count. (%d)\n",
15847 hrq->entry_count);
15848 if (hrq->entry_count < 512) {
15849 status = -EINVAL;
15850 goto out;
15851 }
15852 /* otherwise default to smallest count (fall through) */
15853 case 512:
15854 bf_set(lpfc_rq_context_rqe_count,
15855 &rq_create->u.request.context,
15856 LPFC_RQ_RING_SIZE_512);
15857 break;
15858 case 1024:
15859 bf_set(lpfc_rq_context_rqe_count,
15860 &rq_create->u.request.context,
15861 LPFC_RQ_RING_SIZE_1024);
15862 break;
15863 case 2048:
15864 bf_set(lpfc_rq_context_rqe_count,
15865 &rq_create->u.request.context,
15866 LPFC_RQ_RING_SIZE_2048);
15867 break;
15868 case 4096:
15869 bf_set(lpfc_rq_context_rqe_count,
15870 &rq_create->u.request.context,
15871 LPFC_RQ_RING_SIZE_4096);
15872 break;
15873 }
15874 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15875 LPFC_HDR_BUF_SIZE);
15876 }
15877 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15878 cq->queue_id);
15879 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15880 hrq->page_count);
15881 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15882 memset(dmabuf->virt, 0, hw_page_size);
15883 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15884 putPaddrLow(dmabuf->phys);
15885 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15886 putPaddrHigh(dmabuf->phys);
15887 }
15888 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15889 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15890
15891 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15892 /* The IOCTL status is embedded in the mailbox subheader. */
15893 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15894 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15895 if (shdr_status || shdr_add_status || rc) {
15896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15897 "2504 RQ_CREATE mailbox failed with "
15898 "status x%x add_status x%x, mbx status x%x\n",
15899 shdr_status, shdr_add_status, rc);
15900 status = -ENXIO;
15901 goto out;
15902 }
15903 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15904 if (hrq->queue_id == 0xFFFF) {
15905 status = -ENXIO;
15906 goto out;
15907 }
15908
15909 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15910 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15911 &rq_create->u.response);
15912 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15913 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15915 "3262 RQ [%d] doorbell format not "
15916 "supported: x%x\n", hrq->queue_id,
15917 hrq->db_format);
15918 status = -EINVAL;
15919 goto out;
15920 }
15921
15922 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15923 &rq_create->u.response);
15924 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15925 if (!bar_memmap_p) {
15926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15927 "3269 RQ[%d] failed to memmap pci "
15928 "barset:x%x\n", hrq->queue_id,
15929 pci_barset);
15930 status = -ENOMEM;
15931 goto out;
15932 }
15933
15934 db_offset = rq_create->u.response.doorbell_offset;
15935 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15936 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15938 "3270 RQ[%d] doorbell offset not "
15939 "supported: x%x\n", hrq->queue_id,
15940 db_offset);
15941 status = -EINVAL;
15942 goto out;
15943 }
15944 hrq->db_regaddr = bar_memmap_p + db_offset;
15945 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15946 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15947 "format:x%x\n", hrq->queue_id, pci_barset,
15948 db_offset, hrq->db_format);
15949 } else {
15950 hrq->db_format = LPFC_DB_RING_FORMAT;
15951 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15952 }
15953 hrq->type = LPFC_HRQ;
15954 hrq->assoc_qid = cq->queue_id;
15955 hrq->subtype = subtype;
15956 hrq->host_index = 0;
15957 hrq->hba_index = 0;
15958 hrq->entry_repost = LPFC_RQ_REPOST;
15959
15960 /* now create the data queue */
15961 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15962 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15963 length, LPFC_SLI4_MBX_EMBED);
15964 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15965 phba->sli4_hba.pc_sli4_params.rqv);
15966 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15967 bf_set(lpfc_rq_context_rqe_count_1,
15968 &rq_create->u.request.context, hrq->entry_count);
15969 if (subtype == LPFC_NVMET)
15970 rq_create->u.request.context.buffer_size =
15971 LPFC_NVMET_DATA_BUF_SIZE;
15972 else
15973 rq_create->u.request.context.buffer_size =
15974 LPFC_DATA_BUF_SIZE;
15975 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15976 LPFC_RQE_SIZE_8);
15977 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15978 (PAGE_SIZE/SLI4_PAGE_SIZE));
15979 } else {
15980 switch (drq->entry_count) {
15981 default:
15982 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15983 "2536 Unsupported RQ count. (%d)\n",
15984 drq->entry_count);
15985 if (drq->entry_count < 512) {
15986 status = -EINVAL;
15987 goto out;
15988 }
15989 /* otherwise default to smallest count (fall through) */
15990 case 512:
15991 bf_set(lpfc_rq_context_rqe_count,
15992 &rq_create->u.request.context,
15993 LPFC_RQ_RING_SIZE_512);
15994 break;
15995 case 1024:
15996 bf_set(lpfc_rq_context_rqe_count,
15997 &rq_create->u.request.context,
15998 LPFC_RQ_RING_SIZE_1024);
15999 break;
16000 case 2048:
16001 bf_set(lpfc_rq_context_rqe_count,
16002 &rq_create->u.request.context,
16003 LPFC_RQ_RING_SIZE_2048);
16004 break;
16005 case 4096:
16006 bf_set(lpfc_rq_context_rqe_count,
16007 &rq_create->u.request.context,
16008 LPFC_RQ_RING_SIZE_4096);
16009 break;
16010 }
16011 if (subtype == LPFC_NVMET)
16012 bf_set(lpfc_rq_context_buf_size,
16013 &rq_create->u.request.context,
16014 LPFC_NVMET_DATA_BUF_SIZE);
16015 else
16016 bf_set(lpfc_rq_context_buf_size,
16017 &rq_create->u.request.context,
16018 LPFC_DATA_BUF_SIZE);
16019 }
16020 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16021 cq->queue_id);
16022 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16023 drq->page_count);
16024 list_for_each_entry(dmabuf, &drq->page_list, list) {
16025 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16026 putPaddrLow(dmabuf->phys);
16027 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16028 putPaddrHigh(dmabuf->phys);
16029 }
16030 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16031 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16032 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16033 /* The IOCTL status is embedded in the mailbox subheader. */
16034 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16035 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16036 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16037 if (shdr_status || shdr_add_status || rc) {
16038 status = -ENXIO;
16039 goto out;
16040 }
16041 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16042 if (drq->queue_id == 0xFFFF) {
16043 status = -ENXIO;
16044 goto out;
16045 }
16046 drq->type = LPFC_DRQ;
16047 drq->assoc_qid = cq->queue_id;
16048 drq->subtype = subtype;
16049 drq->host_index = 0;
16050 drq->hba_index = 0;
16051 drq->entry_repost = LPFC_RQ_REPOST;
16052
16053 /* link the header and data RQs onto the parent cq child list */
16054 list_add_tail(&hrq->list, &cq->child_list);
16055 list_add_tail(&drq->list, &cq->child_list);
16056
16057out:
16058 mempool_free(mbox, phba->mbox_mem_pool);
16059 return status;
16060}
16061
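/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * header and data RQs are always created as a pair with matching entry
 * counts and share one parent CQ, mirroring how the driver brings up
 * its unsolicited-receive path (subtype LPFC_USOL). The helper name is
 * hypothetical.
 */
static int lpfc_example_setup_rq_pair(struct lpfc_hba *phba,
				      struct lpfc_queue *hdr_rq,
				      struct lpfc_queue *dat_rq,
				      struct lpfc_queue *cq)
{
	/* mirrors the routine's own -EINVAL check on mismatched counts */
	if (hdr_rq->entry_count != dat_rq->entry_count)
		return -EINVAL;
	return lpfc_rq_create(phba, hdr_rq, dat_rq, cq, LPFC_USOL);
}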
16062 /**
16063 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16064 * @phba: HBA structure that indicates port to create a queue on.
16065 * @hrqp: The queue structure array to use to create the header receive queues.
16066 * @drqp: The queue structure array to use to create the data receive queues.
16067 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The queues' subtype.
16068 *
16069 * This function creates a set of receive buffer queue pairs, as detailed in
16070 * @hrqp and @drqp, on a port, described by @phba by sending a RQ_CREATE
16071 * mailbox command to the HBA.
16072 *
16073 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
16074 * and @hrqp structs are used to get the entry counts that are necessary to
16075 * determine the number of pages to use for each queue. The @cqp array is used
16076 * to indicate which completion queues to bind received buffers that are posted
16077 * to these queues to. This function will send the RQ_CREATE mailbox command to
16078 * the HBA to setup the receive queue pairs. This function is synchronous and
16079 * will wait for the mailbox command to finish before continuing.
16080 *
16081 * On success this function will return a zero. If unable to allocate enough
16082 * memory this function will return -ENOMEM. If the queue create mailbox command
16083 * fails this function will return -ENXIO.
16084 **/
16085 int
16086 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16087 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16088 uint32_t subtype)
16089 {
16090 struct lpfc_queue *hrq, *drq, *cq;
16091 struct lpfc_mbx_rq_create_v2 *rq_create;
16092 struct lpfc_dmabuf *dmabuf;
16093 LPFC_MBOXQ_t *mbox;
16094 int rc, length, alloclen, status = 0;
16095 int cnt, idx, numrq, page_idx = 0;
16096 uint32_t shdr_status, shdr_add_status;
16097 union lpfc_sli4_cfg_shdr *shdr;
16098 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16099
16100 numrq = phba->cfg_nvmet_mrq;
16101 /* sanity check on array memory */
16102 if (!hrqp || !drqp || !cqp || !numrq)
16103 return -ENODEV;
16104 if (!phba->sli4_hba.pc_sli4_params.supported)
16105 hw_page_size = SLI4_PAGE_SIZE;
16106
16107 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16108 if (!mbox)
16109 return -ENOMEM;
16110
16111 length = sizeof(struct lpfc_mbx_rq_create_v2);
16112 length += ((2 * numrq * hrqp[0]->page_count) *
16113 sizeof(struct dma_address));
16114
16115 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16116 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16117 LPFC_SLI4_MBX_NEMBED);
16118 if (alloclen < length) {
16119 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16120 "3099 Allocated DMA memory size (%d) is "
16121 "less than the requested DMA memory size "
16122 "(%d)\n", alloclen, length);
16123 status = -ENOMEM;
16124 goto out;
16125 }
16126
16129 rq_create = mbox->sge_array->addr[0];
16130 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16131
16132 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16133 cnt = 0;
16134
16135 for (idx = 0; idx < numrq; idx++) {
16136 hrq = hrqp[idx];
16137 drq = drqp[idx];
16138 cq = cqp[idx];
16139
16140 /* sanity check on queue memory */
16141 if (!hrq || !drq || !cq) {
16142 status = -ENODEV;
16143 goto out;
16144 }
16145
16146 if (hrq->entry_count != drq->entry_count) {
16147 status = -EINVAL;
16148 goto out;
16149 }
16150
16151 if (idx == 0) {
16152 bf_set(lpfc_mbx_rq_create_num_pages,
16153 &rq_create->u.request,
16154 hrq->page_count);
16155 bf_set(lpfc_mbx_rq_create_rq_cnt,
16156 &rq_create->u.request, (numrq * 2));
16157 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16158 1);
16159 bf_set(lpfc_rq_context_base_cq,
16160 &rq_create->u.request.context,
16161 cq->queue_id);
16162 bf_set(lpfc_rq_context_data_size,
16163 &rq_create->u.request.context,
16164 LPFC_NVMET_DATA_BUF_SIZE);
16165 bf_set(lpfc_rq_context_hdr_size,
16166 &rq_create->u.request.context,
16167 LPFC_HDR_BUF_SIZE);
16168 bf_set(lpfc_rq_context_rqe_count_1,
16169 &rq_create->u.request.context,
16170 hrq->entry_count);
16171 bf_set(lpfc_rq_context_rqe_size,
16172 &rq_create->u.request.context,
16173 LPFC_RQE_SIZE_8);
16174 bf_set(lpfc_rq_context_page_size,
16175 &rq_create->u.request.context,
16176 (PAGE_SIZE/SLI4_PAGE_SIZE));
16177 }
16178 rc = 0;
16179 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16180 memset(dmabuf->virt, 0, hw_page_size);
16181 cnt = page_idx + dmabuf->buffer_tag;
16182 rq_create->u.request.page[cnt].addr_lo =
16183 putPaddrLow(dmabuf->phys);
16184 rq_create->u.request.page[cnt].addr_hi =
16185 putPaddrHigh(dmabuf->phys);
16186 rc++;
16187 }
16188 page_idx += rc;
16189
16190 rc = 0;
16191 list_for_each_entry(dmabuf, &drq->page_list, list) {
16192 memset(dmabuf->virt, 0, hw_page_size);
16193 cnt = page_idx + dmabuf->buffer_tag;
16194 rq_create->u.request.page[cnt].addr_lo =
16195 putPaddrLow(dmabuf->phys);
16196 rq_create->u.request.page[cnt].addr_hi =
16197 putPaddrHigh(dmabuf->phys);
16198 rc++;
16199 }
16200 page_idx += rc;
16201
16202 hrq->db_format = LPFC_DB_RING_FORMAT;
16203 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16204 hrq->type = LPFC_HRQ;
16205 hrq->assoc_qid = cq->queue_id;
16206 hrq->subtype = subtype;
16207 hrq->host_index = 0;
16208 hrq->hba_index = 0;
16209 hrq->entry_repost = LPFC_RQ_REPOST;
16210
16211 drq->db_format = LPFC_DB_RING_FORMAT;
16212 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16213 drq->type = LPFC_DRQ;
16214 drq->assoc_qid = cq->queue_id;
16215 drq->subtype = subtype;
16216 drq->host_index = 0;
16217 drq->hba_index = 0;
16218 drq->entry_repost = LPFC_RQ_REPOST;
16219
16220 list_add_tail(&hrq->list, &cq->child_list);
16221 list_add_tail(&drq->list, &cq->child_list);
16222 }
16223
16224 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16225 /* The IOCTL status is embedded in the mailbox subheader. */
16226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16228 if (shdr_status || shdr_add_status || rc) {
16229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16230 "3120 RQ_CREATE mailbox failed with "
16231 "status x%x add_status x%x, mbx status x%x\n",
16232 shdr_status, shdr_add_status, rc);
16233 status = -ENXIO;
16234 goto out;
16235 }
16236 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16237 if (rc == 0xFFFF) {
16238 status = -ENXIO;
16239 goto out;
16240 }
16241
16242 /* Initialize all RQs with associated queue id */
16243 for (idx = 0; idx < numrq; idx++) {
16244 hrq = hrqp[idx];
16245 hrq->queue_id = rc + (2 * idx);
16246 drq = drqp[idx];
16247 drq->queue_id = rc + (2 * idx) + 1;
16248 }
16249
16250out:
16251 lpfc_sli4_mbox_cmd_free(phba, mbox);
16252 return status;
16253}
16254
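/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * after lpfc_mrq_create() succeeds, the firmware hands back one base
 * queue id and the driver derives the per-pair ids from it, so the
 * hrq/drq ids interleave as base, base+1, base+2, base+3, and so on.
 * The helper name is hypothetical.
 */
static void lpfc_example_dump_mrq_ids(struct lpfc_queue **hrqp,
				      struct lpfc_queue **drqp,
				      int numrq)
{
	int idx;

	for (idx = 0; idx < numrq; idx++)
		pr_info("mrq[%d]: hrq id %u, drq id %u\n", idx,
			hrqp[idx]->queue_id, drqp[idx]->queue_id);
}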
16255 /**
16256 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
16257 * @eq: The queue structure associated with the queue to destroy.
16258 *
16259 * This function destroys a queue, as detailed in @eq by sending a mailbox
16260 * command, specific to the type of queue, to the HBA.
16261 *
16262 * The @eq struct is used to get the queue ID of the queue to destroy.
16263 *
16264 * On success this function will return a zero. If the queue destroy mailbox
16265 * command fails this function will return -ENXIO.
16266 **/
16267 int
16268 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16269 {
16270 LPFC_MBOXQ_t *mbox;
16271 int rc, length, status = 0;
16272 uint32_t shdr_status, shdr_add_status;
16273 union lpfc_sli4_cfg_shdr *shdr;
16274
16275 /* sanity check on queue memory */
16276 if (!eq)
16277 return -ENODEV;
16278 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16279 if (!mbox)
16280 return -ENOMEM;
16281 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16282 sizeof(struct lpfc_sli4_cfg_mhdr));
16283 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16284 LPFC_MBOX_OPCODE_EQ_DESTROY,
16285 length, LPFC_SLI4_MBX_EMBED);
16286 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16287 eq->queue_id);
16288 mbox->vport = eq->phba->pport;
16289 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16290
16291 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16292 /* The IOCTL status is embedded in the mailbox subheader. */
16293 shdr = (union lpfc_sli4_cfg_shdr *)
16294 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16295 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16296 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16297 if (shdr_status || shdr_add_status || rc) {
16298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16299 "2505 EQ_DESTROY mailbox failed with "
16300 "status x%x add_status x%x, mbx status x%x\n",
16301 shdr_status, shdr_add_status, rc);
16302 status = -ENXIO;
16303 }
16304
16305 /* Remove eq from any list */
16306 list_del_init(&eq->list);
16307 mempool_free(mbox, eq->phba->mbox_mem_pool);
16308 return status;
16309}
16310
16311 /**
16312 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
16313 * @cq: The queue structure associated with the queue to destroy.
16314 *
16315 * This function destroys a queue, as detailed in @cq by sending a mailbox
16316 * command, specific to the type of queue, to the HBA.
16317 *
16318 * The @cq struct is used to get the queue ID of the queue to destroy.
16319 *
16320 * On success this function will return a zero. If the queue destroy mailbox
16321 * command fails this function will return -ENXIO.
16322 **/
16323 int
16324 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16325 {
16326 LPFC_MBOXQ_t *mbox;
16327 int rc, length, status = 0;
16328 uint32_t shdr_status, shdr_add_status;
16329 union lpfc_sli4_cfg_shdr *shdr;
16330
16331 /* sanity check on queue memory */
16332 if (!cq)
16333 return -ENODEV;
16334 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16335 if (!mbox)
16336 return -ENOMEM;
16337 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16338 sizeof(struct lpfc_sli4_cfg_mhdr));
16339 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16340 LPFC_MBOX_OPCODE_CQ_DESTROY,
16341 length, LPFC_SLI4_MBX_EMBED);
16342 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16343 cq->queue_id);
16344 mbox->vport = cq->phba->pport;
16345 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16346 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16347 /* The IOCTL status is embedded in the mailbox subheader. */
16348 shdr = (union lpfc_sli4_cfg_shdr *)
16349 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16350 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16351 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16352 if (shdr_status || shdr_add_status || rc) {
16353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16354 "2506 CQ_DESTROY mailbox failed with "
16355 "status x%x add_status x%x, mbx status x%x\n",
16356 shdr_status, shdr_add_status, rc);
16357 status = -ENXIO;
16358 }
16359 /* Remove cq from any list */
16360 list_del_init(&cq->list);
16361 mempool_free(mbox, cq->phba->mbox_mem_pool);
16362 return status;
16363}
16364
16365 /**
16366 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
16367 * @mq: The queue structure associated with the queue to destroy.
16368 *
16369 * This function destroys a queue, as detailed in @mq by sending a mailbox
16370 * command, specific to the type of queue, to the HBA.
16371 *
16372 * The @mq struct is used to get the queue ID of the queue to destroy.
16373 *
16374 * On success this function will return a zero. If the queue destroy mailbox
16375 * command fails this function will return -ENXIO.
16376 **/
16377 int
16378 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16379 {
16380 LPFC_MBOXQ_t *mbox;
16381 int rc, length, status = 0;
16382 uint32_t shdr_status, shdr_add_status;
16383 union lpfc_sli4_cfg_shdr *shdr;
16384
16385 /* sanity check on queue memory */
16386 if (!mq)
16387 return -ENODEV;
16388 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16389 if (!mbox)
16390 return -ENOMEM;
16391 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16392 sizeof(struct lpfc_sli4_cfg_mhdr));
16393 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16394 LPFC_MBOX_OPCODE_MQ_DESTROY,
16395 length, LPFC_SLI4_MBX_EMBED);
16396 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16397 mq->queue_id);
16398 mbox->vport = mq->phba->pport;
16399 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16400 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16401 /* The IOCTL status is embedded in the mailbox subheader. */
16402 shdr = (union lpfc_sli4_cfg_shdr *)
16403 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16404 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16405 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16406 if (shdr_status || shdr_add_status || rc) {
16407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16408 "2507 MQ_DESTROY mailbox failed with "
16409 "status x%x add_status x%x, mbx status x%x\n",
16410 shdr_status, shdr_add_status, rc);
16411 status = -ENXIO;
16412 }
16413 /* Remove mq from any list */
16414 list_del_init(&mq->list);
16415 mempool_free(mbox, mq->phba->mbox_mem_pool);
16416 return status;
16417}
16418
16419 /**
16420 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
16421 * @wq: The queue structure associated with the queue to destroy.
16422 *
16423 * This function destroys a queue, as detailed in @wq by sending a mailbox
16424 * command, specific to the type of queue, to the HBA.
16425 *
16426 * The @wq struct is used to get the queue ID of the queue to destroy.
16427 *
16428 * On success this function will return a zero. If the queue destroy mailbox
16429 * command fails this function will return -ENXIO.
16430 **/
16431 int
16432 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16433 {
16434 LPFC_MBOXQ_t *mbox;
16435 int rc, length, status = 0;
16436 uint32_t shdr_status, shdr_add_status;
16437 union lpfc_sli4_cfg_shdr *shdr;
16438
16439 /* sanity check on queue memory */
16440 if (!wq)
16441 return -ENODEV;
16442 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16443 if (!mbox)
16444 return -ENOMEM;
16445 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16446 sizeof(struct lpfc_sli4_cfg_mhdr));
16447 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16448 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16449 length, LPFC_SLI4_MBX_EMBED);
16450 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16451 wq->queue_id);
16452 mbox->vport = wq->phba->pport;
16453 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16454 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16455 shdr = (union lpfc_sli4_cfg_shdr *)
16456 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16457 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16458 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16459 if (shdr_status || shdr_add_status || rc) {
16460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16461 "2508 WQ_DESTROY mailbox failed with "
16462 "status x%x add_status x%x, mbx status x%x\n",
16463 shdr_status, shdr_add_status, rc);
16464 status = -ENXIO;
16465 }
16466 /* Remove wq from any list */
16467 list_del_init(&wq->list);
16468 kfree(wq->pring);
16469 wq->pring = NULL;
16470 mempool_free(mbox, wq->phba->mbox_mem_pool);
16471 return status;
16472}
16473
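/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * teardown runs child-first. Each destroy routine unlinks the queue
 * from its parent CQ's child_list, so a WQ (or RQ pair) should be
 * destroyed before the completion queue it was bound to. The helper
 * name is hypothetical.
 */
static void lpfc_example_teardown_wq(struct lpfc_hba *phba,
				     struct lpfc_queue *wq,
				     struct lpfc_queue *cq)
{
	lpfc_wq_destroy(phba, wq);	/* child first */
	lpfc_cq_destroy(phba, cq);	/* then the parent CQ */
}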
16474 /**
16475 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
16476 * @phba: HBA structure that indicates port to destroy a queue on.
16477 * @hrq: The header receive queue to destroy.
16478 * @drq: The data receive queue to destroy.
16479 *
16480 * This function destroys the receive queue pair, as detailed in @hrq and
16481 * @drq, by sending RQ_DESTROY mailbox commands to the HBA.
16482 *
16483 * On success this function will return a zero. If a queue destroy mailbox
16484 * command fails this function will return -ENXIO.
16485 **/
16486 int
16487 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16488 struct lpfc_queue *drq)
16489 {
16490 LPFC_MBOXQ_t *mbox;
16491 int rc, length, status = 0;
16492 uint32_t shdr_status, shdr_add_status;
16493 union lpfc_sli4_cfg_shdr *shdr;
16494
16495 /* sanity check on queue memory */
16496 if (!hrq || !drq)
16497 return -ENODEV;
16498 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16499 if (!mbox)
16500 return -ENOMEM;
16501 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16502 sizeof(struct lpfc_sli4_cfg_mhdr));
16503 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16504 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16505 length, LPFC_SLI4_MBX_EMBED);
16506 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16507 hrq->queue_id);
16508 mbox->vport = hrq->phba->pport;
16509 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16510 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16511 /* The IOCTL status is embedded in the mailbox subheader. */
16512 shdr = (union lpfc_sli4_cfg_shdr *)
16513 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16514 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16515 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16516 if (shdr_status || shdr_add_status || rc) {
16517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16518 "2509 RQ_DESTROY mailbox failed with "
16519 "status x%x add_status x%x, mbx status x%x\n",
16520 shdr_status, shdr_add_status, rc);
16521 if (rc != MBX_TIMEOUT)
16522 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16523 return -ENXIO;
16524 }
16525 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16526 drq->queue_id);
16527 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16528 shdr = (union lpfc_sli4_cfg_shdr *)
16529 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16530 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16531 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16532 if (shdr_status || shdr_add_status || rc) {
16533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16534 "2510 RQ_DESTROY mailbox failed with "
16535 "status x%x add_status x%x, mbx status x%x\n",
16536 shdr_status, shdr_add_status, rc);
16537 status = -ENXIO;
16538 }
16539 list_del_init(&hrq->list);
16540 list_del_init(&drq->list);
16541 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16542 return status;
16543}
16544
16545 /**
16546 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16547 * @phba: pointer to lpfc hba data structure.
16548 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16549 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16550 * @xritag: the xritag that ties this io to the SGL pages.
16551 *
16552 * This routine will post the sgl pages for the IO that has the xritag
16553 * that is in the iocbq structure. The xritag is assigned during iocbq
16554 * creation and persists for as long as the driver is loaded.
16555 * If the caller has fewer than 256 scatter gather segments to map then
16556 * pdma_phys_addr1 should be 0.
16557 * If the caller needs to map more than 256 scatter gather segments then
16558 * pdma_phys_addr1 should be a valid physical address.
16559 * Physical addresses for SGLs must be 64 byte aligned.
16560 * If you are going to map 2 SGLs then the first one must have 256 entries
16561 * and the second sgl can have between 1 and 256 entries.
16562 *
16563 * Return codes:
16564 * 0 - Success
16565 * -ENXIO, -ENOMEM - Failure
16566 **/
16567 int
16568 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16569 dma_addr_t pdma_phys_addr0,
16570 dma_addr_t pdma_phys_addr1,
16571 uint16_t xritag)
16572 {
16573 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16574 LPFC_MBOXQ_t *mbox;
16575 int rc;
16576 uint32_t shdr_status, shdr_add_status;
16577 uint32_t mbox_tmo;
16578 union lpfc_sli4_cfg_shdr *shdr;
16579
16580 if (xritag == NO_XRI) {
16581 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16582 "0364 Invalid param:\n");
16583 return -EINVAL;
16584 }
16585
16586 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16587 if (!mbox)
16588 return -ENOMEM;
16589
16590 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16591 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16592 sizeof(struct lpfc_mbx_post_sgl_pages) -
16593 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16594
16595 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16596 &mbox->u.mqe.un.post_sgl_pages;
16597 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16598 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16599
16600 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16601 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16602 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16603 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16604
16605 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16606 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16607 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16608 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16609 if (!phba->sli4_hba.intr_enable)
16610 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16611 else {
16612 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16613 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16614 }
16615 /* The IOCTL status is embedded in the mailbox subheader. */
16616 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16617 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16618 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16619 if (rc != MBX_TIMEOUT)
16620 mempool_free(mbox, phba->mbox_mem_pool);
16621 if (shdr_status || shdr_add_status || rc) {
16622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16623 "2511 POST_SGL mailbox failed with "
16624 "status x%x add_status x%x, mbx status x%x\n",
16625 shdr_status, shdr_add_status, rc);
return -ENXIO;
16626 }
16627 return 0;
16628 }
16629
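/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * posting a single-page SGL for a freshly allocated xritag. With one
 * page (up to 256 segments) the second physical address is simply 0.
 * The helper name is hypothetical.
 */
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     dma_addr_t sgl_phys)
{
	uint16_t xritag = lpfc_sli4_next_xritag(phba);

	if (xritag == NO_XRI)
		return -ENOMEM;
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}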
16630 /**
16631 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16632 * @phba: pointer to lpfc hba data structure.
16633 *
16634 * This routine is invoked to allocate the next available xri from the
16635 * driver's logical xri bitmask, consistent with the SLI-4 interface
16636 * spec. Because the index is logical, the search starts at 0 on
16637 * every call.
16638 *
16639 * Returns
16640 * A logical xri in the range 0 <= xri < max_xri if successful,
16641 * NO_XRI if no xris are available.
16642 **/
16643 static uint16_t
16644 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16645 {
16646 unsigned long xri;
16647
16648 /*
16649 * Fetch the next logical xri. Because this index is logical,
16650 * the driver starts at 0 each time.
16651 */
16652 spin_lock_irq(&phba->hbalock);
16653 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16654 phba->sli4_hba.max_cfg_param.max_xri, 0);
16655 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16656 spin_unlock_irq(&phba->hbalock);
16657 return NO_XRI;
16658 } else {
16659 set_bit(xri, phba->sli4_hba.xri_bmask);
16660 phba->sli4_hba.max_cfg_param.xri_used++;
16661 }
16662 spin_unlock_irq(&phba->hbalock);
16663 return xri;
16664}
16665
16666 /**
16667 * __lpfc_sli4_free_xri - Release an xri for reuse (hbalock held).
16668 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
16669 *
16670 * This routine is invoked to release an xri to the pool of
16671 * available xris maintained by the driver.
16672 **/
16673 static void
16674 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16675 {
16676 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16677 phba->sli4_hba.max_cfg_param.xri_used--;
16678 }
16679}
16680
16681/**
16682 * lpfc_sli4_free_xri - Release an xri for reuse.
16683 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
16684 *
16685 * This routine is invoked to release an xri to the pool of
16686 * available xris maintained by the driver.
16687 **/
16688void
16689lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16690{
16691 spin_lock_irq(&phba->hbalock);
16692 __lpfc_sli4_free_xri(phba, xri);
16693 spin_unlock_irq(&phba->hbalock);
16694}
16695
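/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * the xri bitmask is guarded by hbalock, so callers either use the
 * locking wrapper lpfc_sli4_free_xri() or call __lpfc_sli4_free_xri()
 * themselves while already holding phba->hbalock, as the wrapper does.
 * The helper name is hypothetical.
 */
static void lpfc_example_release_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);	/* lock-held variant */
	spin_unlock_irq(&phba->hbalock);
}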
16696/**
16697 * lpfc_sli4_next_xritag - Get an xritag for the io
16698 * @phba: Pointer to HBA context object.
16699 *
16700 * This function gets an xritag for the iocb. If there is no unused xritag
16701 * it will return NO_XRI (0xffff), which is not a valid xritag.
16702 * The function returns the allocated xritag if successful.
16704 * The caller is not required to hold any lock.
16705 **/
16706 uint16_t
16707 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16708 {
16709 uint16_t xri_index;
16710
16711 xri_index = lpfc_sli4_alloc_xri(phba);
16712 if (xri_index == NO_XRI)
16713 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16714 "2004 Failed to allocate XRI.last XRITAG is %d"
16715 " Max XRI is %d, Used XRI is %d\n",
16716 xri_index,
16717 phba->sli4_hba.max_cfg_param.max_xri,
16718 phba->sli4_hba.max_cfg_param.xri_used);
16719 return xri_index;
16720}
16721
16722/**
16723 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16724 * @phba: pointer to lpfc hba data structure.
16725 * @post_sgl_list: pointer to els sgl entry list.
16726 * @post_cnt: number of els sgl entries on the list.
16727 *
16728 * This routine is invoked to post a block of driver's sgl pages to the
16729 * HBA using non-embedded mailbox command. No Lock is held. This routine
16730 * is only called when the driver is loading and after all IO has been
16731 * stopped.
16732 **/
16733 static int
16734 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16735 struct list_head *post_sgl_list,
16736 int post_cnt)
16737 {
16738 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16739 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16740 struct sgl_page_pairs *sgl_pg_pairs;
16741 void *viraddr;
16742 LPFC_MBOXQ_t *mbox;
16743 uint32_t reqlen, alloclen, pg_pairs;
16744 uint32_t mbox_tmo;
16745 uint16_t xritag_start = 0;
16746 int rc = 0;
16747 uint32_t shdr_status, shdr_add_status;
16748 union lpfc_sli4_cfg_shdr *shdr;
16749
16750 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16751 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16752 if (reqlen > SLI4_PAGE_SIZE) {
16753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16754 "2559 Block sgl registration required DMA "
16755 "size (%d) greater than a page\n", reqlen);
16756 return -ENOMEM;
16757 }
16758
16759 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16760 if (!mbox)
16761 return -ENOMEM;
16762
16763 /* Allocate DMA memory and set up the non-embedded mailbox command */
16764 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16765 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16766 LPFC_SLI4_MBX_NEMBED);
16767
16768 if (alloclen < reqlen) {
16769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16770 "0285 Allocated DMA memory size (%d) is "
16771 "less than the requested DMA memory "
16772 "size (%d)\n", alloclen, reqlen);
16773 lpfc_sli4_mbox_cmd_free(phba, mbox);
16774 return -ENOMEM;
16775 }
16776 /* Set up the SGL pages in the non-embedded DMA pages */
16777 viraddr = mbox->sge_array->addr[0];
16778 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16779 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16780
16781 pg_pairs = 0;
16782 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16783 /* Set up the sge entry */
16784 sgl_pg_pairs->sgl_pg0_addr_lo =
16785 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16786 sgl_pg_pairs->sgl_pg0_addr_hi =
16787 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16788 sgl_pg_pairs->sgl_pg1_addr_lo =
16789 cpu_to_le32(putPaddrLow(0));
16790 sgl_pg_pairs->sgl_pg1_addr_hi =
16791 cpu_to_le32(putPaddrHigh(0));
16792
16793 /* Keep the first xritag on the list */
16794 if (pg_pairs == 0)
16795 xritag_start = sglq_entry->sli4_xritag;
16796 sgl_pg_pairs++;
16797 pg_pairs++;
16798 }
16799
16800 /* Complete initialization and perform endian conversion. */
16801 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16802 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16803 sgl->word0 = cpu_to_le32(sgl->word0);
16804
16805 if (!phba->sli4_hba.intr_enable)
16806 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16807 else {
16808 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16809 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16810 }
16811 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16812 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16813 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16814 if (rc != MBX_TIMEOUT)
16815 lpfc_sli4_mbox_cmd_free(phba, mbox);
16816 if (shdr_status || shdr_add_status || rc) {
16817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16818 "2513 POST_SGL_BLOCK mailbox command failed "
16819 "status x%x add_status x%x mbx status x%x\n",
16820 shdr_status, shdr_add_status, rc);
16821 rc = -ENXIO;
16822 }
16823 return rc;
16824}
16825
16826/**
16827 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
16828 * @phba: pointer to lpfc hba data structure.
16829 * @sblist: pointer to scsi buffer list.
16830 * @count: number of scsi buffers on the list.
16831 *
16832 * This routine is invoked to post a block of @count scsi sgl pages from a
16833 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
16834 * No Lock is held.
16835 *
16836 **/
16837 int
16838 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
16839 struct list_head *sblist,
16840 int count)
16841{
16842 struct lpfc_scsi_buf *psb;
16843 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16844 struct sgl_page_pairs *sgl_pg_pairs;
16845 void *viraddr;
16846 LPFC_MBOXQ_t *mbox;
16847 uint32_t reqlen, alloclen, pg_pairs;
16848 uint32_t mbox_tmo;
16849 uint16_t xritag_start = 0;
16850 int rc = 0;
16851 uint32_t shdr_status, shdr_add_status;
16852 dma_addr_t pdma_phys_bpl1;
16853 union lpfc_sli4_cfg_shdr *shdr;
16854
16855 /* Calculate the requested length of the dma memory */
8a9d2e80 16856 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 16857 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16858 if (reqlen > SLI4_PAGE_SIZE) {
4f774513
JS
16859 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16860 "0217 Block sgl registration required DMA "
16861 "size (%d) great than a page\n", reqlen);
16862 return -ENOMEM;
16863 }
16864 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16865 if (!mbox) {
16866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16867 "0283 Failed to allocate mbox cmd memory\n");
16868 return -ENOMEM;
16869 }
16870
16871 /* Allocate DMA memory and set up the non-embedded mailbox command */
16872 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16873 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16874 LPFC_SLI4_MBX_NEMBED);
16875
16876 if (alloclen < reqlen) {
16877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16878 "2561 Allocated DMA memory size (%d) is "
16879 "less than the requested DMA memory "
16880 "size (%d)\n", alloclen, reqlen);
16881 lpfc_sli4_mbox_cmd_free(phba, mbox);
16882 return -ENOMEM;
16883 }
6d368e53 16884
4f774513 16885 /* Get the first SGE entry from the non-embedded DMA memory */
4f774513
JS
16886 viraddr = mbox->sge_array->addr[0];
16887
16888 /* Set up the SGL pages in the non-embedded DMA pages */
16889 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16890 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16891
16892 pg_pairs = 0;
16893 list_for_each_entry(psb, sblist, list) {
16894 /* Set up the sge entry */
16895 sgl_pg_pairs->sgl_pg0_addr_lo =
16896 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
16897 sgl_pg_pairs->sgl_pg0_addr_hi =
16898 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16899 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16900 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16901 else
16902 pdma_phys_bpl1 = 0;
16903 sgl_pg_pairs->sgl_pg1_addr_lo =
16904 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16905 sgl_pg_pairs->sgl_pg1_addr_hi =
16906 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16907 /* Keep the first xritag on the list */
16908 if (pg_pairs == 0)
16909 xritag_start = psb->cur_iocbq.sli4_xritag;
16910 sgl_pg_pairs++;
16911 pg_pairs++;
16912 }
16913 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16914 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16915 /* Perform endian conversion if necessary */
16916 sgl->word0 = cpu_to_le32(sgl->word0);
16917
16918 if (!phba->sli4_hba.intr_enable)
16919 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16920 else {
a183a15f 16921 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4f774513
JS
16922 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16923 }
16924 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16925 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16926 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16927 if (rc != MBX_TIMEOUT)
16928 lpfc_sli4_mbox_cmd_free(phba, mbox);
16929 if (shdr_status || shdr_add_status || rc) {
16930 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16931 "2564 POST_SGL_BLOCK mailbox command failed "
16932 "status x%x add_status x%x mbx status x%x\n",
16933 shdr_status, shdr_add_status, rc);
16934 rc = -ENXIO;
16935 }
16936 return rc;
16937}
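
/*
 * Sizing sketch for both POST_SGL_PAGES variants above (illustration
 * only; the concrete structure sizes below are assumptions, not taken
 * from the headers): each posted buffer consumes one struct
 * sgl_page_pairs plus a shared config sub-header and trailing word0,
 * and the request must fit in one SLI4 page. With, say, a 16-byte
 * sgl_page_pairs and an 8-byte cfg_shdr, a 4096-byte page caps count
 * at (4096 - 12) / 16 = 255 buffers per mailbox command.
 */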

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. It returns zero if
 * the frame is valid and a non-zero value if the frame does not pass the
 * check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_MDS_DIAGS:		/* MDS Diagnostics */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
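
/*
 * Caller-side sketch: the receive path treats any non-zero return from
 * lpfc_fc_frame_check() as "drop the frame", exactly as
 * lpfc_sli4_handle_received_buffer() does later in this file:
 */
#if 0	/* illustration only */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
#endif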

/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. It returns the VFI if a VFT header is present,
 * or 0 if none exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FCF index the frame was received on
 * @did: The D_ID the frame is directed to
 *
 * This function searches the @phba for a vport that matches the content of
 * the @fc_hdr passed in and the @fcfi. It uses the @fc_hdr to fetch the VFI,
 * if the Virtual Fabric Tagging Header exists, and the DID. It returns the
 * matching vport pointer or NULL if unable to match the frame to a vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time stamp
 * indicates that there is at least one timed out sequence, this routine will
 * go through the received sequences one at a time, from most inactive to most
 * active, to determine which ones need to be cleaned up. Once it has
 * determined that a sequence needs to be cleaned up, it will simply free up
 * the resources without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
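
/*
 * Timeout arithmetic sketch (illustration only; 2000 ms is assumed
 * here as a typical E_D_TOV, not read from the port): a sequence
 * stamped at jiffies J is considered stale once jiffies passes
 * J + msecs_to_jiffies(2000), which is the time_before() test above
 * inverted.
 */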

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame,
 * the function adds the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
 * This function returns a pointer to the first dmabuf in the sequence list
 * that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
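
/*
 * Ordering sketch: frames of a sequence are kept sorted by SEQ_CNT.
 * The insertion walk above starts at the list tail (the highest
 * SEQ_CNT seen so far) and steps backwards, so for arrivals with
 * SEQ_CNT 0, 2, 1 the walk for "1" starts at "2", backs up to "0",
 * and inserts after it, yielding 0, 1, 2.
 */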

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees all
 * the frames of the partially assembled sequence.
 *
 * Return
 * true -- if there is a matching partially assembled sequence present and
 * all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 * nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the sequence that was assembled and handed
 * to the upper level protocol, described by the information from the basic
 * abort @dmabuf. It checks whether such a pending context exists at the
 * upper level protocol. If so, it cleans up the pending context.
 *
 * Return
 * true -- if there is a matching pending context of the sequence cleaned
 * at ulp;
 * false -- if there is no matching pending context of the sequence present
 * at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Let all interested upper level protocols handle the abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
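
/*
 * Usage sketch: callers translate a wire XRI into the driver's logical
 * index and treat NO_XRI as "not ours", as lpfc_sli4_seq_abort_rsp()
 * does below. Note the lookup is a linear scan over max_xri entries:
 */
#if 0	/* illustration only */
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI)
		return;		/* XRI not owned by this driver */
#endif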

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: Pointer to the vport on which the abort was received.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the sequence handling was successfully aborted; when
 *           false a BA_RJT is sent instead of a BA_ACC.
 *
 * This function sends a basic response (BA_ACC or BA_RJT) to a previous
 * unsolicited sequence abort event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to activate ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
 * that the unsolicited sequence has been aborted. After that, it will issue
 * a basic accept or reject in response to the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are
 * present. The frames associated with this sequence are linked to the
 * @dmabuf using the dbuf list. This function looks for three things:
 * 1) that the first frame has a sequence count of zero, 2) that there is a
 * frame with the last-frame-of-sequence bit set, and 3) that there are no
 * holes in the sequence count. The function returns 1 when the sequence is
 * complete, otherwise it returns 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
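
/*
 * A minimal sketch (not driver code) of the F_CTL reassembly done
 * twice in lpfc_seq_complete() above: the three fh_f_ctl bytes form
 * one 24-bit field, and FC_FC_END_SEQ flags the last frame of a
 * sequence.
 */
#if 0	/* illustration only */
static inline uint32_t example_fctl(const struct fc_frame_header *hdr)
{
	return hdr->fh_f_ctl[0] << 16 | hdr->fh_f_ctl[1] << 8 |
	       hdr->fh_f_ctl[2];
}
#endif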

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will
 * be used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is
 * unable to allocate an iocbq then it throws out the received frames that
 * could not be described and returns a pointer to the first iocbq. If unable
 * to allocate any iocbqs (including the first) this function will return
 * NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
				&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
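
/*
 * Packing sketch: lpfc_prep_seq() stores at most two buffers per iocbq
 * (context2 and context3), so an n-frame sequence consumes roughly
 * DIV_ROUND_UP(n, 2) iocbqs; e.g. five frames need three iocbqs, with
 * the last one carrying a single buffer.
 */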

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}

static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd = cmdiocb->context2;

	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	lpfc_drain_txq(phba);
}

static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe *wqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&dmabuf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copy in the payload */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	/* fill in BDE's for command */
	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;

	iocbq->context2 = pcmd;
	iocbq->vport = vport;
	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;

	/*
	 * Setup rest of the iocb as though it were a WQE
	 * Build the SEND_FRAME WQE
	 */
	wqe = (union lpfc_wqe *)&iocbq->iocb;

	wqe->send_frame.frame_len = frame_len;
	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));

	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
	iocbq->iocb.ulpLe = 1;
	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}

/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the dmabuf that describes the received frame.
 *
 * This function is called with no lock held. It processes all the received
 * buffers and gives them to the upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers in interrupt context; the
 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is
 * received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	if (bf_get(lpfc_cqe_code,
		   &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
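
/*
 * Dispatch order implemented above: (1) divert MDS diagnostic loopback
 * frames, (2) validate R_CTL/TYPE, (3) extract the FCFI and D_ID and
 * resolve the vport, (4) route BA_ABTS to the unsolicited abort
 * handler, (5) link the frame into its sequence, and (6) hand the
 * sequence to the ULP once lpfc_seq_complete() reports it whole.
 */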

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE / 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources.  A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec.  This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value used
		 * to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an rpi from the pool of available
 * rpis maintained by the driver. If the pool is running low and the port
 * uses rpi header regions, it also posts another rpi header region to the
 * port to grow the pool.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
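
/*
 * Allocation sketch: the rpi pool is a plain bitmap, so an allocation
 * is find-first-zero plus set_bit under the hba lock, the exact
 * pattern used in lpfc_sli4_alloc_rpi() above:
 */
#if 0	/* illustration only */
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi < rpi_limit)
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
#endif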

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. The caller is expected to
 * hold the hbalock; lpfc_sli4_free_rpi() below wraps this helper with
 * the lock.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
18281
18282/**
18283 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
18284 * @phba: pointer to lpfc hba data structure.
18285 *
18286 * This routine is invoked to remove the memory region that
18287 * provided rpi via a bitmask.
18288 **/
18289int
6b5151fd
JS
18290lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18291 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
6fb120a7
JS
18292{
18293 LPFC_MBOXQ_t *mboxq;
18294 struct lpfc_hba *phba = ndlp->phba;
18295 int rc;
18296
18297 /* The port is notified of the header region via a mailbox command. */
18298 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18299 if (!mboxq)
18300 return -ENOMEM;
18301
18302 /* Post all rpi memory regions to the port. */
18303 lpfc_resume_rpi(mboxq, ndlp);
6b5151fd
JS
18304 if (cmpl) {
18305 mboxq->mbox_cmpl = cmpl;
3e1f0718
JS
18306 mboxq->ctx_buf = arg;
18307 mboxq->ctx_ndlp = ndlp;
72859909
JS
18308 } else
18309 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 18310 mboxq->vport = ndlp->vport;
6fb120a7
JS
18311 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18312 if (rc == MBX_NOT_FINISHED) {
18313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18314 "2010 Resume RPI Mailbox failed "
18315 "status %d, mbxStatus x%x\n", rc,
18316 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18317 mempool_free(mboxq, phba->mbox_mem_pool);
18318 return -EIO;
18319 }
18320 return 0;
18321}
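/*
 * Illustrative sketch only (not driver code): a caller that needs to act
 * once the port has resumed the rpi passes a completion handler; both
 * names below are hypothetical placeholders:
 *
 *	rc = lpfc_sli4_resume_rpi(ndlp, my_resume_rpi_cmpl, my_arg);
 *	if (rc)
 *		return rc;	(-ENOMEM or -EIO, per the routine above)
 */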
18322
18323/**
18324 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 18325 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 18326 *
76a95d75 18327 * This routine is invoked to activate a vpi with the port.
6fb120a7
JS
18328 *
18329 * Returns:
18330 * 0 success
18331 * -Evalue otherwise
18332 **/
18333int
76a95d75 18334lpfc_sli4_init_vpi(struct lpfc_vport *vport)
6fb120a7
JS
18335{
18336 LPFC_MBOXQ_t *mboxq;
18337 int rc = 0;
6a9c52cf 18338 int retval = MBX_SUCCESS;
6fb120a7 18339 uint32_t mbox_tmo;
76a95d75 18340 struct lpfc_hba *phba = vport->phba;
6fb120a7
JS
18341 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18342 if (!mboxq)
18343 return -ENOMEM;
76a95d75 18344 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 18345 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 18346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 18347 if (rc != MBX_SUCCESS) {
76a95d75 18348 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
6fb120a7
JS
18349 "2022 INIT VPI Mailbox failed "
18350 "status %d, mbxStatus x%x\n", rc,
18351 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 18352 retval = -EIO;
6fb120a7 18353 }
6a9c52cf 18354 if (rc != MBX_TIMEOUT)
76a95d75 18355 mempool_free(mboxq, vport->phba->mbox_mem_pool);
6a9c52cf
JS
18356
18357 return retval;
6fb120a7
JS
18358}
18359
18360/**
18361 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18362 * @phba: pointer to lpfc hba data structure.
18363 * @mboxq: Pointer to mailbox object.
18364 *
18365 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
18366 * command issued by lpfc_sli4_add_fcf_record(). It checks the returned
18367 * status and frees the nonembedded mailbox resources.
18368 **/
18369static void
18370lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18371{
18372 void *virt_addr;
18373 union lpfc_sli4_cfg_shdr *shdr;
18374 uint32_t shdr_status, shdr_add_status;
18375
18376 virt_addr = mboxq->sge_array->addr[0];
18377 /* The IOCTL status is embedded in the mailbox subheader. */
18378 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18379 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18380 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18381
18382 if ((shdr_status || shdr_add_status) &&
18383 (shdr_status != STATUS_FCF_IN_USE))
18384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18385 "2558 ADD_FCF_RECORD mailbox failed with "
18386 "status x%x add_status x%x\n",
18387 shdr_status, shdr_add_status);
18388
18389 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18390}
18391
18392/**
18393 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18394 * @phba: pointer to lpfc hba data structure.
18395 * @fcf_record: pointer to the initialized fcf record to add.
18396 *
18397 * This routine is invoked to manually add a single FCF record. The caller
18398 * must pass a completely initialized FCF_Record. This routine takes
18399 * care of the nonembedded mailbox operations.
18400 **/
18401int
18402lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18403{
18404 int rc = 0;
18405 LPFC_MBOXQ_t *mboxq;
18406 uint8_t *bytep;
18407 void *virt_addr;
6fb120a7
JS
18408 struct lpfc_mbx_sge sge;
18409 uint32_t alloc_len, req_len;
18410 uint32_t fcfindex;
18411
18412 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18413 if (!mboxq) {
18414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18415 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18416 return -ENOMEM;
18417 }
18418
18419 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18420 sizeof(uint32_t);
18421
18422 /* Allocate DMA memory and set up the non-embedded mailbox command */
18423 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18424 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18425 req_len, LPFC_SLI4_MBX_NEMBED);
18426 if (alloc_len < req_len) {
18427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18428 "2523 Allocated DMA memory size (x%x) is "
18429 "less than the requested DMA memory "
18430 "size (x%x)\n", alloc_len, req_len);
18431 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18432 return -ENOMEM;
18433 }
18434
18435 /*
18436 * Get the first SGE entry from the non-embedded DMA memory. This
18437 * routine only uses a single SGE.
18438 */
18439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
6fb120a7
JS
18440 virt_addr = mboxq->sge_array->addr[0];
18441 /*
18442 * Configure the FCF record for FCFI 0. This is the driver's
18443 * hardcoded default and gets used in non-FIP mode.
18444 */
18445 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18446 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18447 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18448
18449 /*
18450 * Copy the fcf_index and the FCF Record Data. The data starts after
18451 * the FCoE header plus word10. The data copy needs to be endian
18452 * correct.
18453 */
18454 bytep += sizeof(uint32_t);
18455 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18456 mboxq->vport = phba->pport;
18457 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18458 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18459 if (rc == MBX_NOT_FINISHED) {
18460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18461 "2515 ADD_FCF_RECORD mailbox failed with "
18462 "status 0x%x\n", rc);
18463 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18464 rc = -EIO;
18465 } else
18466 rc = 0;
18467
18468 return rc;
18469}
18470
18471/**
18472 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18473 * @phba: pointer to lpfc hba data structure.
18474 * @fcf_record: pointer to the fcf record to write the default data.
18475 * @fcf_index: FCF table entry index.
18476 *
18477 * This routine is invoked to build the driver's default FCF record. The
18478 * values used are hardcoded. This routine handles memory initialization.
18479 *
18480 **/
18481void
18482lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18483 struct fcf_record *fcf_record,
18484 uint16_t fcf_index)
18485{
18486 memset(fcf_record, 0, sizeof(struct fcf_record));
18487 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18488 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18489 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18490 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18491 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18492 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18493 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18494 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18495 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18496 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18497 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18498 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18499 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
0c287589 18500 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
6fb120a7
JS
18501 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18502 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18503 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18504 /* Set the VLAN bit map */
18505 if (phba->valid_vlan) {
18506 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18507 = 1 << (phba->vlan_id % 8);
18508 }
18509}
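/*
 * Illustrative sketch only (not driver code): the two routines above are
 * meant to be used together when manually seeding an FCF entry:
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 *	if (rc)
 *		return rc;
 */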
18510
18511/**
0c9ab6f5 18512 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
6fb120a7
JS
18513 * @phba: pointer to lpfc hba data structure.
18514 * @fcf_index: FCF table entry offset.
18515 *
0c9ab6f5
JS
18516 * This routine is invoked to scan the entire FCF table by reading FCF
18517 * record and processing it one at a time starting from the @fcf_index
18518 * for initial FCF discovery or fast FCF failover rediscovery.
18519 *
18520 * Returns 0 if the mailbox command is submitted successfully, non-zero
18521 * otherwise.
6fb120a7
JS
18522 **/
18523int
0c9ab6f5 18524lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
6fb120a7
JS
18525{
18526 int rc = 0, error;
18527 LPFC_MBOXQ_t *mboxq;
6fb120a7 18528
32b9793f 18529 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
80c17849 18530 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
6fb120a7
JS
18531 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18532 if (!mboxq) {
18533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18534 "2000 Failed to allocate mbox for "
18535 "READ_FCF cmd\n");
4d9ab994 18536 error = -ENOMEM;
0c9ab6f5 18537 goto fail_fcf_scan;
6fb120a7 18538 }
ecfd03c6 18539 /* Construct the read FCF record mailbox command */
0c9ab6f5 18540 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
ecfd03c6
JS
18541 if (rc) {
18542 error = -EINVAL;
0c9ab6f5 18543 goto fail_fcf_scan;
6fb120a7 18544 }
ecfd03c6 18545 /* Issue the mailbox command asynchronously */
6fb120a7 18546 mboxq->vport = phba->pport;
0c9ab6f5 18547 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
a93ff37a
JS
18548
18549 spin_lock_irq(&phba->hbalock);
18550 phba->hba_flag |= FCF_TS_INPROG;
18551 spin_unlock_irq(&phba->hbalock);
18552
6fb120a7 18553 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
ecfd03c6 18554 if (rc == MBX_NOT_FINISHED)
6fb120a7 18555 error = -EIO;
ecfd03c6 18556 else {
38b92ef8
JS
18557 /* Reset eligible FCF count for new scan */
18558 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
999d813f 18559 phba->fcf.eligible_fcf_cnt = 0;
6fb120a7 18560 error = 0;
32b9793f 18561 }
0c9ab6f5 18562fail_fcf_scan:
4d9ab994
JS
18563 if (error) {
18564 if (mboxq)
18565 lpfc_sli4_mbox_cmd_free(phba, mboxq);
a93ff37a 18566 /* FCF scan failed, clear FCF_TS_INPROG flag */
4d9ab994 18567 spin_lock_irq(&phba->hbalock);
a93ff37a 18568 phba->hba_flag &= ~FCF_TS_INPROG;
4d9ab994
JS
18569 spin_unlock_irq(&phba->hbalock);
18570 }
6fb120a7
JS
18571 return error;
18572}
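/*
 * Illustrative sketch only (not driver code): an initial scan of the FCF
 * table starts from the first record; LPFC_FCOE_FCF_GET_FIRST is the same
 * sentinel the routine above tests to reset eligible_fcf_cnt:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		return rc;	(scan not started; FCF_TS_INPROG is cleared)
 */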
a0c87cbd 18573
0c9ab6f5 18574/**
a93ff37a 18575 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
0c9ab6f5
JS
18576 * @phba: pointer to lpfc hba data structure.
18577 * @fcf_index: FCF table entry offset.
18578 *
18579 * This routine is invoked to read an FCF record indicated by @fcf_index
a93ff37a 18580 * and to use it for FLOGI roundrobin FCF failover.
0c9ab6f5 18581 *
18582 * Returns 0 if the mailbox command is submitted successfully, non-zero
18583 * otherwise.
18584 **/
18585int
18586lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18587{
18588 int rc = 0, error;
18589 LPFC_MBOXQ_t *mboxq;
18590
18591 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18592 if (!mboxq) {
18593 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18594 "2763 Failed to allocate mbox for "
18595 "READ_FCF cmd\n");
18596 error = -ENOMEM;
18597 goto fail_fcf_read;
18598 }
18599 /* Construct the read FCF record mailbox command */
18600 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18601 if (rc) {
18602 error = -EINVAL;
18603 goto fail_fcf_read;
18604 }
18605 /* Issue the mailbox command asynchronously */
18606 mboxq->vport = phba->pport;
18607 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18608 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18609 if (rc == MBX_NOT_FINISHED)
18610 error = -EIO;
18611 else
18612 error = 0;
18613
18614fail_fcf_read:
18615 if (error && mboxq)
18616 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18617 return error;
18618}
18619
18620/**
18621 * lpfc_sli4_read_fcf_rec - Read hba fcf record to update the eligible fcf bmask.
18622 * @phba: pointer to lpfc hba data structure.
18623 * @fcf_index: FCF table entry offset.
18624 *
18625 * This routine is invoked to read an FCF record indicated by @fcf_index to
18626 * determine whether it's eligible for the FLOGI roundrobin failover list.
0c9ab6f5 18627 *
18628 * Returns 0 if the mailbox command is submitted successfully, non-zero
18629 * otherwise.
18630 **/
18631int
18632lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18633{
18634 int rc = 0, error;
18635 LPFC_MBOXQ_t *mboxq;
18636
18637 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18638 if (!mboxq) {
18639 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18640 "2758 Failed to allocate mbox for "
18641 "READ_FCF cmd\n");
18642 error = -ENOMEM;
18643 goto fail_fcf_read;
18644 }
18645 /* Construct the read FCF record mailbox command */
18646 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18647 if (rc) {
18648 error = -EINVAL;
18649 goto fail_fcf_read;
18650 }
18651 /* Issue the mailbox command asynchronously */
18652 mboxq->vport = phba->pport;
18653 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18654 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18655 if (rc == MBX_NOT_FINISHED)
18656 error = -EIO;
18657 else
18658 error = 0;
18659
18660fail_fcf_read:
18661 if (error && mboxq)
18662 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18663 return error;
18664}
18665
7d791df7 18666/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
 * rr_bmask is empty. The FCF indices are put into the rr_bmask based on
 * their priority level, starting from the highest priority and working to
 * the lowest; the most likely FCF candidate will be in the highest
 * priority group. When this routine is called, it searches the fcf_pri
 * list for the next lowest priority group and repopulates the rr_bmask
 * with only those fcf indices.
 *
 * Returns:
 * 1=success 0=failure
18678 **/
5d8b8167 18679static int
7d791df7
JS
18680lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18681{
18682 uint16_t next_fcf_pri;
18683 uint16_t last_index;
18684 struct lpfc_fcf_pri *fcf_pri;
18685 int rc;
18686 int ret = 0;
18687
18688 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18689 LPFC_SLI4_FCF_TBL_INDX_MAX);
18690 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18691 "3060 Last IDX %d\n", last_index);
2562669c
JS
18692
18693 /* Verify the priority list has 2 or more entries */
18694 spin_lock_irq(&phba->hbalock);
18695 if (list_empty(&phba->fcf.fcf_pri_list) ||
18696 list_is_singular(&phba->fcf.fcf_pri_list)) {
18697 spin_unlock_irq(&phba->hbalock);
7d791df7
JS
18698 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18699 "3061 Last IDX %d\n", last_index);
18700 return 0; /* Empty rr list */
18701 }
2562669c
JS
18702 spin_unlock_irq(&phba->hbalock);
18703
7d791df7
JS
18704 next_fcf_pri = 0;
18705 /*
18706 * Clear the rr_bmask and set all of the bits that are at this
18707 * priority.
18708 */
18709 memset(phba->fcf.fcf_rr_bmask, 0,
18710 sizeof(*phba->fcf.fcf_rr_bmask));
18711 spin_lock_irq(&phba->hbalock);
18712 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18713 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18714 continue;
18715 /*
18716 * The first priority that has not had a FLOGI failure
18717 * will be the highest.
18718 */
18719 if (!next_fcf_pri)
18720 next_fcf_pri = fcf_pri->fcf_rec.priority;
18721 spin_unlock_irq(&phba->hbalock);
18722 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18723 rc = lpfc_sli4_fcf_rr_index_set(phba,
18724 fcf_pri->fcf_rec.fcf_index);
18725 if (rc)
18726 return 0;
18727 }
18728 spin_lock_irq(&phba->hbalock);
18729 }
18730 /*
18731 * If next_fcf_pri was not set above and the list is not empty, then
18732 * we have failed FLOGIs on all of them. So reset the FLOGI failed
18733 * flag and start at the beginning.
7d791df7
JS
18734 */
18735 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18736 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18737 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18738 /*
18739 * The first priority that has not had a FLOGI failure
18740 * will be the highest.
18741 */
18742 if (!next_fcf_pri)
18743 next_fcf_pri = fcf_pri->fcf_rec.priority;
18744 spin_unlock_irq(&phba->hbalock);
18745 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18746 rc = lpfc_sli4_fcf_rr_index_set(phba,
18747 fcf_pri->fcf_rec.fcf_index);
18748 if (rc)
18749 return 0;
18750 }
18751 spin_lock_irq(&phba->hbalock);
18752 }
18753 } else
18754 ret = 1;
18755 spin_unlock_irq(&phba->hbalock);
18756
18757 return ret;
18758}
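/*
 * Worked example of the repopulation above (assumed values): with
 * fcf_pri entries {index 0, pri 1}, {index 5, pri 1}, {index 9, pri 2},
 * the first call sets bits 0 and 5 in rr_bmask. Once both priority-1
 * entries carry LPFC_FCF_FLOGI_FAILED, the next call skips priority 1
 * and sets only bit 9. When every entry has failed, the failed flags
 * are cleared and selection restarts from priority 1.
 */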
0c9ab6f5
JS
18759/**
18760 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18761 * @phba: pointer to lpfc hba data structure.
18762 *
18763 * This routine gets the next eligible FCF record index in a round
18764 * robin fashion. If the next eligible FCF record index equals the
18765 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
0c9ab6f5
JS
18766 * shall be returned, otherwise, the next eligible FCF record's index
18767 * shall be returned.
18768 **/
18769uint16_t
18770lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18771{
18772 uint16_t next_fcf_index;
18773
421c6622 18774initial_priority:
3804dc84 18775 /* Search start from next bit of currently registered FCF index */
421c6622
JS
18776 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18777
7d791df7 18778next_priority:
421c6622
JS
18779 /* Determine the next fcf index to check */
18780 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
0c9ab6f5
JS
18781 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18782 LPFC_SLI4_FCF_TBL_INDX_MAX,
3804dc84
JS
18783 next_fcf_index);
18784
0c9ab6f5 18785 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
7d791df7
JS
18786 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18787 /*
18788 * If we have wrapped then we need to clear the bits that
18789 * have been tested so that we can detect when we should
18790 * change the priority level.
18791 */
0c9ab6f5
JS
18792 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18793 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
7d791df7
JS
18794 }
18795
3804dc84
JS
18796
18797 /* Check roundrobin failover list empty condition */
7d791df7
JS
18798 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18799 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18800 /*
18801 * If the next fcf index is not found, check if there are lower
18802 * priority level fcf's in the fcf_priority list.
18803 * Set up the rr_bmask with all of the available fcf bits
18804 * at that level and continue the selection process.
18805 */
18806 if (lpfc_check_next_fcf_pri_level(phba))
421c6622 18807 goto initial_priority;
3804dc84
JS
18808 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18809 "2844 No roundrobin failover FCF available\n");
036cad1f
JS
18810
18811 return LPFC_FCOE_FCF_NEXT_NONE;
3804dc84
JS
18812 }
18813
7d791df7
JS
18814 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18815 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
f5cb5304
JS
18816 LPFC_FCF_FLOGI_FAILED) {
18817 if (list_is_singular(&phba->fcf.fcf_pri_list))
18818 return LPFC_FCOE_FCF_NEXT_NONE;
18819
7d791df7 18820 goto next_priority;
f5cb5304 18821 }
7d791df7 18822
3804dc84 18823 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a
JS
18824 "2845 Get next roundrobin failover FCF (x%x)\n",
18825 next_fcf_index);
18826
0c9ab6f5
JS
18827 return next_fcf_index;
18828}
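/*
 * Illustrative sketch only (not driver code): a failover loop walks the
 * bmask until the sentinel is returned:
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		... (no eligible FCF left; give up the failover)
 *	else
 *		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */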
18829
18830/**
18831 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
18834 * This routine sets the FCF record index in to the eligible bmask for
a93ff37a 18835 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
18836 * does not go beyond the range of the driver allocated bmask dimension
18837 * before setting the bit.
18838 *
18839 * Returns 0 if the index bit successfully set, otherwise, it returns
18840 * -EINVAL.
18841 **/
18842int
18843lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18844{
18845 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18846 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
18847 "2610 FCF (x%x) reached driver's book "
18848 "keeping dimension:x%x\n",
0c9ab6f5
JS
18849 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18850 return -EINVAL;
18851 }
18852 /* Set the eligible FCF record index bmask */
18853 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18854
3804dc84 18855 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18856 "2790 Set FCF (x%x) to roundrobin FCF failover "
3804dc84
JS
18857 "bmask\n", fcf_index);
18858
0c9ab6f5
JS
18859 return 0;
18860}
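/*
 * Illustrative sketch only (not driver code): set and clear are symmetric
 * bookkeeping calls; a discovered FCF is added to the roundrobin bmask
 * and removed again when it goes away:
 *
 *	if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
 *		... (-EINVAL: index beyond LPFC_SLI4_FCF_TBL_INDX_MAX)
 *	...
 *	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 */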
18861
18862/**
3804dc84 18863 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
0c9ab6f5
JS
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
18866 * This routine clears the FCF record index from the eligible bmask for
a93ff37a 18867 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
18868 * does not go beyond the range of the driver allocated bmask dimension
18869 * before clearing the bit.
18870 **/
18871void
18872lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18873{
9a803a74 18874 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
0c9ab6f5
JS
18875 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18876 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
18877 "2762 FCF (x%x) reached driver's book "
18878 "keeping dimension:x%x\n",
0c9ab6f5
JS
18879 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18880 return;
18881 }
18882 /* Clear the eligible FCF record index bmask */
7d791df7 18883 spin_lock_irq(&phba->hbalock);
9a803a74
JS
18884 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18885 list) {
7d791df7
JS
18886 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18887 list_del_init(&fcf_pri->list);
18888 break;
18889 }
18890 }
18891 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 18892 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
3804dc84
JS
18893
18894 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18895 "2791 Clear FCF (x%x) from roundrobin failover "
3804dc84 18896 "bmask\n", fcf_index);
0c9ab6f5
JS
18897}
18898
ecfd03c6
JS
18899/**
18900 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18901 * @phba: pointer to lpfc hba data structure.
18902 *
18903 * This routine is the completion routine for the rediscover FCF table mailbox
18904 * command. On failure it falls back to retrying the current FCF or to the
18905 * link-down failthrough; on success it starts the FCF rediscovery wait timer.
18906 **/
5d8b8167 18907static void
ecfd03c6
JS
18908lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18909{
18910 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18911 uint32_t shdr_status, shdr_add_status;
18912
18913 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18914
18915 shdr_status = bf_get(lpfc_mbox_hdr_status,
18916 &redisc_fcf->header.cfg_shdr.response);
18917 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18918 &redisc_fcf->header.cfg_shdr.response);
18919 if (shdr_status || shdr_add_status) {
0c9ab6f5 18920 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
ecfd03c6
JS
18921 "2746 Requesting for FCF rediscovery failed "
18922 "status x%x add_status x%x\n",
18923 shdr_status, shdr_add_status);
0c9ab6f5 18924 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
fc2b989b 18925 spin_lock_irq(&phba->hbalock);
0c9ab6f5 18926 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b
JS
18927 spin_unlock_irq(&phba->hbalock);
18928 /*
18929 * CVL event triggered FCF rediscover request failed,
18930 * last resort to re-try current registered FCF entry.
18931 */
18932 lpfc_retry_pport_discovery(phba);
18933 } else {
18934 spin_lock_irq(&phba->hbalock);
0c9ab6f5 18935 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
fc2b989b
JS
18936 spin_unlock_irq(&phba->hbalock);
18937 /*
18938 * DEAD FCF event triggered FCF rediscover request
18939 * failed, last resort to fail over as a link down
18940 * to FCF registration.
18941 */
18942 lpfc_sli4_fcf_dead_failthrough(phba);
18943 }
0c9ab6f5
JS
18944 } else {
18945 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18946 "2775 Start FCF rediscover quiescent timer\n");
ecfd03c6
JS
18947 /*
18948 * Start FCF rediscovery wait timer for pending FCF
18949 * before rescanning the FCF record table.
18950 */
18951 lpfc_fcf_redisc_wait_start_timer(phba);
0c9ab6f5 18952 }
ecfd03c6
JS
18953
18954 mempool_free(mbox, phba->mbox_mem_pool);
18955}
18956
18957/**
3804dc84 18958 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
ecfd03c6
JS
18959 * @phba: pointer to lpfc hba data structure.
18960 *
18961 * This routine is invoked to request rediscovery of the entire FCF table
18962 * by the port.
18963 **/
18964int
18965lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18966{
18967 LPFC_MBOXQ_t *mbox;
18968 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18969 int rc, length;
18970
0c9ab6f5
JS
18971 /* Cancel retry delay timers to all vports before FCF rediscover */
18972 lpfc_cancel_all_vport_retry_delay_timer(phba);
18973
ecfd03c6
JS
18974 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18975 if (!mbox) {
18976 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18977 "2745 Failed to allocate mbox for "
18978 "requesting FCF rediscover.\n");
18979 return -ENOMEM;
18980 }
18981
18982 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18983 sizeof(struct lpfc_sli4_cfg_mhdr));
18984 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18985 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18986 length, LPFC_SLI4_MBX_EMBED);
18987
18988 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18989 /* Set count to 0 for invalidating the entire FCF database */
18990 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18991
18992 /* Issue the mailbox command asynchronously */
18993 mbox->vport = phba->pport;
18994 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18995 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18996
18997 if (rc == MBX_NOT_FINISHED) {
18998 mempool_free(mbox, phba->mbox_mem_pool);
18999 return -EIO;
19000 }
19001 return 0;
19002}
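/*
 * Flow note (summarizing the routines above): lpfc_sli4_redisc_fcf_table
 * sends REDISCOVER_FCF with a count of 0 to invalidate the whole FCF
 * database; its completion handler, lpfc_mbx_cmpl_redisc_fcf_table, then
 * either starts the rediscovery wait timer (success) or falls back via
 * lpfc_retry_pport_discovery / lpfc_sli4_fcf_dead_failthrough (failure).
 */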
19003
fc2b989b
JS
19004/**
19005 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19006 * @phba: pointer to lpfc hba data structure.
19007 *
19008 * This function is the failover routine as a last resort to the FCF DEAD
19009 * event when driver failed to perform fast FCF failover.
19010 **/
19011void
19012lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19013{
19014 uint32_t link_state;
19015
19016 /*
19017 * Last resort as FCF DEAD event failover will treat this as
19018 * a link down, but save the link state because we don't want
19019 * it to be changed to Link Down unless it is already down.
19020 */
19021 link_state = phba->link_state;
19022 lpfc_linkdown(phba);
19023 phba->link_state = link_state;
19024
19025 /* Unregister FCF if no devices connected to it */
19026 lpfc_unregister_unused_fcf(phba);
19027}
19028
a0c87cbd 19029/**
026abb87 19030 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
a0c87cbd 19031 * @phba: pointer to lpfc hba data structure.
026abb87 19032 * @rgn23_data: pointer to configure region 23 data.
a0c87cbd 19033 *
026abb87
JS
19034 * This function gets SLI3 port configuration region 23 data through the
19035 * memory dump mailbox command. When it successfully retrieves data, the
19036 * size of the data will be returned; otherwise, 0 will be returned.
a0c87cbd 19037 **/
026abb87
JS
19038static uint32_t
19039lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
a0c87cbd
JS
19040{
19041 LPFC_MBOXQ_t *pmb = NULL;
19042 MAILBOX_t *mb;
026abb87 19043 uint32_t offset = 0;
a0c87cbd
JS
19044 int rc;
19045
026abb87
JS
19046 if (!rgn23_data)
19047 return 0;
19048
a0c87cbd
JS
19049 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19050 if (!pmb) {
19051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
026abb87
JS
19052 "2600 failed to allocate mailbox memory\n");
19053 return 0;
a0c87cbd
JS
19054 }
19055 mb = &pmb->u.mb;
19056
a0c87cbd
JS
19057 do {
19058 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19059 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19060
19061 if (rc != MBX_SUCCESS) {
19062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
026abb87
JS
19063 "2601 failed to read config "
19064 "region 23, rc 0x%x Status 0x%x\n",
19065 rc, mb->mbxStatus);
a0c87cbd
JS
19066 mb->un.varDmp.word_cnt = 0;
19067 }
19068 /*
19069 * Dump mem may return a zero word count when finished, or we may
19070 * have hit a mailbox error; either way we are done.
19071 */
19072 if (mb->un.varDmp.word_cnt == 0)
19073 break;
19074 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19075 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19076
19077 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
026abb87
JS
19078 rgn23_data + offset,
19079 mb->un.varDmp.word_cnt);
a0c87cbd
JS
19080 offset += mb->un.varDmp.word_cnt;
19081 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19082
026abb87
JS
19083 mempool_free(pmb, phba->mbox_mem_pool);
19084 return offset;
19085}
19086
19087/**
19088 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19089 * @phba: pointer to lpfc hba data structure.
19090 * @rgn23_data: pointer to configure region 23 data.
19091 *
19092 * This function gets SLI4 port configuration region 23 data through the
19093 * memory dump mailbox command. When it successfully retrieves data, the
19094 * size of the data will be returned; otherwise, 0 will be returned.
19095 **/
19096static uint32_t
19097lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19098{
19099 LPFC_MBOXQ_t *mboxq = NULL;
19100 struct lpfc_dmabuf *mp = NULL;
19101 struct lpfc_mqe *mqe;
19102 uint32_t data_length = 0;
19103 int rc;
19104
19105 if (!rgn23_data)
19106 return 0;
19107
19108 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19109 if (!mboxq) {
19110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19111 "3105 failed to allocate mailbox memory\n");
19112 return 0;
19113 }
19114
19115 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19116 goto out;
19117 mqe = &mboxq->u.mqe;
3e1f0718 19118 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
026abb87
JS
19119 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19120 if (rc)
19121 goto out;
19122 data_length = mqe->un.mb_words[5];
19123 if (data_length == 0)
19124 goto out;
19125 if (data_length > DMP_RGN23_SIZE) {
19126 data_length = 0;
19127 goto out;
19128 }
19129 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19130out:
19131 mempool_free(mboxq, phba->mbox_mem_pool);
19132 if (mp) {
19133 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19134 kfree(mp);
19135 }
19136 return data_length;
19137}
19138
19139/**
19140 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19141 * @phba: pointer to lpfc hba data structure.
19142 *
19143 * This function reads region 23 and parses the TLVs for the port status
19144 * to decide if the user disabled the port. If the TLVs indicate the
19145 * port is disabled, the hba_flag is set accordingly.
19146 **/
19147void
19148lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19149{
19150 uint8_t *rgn23_data = NULL;
19151 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19152 uint32_t offset = 0;
19153
19154 /* Get adapter Region 23 data */
19155 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19156 if (!rgn23_data)
19157 goto out;
19158
19159 if (phba->sli_rev < LPFC_SLI_REV4)
19160 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19161 else {
19162 if_type = bf_get(lpfc_sli_intf_if_type,
19163 &phba->sli4_hba.sli_intf);
19164 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19165 goto out;
19166 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19167 }
a0c87cbd
JS
19168
19169 if (!data_size)
19170 goto out;
19171
19172 /* Check the region signature first */
19173 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19175 "2619 Config region 23 has bad signature\n");
19176 goto out;
19177 }
19178 offset += 4;
19179
19180 /* Check the data structure version */
19181 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19183 "2620 Config region 23 has bad version\n");
19184 goto out;
19185 }
19186 offset += 4;
19187
19188 /* Parse TLV entries in the region */
19189 while (offset < data_size) {
19190 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19191 break;
19192 /*
19193 * If the TLV is not a driver specific TLV or the driver id is
19194 * not the Linux driver id, skip the record.
19195 */
19196 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19197 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19198 (rgn23_data[offset + 3] != 0)) {
19199 offset += rgn23_data[offset + 1] * 4 + 4;
19200 continue;
19201 }
19202
19203 /* Driver found a driver specific TLV in the config region */
19204 sub_tlv_len = rgn23_data[offset + 1] * 4;
19205 offset += 4;
19206 tlv_offset = 0;
19207
19208 /*
19209 * Search for configured port state sub-TLV.
19210 */
19211 while ((offset < data_size) &&
19212 (tlv_offset < sub_tlv_len)) {
19213 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19214 offset += 4;
19215 tlv_offset += 4;
19216 break;
19217 }
19218 if (rgn23_data[offset] != PORT_STE_TYPE) {
19219 offset += rgn23_data[offset + 1] * 4 + 4;
19220 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19221 continue;
19222 }
19223
19224 /* This HBA contains PORT_STE configured */
19225 if (!rgn23_data[offset + 2])
19226 phba->hba_flag |= LINK_DISABLED;
19227
19228 goto out;
19229 }
19230 }
026abb87 19231
a0c87cbd 19232out:
a0c87cbd
JS
19233 kfree(rgn23_data);
19234 return;
19235}
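/*
 * Region 23 layout as parsed above (sketch; field meanings inferred from
 * this routine):
 *
 *	bytes 0-3 : LPFC_REGION23_SIGNATURE
 *	byte  4   : LPFC_REGION23_VERSION
 *	then TLVs : [type][length in words][data ...], terminated by
 *	            LPFC_REGION23_LAST_REC
 *
 * A driver specific TLV (DRIVER_SPECIFIC_TYPE with LINUX_DRIVER_ID) may
 * carry a PORT_STE_TYPE sub-TLV; a first data byte of zero means the user
 * disabled the port, which sets LINK_DISABLED in hba_flag.
 */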
695a814e 19236
52d52440
JS
19237/**
19238 * lpfc_wr_object - write an object to the firmware
19239 * @phba: HBA structure that indicates port to create a queue on.
19240 * @dmabuf_list: list of dmabufs to write to the port.
19241 * @size: the total byte value of the objects to write to the port.
19242 * @offset: the current offset to be used to start the transfer.
19243 *
19244 * This routine will create a wr_object mailbox command to send to the port.
19245 * The mailbox command will be constructed using the dma buffers described in
19246 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19247 * BDEs as the embedded mailbox can support. The @offset variable will be
19248 * used to indicate the starting offset of the transfer and will also return
19249 * the offset after the write object mailbox has completed. @size is used to
19250 * determine the end of the object and whether the eof bit should be set.
19251 *
19252 * Returns 0 if successful; @offset will contain the new offset to use
19253 * for the next write.
19254 * Returns a negative value for error cases.
19255 **/
19256int
19257lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19258 uint32_t size, uint32_t *offset)
19259{
19260 struct lpfc_mbx_wr_object *wr_object;
19261 LPFC_MBOXQ_t *mbox;
19262 int rc = 0, i = 0;
19263 uint32_t shdr_status, shdr_add_status;
19264 uint32_t mbox_tmo;
19265 union lpfc_sli4_cfg_shdr *shdr;
19266 struct lpfc_dmabuf *dmabuf;
19267 uint32_t written = 0;
19268
19269 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19270 if (!mbox)
19271 return -ENOMEM;
19272
19273 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19274 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19275 sizeof(struct lpfc_mbx_wr_object) -
19276 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19277
19278 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19279 wr_object->u.request.write_offset = *offset;
19280 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19281 wr_object->u.request.object_name[0] =
19282 cpu_to_le32(wr_object->u.request.object_name[0]);
19283 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19284 list_for_each_entry(dmabuf, dmabuf_list, list) {
19285 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19286 break;
19287 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19288 wr_object->u.request.bde[i].addrHigh =
19289 putPaddrHigh(dmabuf->phys);
19290 if (written + SLI4_PAGE_SIZE >= size) {
19291 wr_object->u.request.bde[i].tus.f.bdeSize =
19292 (size - written);
19293 written += (size - written);
19294 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19295 } else {
19296 wr_object->u.request.bde[i].tus.f.bdeSize =
19297 SLI4_PAGE_SIZE;
19298 written += SLI4_PAGE_SIZE;
19299 }
19300 i++;
19301 }
19302 wr_object->u.request.bde_count = i;
19303 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19304 if (!phba->sli4_hba.intr_enable)
19305 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19306 else {
a183a15f 19307 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
52d52440
JS
19308 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19309 }
19310 /* The IOCTL status is embedded in the mailbox subheader. */
19311 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
19312 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19313 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19314 if (rc != MBX_TIMEOUT)
19315 mempool_free(mbox, phba->mbox_mem_pool);
19316 if (shdr_status || shdr_add_status || rc) {
19317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19318 "3025 Write Object mailbox failed with "
19319 "status x%x add_status x%x, mbx status x%x\n",
19320 shdr_status, shdr_add_status, rc);
19321 rc = -ENXIO;
1feb8204 19322 *offset = shdr_add_status;
52d52440
JS
19323 } else
19324 *offset += wr_object->u.response.actual_write_length;
19325 return rc;
19326}
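/*
 * Illustrative sketch only (not driver code): a firmware download drives
 * the routine above in a loop, letting it advance @offset until the eof
 * BDE has been written; fw_size and dma_buffer_list are assumed to have
 * been set up by the caller:
 *
 *	uint32_t offset = 0;
 *
 *	while (offset < fw_size) {
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_size, &offset);
 *		if (rc)
 *			break;
 *	}
 */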
19327
695a814e
JS
19328/**
19329 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19330 * @vport: pointer to vport data structure.
19331 *
19332 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19333 * and REG_VPI mailbox commands associated with the vport. This function
19334 * is called when the driver wants to restart discovery of the vport due to
19335 * a Clear Virtual Link event.
19336 **/
19337void
19338lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19339{
19340 struct lpfc_hba *phba = vport->phba;
19341 LPFC_MBOXQ_t *mb, *nextmb;
19342 struct lpfc_dmabuf *mp;
78730cfe 19343 struct lpfc_nodelist *ndlp;
d439d286 19344 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 19345 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 19346 LIST_HEAD(mbox_cmd_list);
63e801ce 19347 uint8_t restart_loop;
695a814e 19348
d439d286 19349 /* Clean up internally queued mailbox commands with the vport */
695a814e
JS
19350 spin_lock_irq(&phba->hbalock);
19351 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19352 if (mb->vport != vport)
19353 continue;
19354
19355 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19356 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19357 continue;
19358
d439d286
JS
19359 list_del(&mb->list);
19360 list_add_tail(&mb->list, &mbox_cmd_list);
19361 }
19362 /* Clean up active mailbox command with the vport */
19363 mb = phba->sli.mbox_active;
19364 if (mb && (mb->vport == vport)) {
19365 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19366 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19367 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19368 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19369 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
d439d286
JS
19370 /* Put reference count for delayed processing */
19371 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19372 /* Unregister the RPI when mailbox complete */
19373 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19374 }
19375 }
63e801ce
JS
19376 /* Cleanup any mailbox completions which are not yet processed */
19377 do {
19378 restart_loop = 0;
19379 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19380 /*
19381 * If this mailbox is already processed or it is
19382 * for another vport, ignore it.
19383 */
19384 if ((mb->vport != vport) ||
19385 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19386 continue;
19387
19388 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19389 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19390 continue;
19391
19392 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19393 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19394 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
63e801ce
JS
19395 /* Unregister the RPI when mailbox complete */
19396 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19397 restart_loop = 1;
19398 spin_unlock_irq(&phba->hbalock);
19399 spin_lock(shost->host_lock);
19400 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19401 spin_unlock(shost->host_lock);
19402 spin_lock_irq(&phba->hbalock);
19403 break;
19404 }
19405 }
19406 } while (restart_loop);
19407
d439d286
JS
19408 spin_unlock_irq(&phba->hbalock);
19409
19410 /* Release the cleaned-up mailbox commands */
19411 while (!list_empty(&mbox_cmd_list)) {
19412 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
695a814e 19413 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19414 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
695a814e
JS
19415 if (mp) {
19416 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19417 kfree(mp);
19418 }
3e1f0718
JS
19419 mb->ctx_buf = NULL;
19420 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19421 mb->ctx_ndlp = NULL;
78730cfe 19422 if (ndlp) {
ec21b3b0 19423 spin_lock(shost->host_lock);
589a52d6 19424 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 19425 spin_unlock(shost->host_lock);
78730cfe 19426 lpfc_nlp_put(ndlp);
78730cfe 19427 }
695a814e 19428 }
695a814e
JS
19429 mempool_free(mb, phba->mbox_mem_pool);
19430 }
d439d286
JS
19431
19432 /* Release the ndlp with the cleaned-up active mailbox command */
19433 if (act_mbx_ndlp) {
19434 spin_lock(shost->host_lock);
19435 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19436 spin_unlock(shost->host_lock);
19437 lpfc_nlp_put(act_mbx_ndlp);
695a814e 19438 }
695a814e
JS
19439}
19440
2a9bf3d0
JS
19441/**
19442 * lpfc_drain_txq - Drain the txq
19443 * @phba: Pointer to HBA context object.
19444 *
19445 * This function attempts to submit IOCBs on the txq
19446 * to the adapter. For SLI4 adapters, the txq contains
19447 * ELS IOCBs that have been deferred because there
19448 * are no SGLs. This congestion can occur with large
19449 * vport counts during node discovery.
19450 **/
19451
19452uint32_t
19453lpfc_drain_txq(struct lpfc_hba *phba)
19454{
19455 LIST_HEAD(completions);
895427bd 19456 struct lpfc_sli_ring *pring;
2e706377 19457 struct lpfc_iocbq *piocbq = NULL;
2a9bf3d0
JS
19458 unsigned long iflags = 0;
19459 char *fail_msg = NULL;
19460 struct lpfc_sglq *sglq;
205e8240 19461 union lpfc_wqe128 wqe;
a2fc4aef 19462 uint32_t txq_cnt = 0;
dc19e3b4 19463 struct lpfc_queue *wq;
2a9bf3d0 19464
dc19e3b4
JS
19465 if (phba->link_flag & LS_MDS_LOOPBACK) {
19466 /* MDS WQE are posted only to first WQ*/
19467 wq = phba->sli4_hba.fcp_wq[0];
19468 if (unlikely(!wq))
19469 return 0;
19470 pring = wq->pring;
19471 } else {
19472 wq = phba->sli4_hba.els_wq;
19473 if (unlikely(!wq))
19474 return 0;
19475 pring = lpfc_phba_elsring(phba);
19476 }
19477
19478 if (unlikely(!pring) || list_empty(&pring->txq))
1234a6d5 19479 return 0;
895427bd 19480
398d81c9 19481 spin_lock_irqsave(&pring->ring_lock, iflags);
0e9bb8d7
JS
19482 list_for_each_entry(piocbq, &pring->txq, list) {
19483 txq_cnt++;
19484 }
19485
19486 if (txq_cnt > pring->txq_max)
19487 pring->txq_max = txq_cnt;
2a9bf3d0 19488
398d81c9 19489 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19490
0e9bb8d7 19491 while (!list_empty(&pring->txq)) {
398d81c9 19492 spin_lock_irqsave(&pring->ring_lock, iflags);
2a9bf3d0 19493
19ca7609 19494 piocbq = lpfc_sli_ringtx_get(phba, pring);
a629852a 19495 if (!piocbq) {
398d81c9 19496 spin_unlock_irqrestore(&pring->ring_lock, iflags);
a629852a
JS
19497 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19498 "2823 txq empty and txq_cnt is %d\n ",
0e9bb8d7 19499 txq_cnt);
a629852a
JS
19500 break;
19501 }
895427bd 19502 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
2a9bf3d0 19503 if (!sglq) {
19ca7609 19504 __lpfc_sli_ringtx_put(phba, pring, piocbq);
398d81c9 19505 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19506 break;
2a9bf3d0 19507 }
0e9bb8d7 19508 txq_cnt--;
2a9bf3d0
JS
19509
19510 /* The xri and iocb resources are secured,
19511 * attempt to issue the request
19512 */
6d368e53 19513 piocbq->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0
JS
19514 piocbq->sli4_xritag = sglq->sli4_xritag;
19515 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19516 fail_msg = "to convert bpl to sgl";
205e8240 19517 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
2a9bf3d0 19518 fail_msg = "to convert iocb to wqe";
dc19e3b4 19519 else if (lpfc_sli4_wq_put(wq, &wqe))
2a9bf3d0
JS
19520 fail_msg = " - Wq is full";
19521 else
19522 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19523
19524 if (fail_msg) {
19525 /* Failed means we can't issue and need to cancel */
19526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19527 "2822 IOCB failed %s iotag 0x%x "
19528 "xri 0x%x\n",
19529 fail_msg,
19530 piocbq->iotag, piocbq->sli4_xritag);
19531 list_add_tail(&piocbq->list, &completions);
19532 }
398d81c9 19533 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0
JS
19534 }
19535
2a9bf3d0
JS
19536 /* Cancel all the IOCBs that cannot be issued */
19537 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19538 IOERR_SLI_ABORTED);
19539
0e9bb8d7 19540 return txq_cnt;
2a9bf3d0 19541}
895427bd
JS
19542
19543/**
19544 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19545 * @phba: Pointer to HBA context object.
19546 * @pwqeq: Pointer to command WQE.
19547 * @sglq: Pointer to the scatter gather queue object.
19548 *
19549 * This routine converts the bpl or bde that is in the WQE
19550 * to a sgl list for the sli4 hardware. The physical address
19551 * of the bpl/bde is converted back to a virtual address.
19552 * If the WQE contains a BPL then the list of BDEs is
19553 * converted to sli4_sges. If the WQE contains a single
19554 * BDE then it is converted to a single sli4_sge.
19555 * The WQE is still in cpu endianness so the contents of
19556 * the bpl can be used without byte swapping.
19557 *
19558 * Returns valid XRI = Success, NO_XRI = Failure.
19559 */
19560static uint16_t
19561lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19562 struct lpfc_sglq *sglq)
19563{
19564 uint16_t xritag = NO_XRI;
19565 struct ulp_bde64 *bpl = NULL;
19566 struct ulp_bde64 bde;
19567 struct sli4_sge *sgl = NULL;
19568 struct lpfc_dmabuf *dmabuf;
205e8240 19569 union lpfc_wqe128 *wqe;
895427bd
JS
19570 int numBdes = 0;
19571 int i = 0;
19572 uint32_t offset = 0; /* accumulated offset in the sg request list */
19573 int inbound = 0; /* number of sg reply entries inbound from firmware */
19574 uint32_t cmd;
19575
19576 if (!pwqeq || !sglq)
19577 return xritag;
19578
19579 sgl = (struct sli4_sge *)sglq->sgl;
19580 wqe = &pwqeq->wqe;
19581 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19582
19583 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19584 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19585 return sglq->sli4_xritag;
19586 numBdes = pwqeq->rsvd2;
19587 if (numBdes) {
19588 /* The addrHigh and addrLow fields within the WQE
19589 * have not been byteswapped yet so there is no
19590 * need to swap them back.
19591 */
19592 if (pwqeq->context3)
19593 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19594 else
19595 return xritag;
19596
19597 bpl = (struct ulp_bde64 *)dmabuf->virt;
19598 if (!bpl)
19599 return xritag;
19600
19601 for (i = 0; i < numBdes; i++) {
19602 /* Should already be byte swapped. */
19603 sgl->addr_hi = bpl->addrHigh;
19604 sgl->addr_lo = bpl->addrLow;
19605
19606 sgl->word2 = le32_to_cpu(sgl->word2);
19607 if ((i+1) == numBdes)
19608 bf_set(lpfc_sli4_sge_last, sgl, 1);
19609 else
19610 bf_set(lpfc_sli4_sge_last, sgl, 0);
19611 /* swap the size field back to the cpu so we
19612 * can assign it to the sgl.
19613 */
19614 bde.tus.w = le32_to_cpu(bpl->tus.w);
19615 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19616 /* The offsets in the sgl need to be accumulated
19617 * separately for the request and reply lists.
19618 * The request is always first, the reply follows.
19619 */
19620 switch (cmd) {
19621 case CMD_GEN_REQUEST64_WQE:
19622 /* add up the reply sg entries */
19623 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19624 inbound++;
19625 /* first inbound? reset the offset */
19626 if (inbound == 1)
19627 offset = 0;
19628 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19629 bf_set(lpfc_sli4_sge_type, sgl,
19630 LPFC_SGE_TYPE_DATA);
19631 offset += bde.tus.f.bdeSize;
19632 break;
19633 case CMD_FCP_TRSP64_WQE:
19634 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19635 bf_set(lpfc_sli4_sge_type, sgl,
19636 LPFC_SGE_TYPE_DATA);
19637 break;
19638 case CMD_FCP_TSEND64_WQE:
19639 case CMD_FCP_TRECEIVE64_WQE:
19640 bf_set(lpfc_sli4_sge_type, sgl,
19641 bpl->tus.f.bdeFlags);
19642 if (i < 3)
19643 offset = 0;
19644 else
19645 offset += bde.tus.f.bdeSize;
19646 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19647 break;
19648 }
19649 sgl->word2 = cpu_to_le32(sgl->word2);
19650 bpl++;
19651 sgl++;
19652 }
19653 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19654 /* The addrHigh and addrLow fields of the BDE have not
19655 * been byteswapped yet so they need to be swapped
19656 * before putting them in the sgl.
19657 */
19658 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19659 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19660 sgl->word2 = le32_to_cpu(sgl->word2);
19661 bf_set(lpfc_sli4_sge_last, sgl, 1);
19662 sgl->word2 = cpu_to_le32(sgl->word2);
19663 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19664 }
19665 return sglq->sli4_xritag;
19666}
19667
19668/**
19669 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19670 * @phba: Pointer to HBA context object.
19671 * @ring_number: Base sli ring number
19672 * @pwqe: Pointer to command WQE.
19673 **/
19674int
19675lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
19676 struct lpfc_iocbq *pwqe)
19677{
205e8240 19678 union lpfc_wqe128 *wqe = &pwqe->wqe;
f358dd0c 19679 struct lpfc_nvmet_rcv_ctx *ctxp;
895427bd
JS
19680 struct lpfc_queue *wq;
19681 struct lpfc_sglq *sglq;
19682 struct lpfc_sli_ring *pring;
19683 unsigned long iflags;
cd22d605 19684 uint32_t ret = 0;
895427bd
JS
19685
19686 /* NVME_LS and NVME_LS ABTS requests. */
19687 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19688 pring = phba->sli4_hba.nvmels_wq->pring;
19689 spin_lock_irqsave(&pring->ring_lock, iflags);
19690 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19691 if (!sglq) {
19692 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19693 return WQE_BUSY;
19694 }
19695 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19696 pwqe->sli4_xritag = sglq->sli4_xritag;
19697 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19698 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19699 return WQE_ERROR;
19700 }
19701 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19702 pwqe->sli4_xritag);
cd22d605
DK
19703 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19704 if (ret) {
895427bd 19705 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19706 return ret;
895427bd 19707 }
cd22d605 19708
895427bd
JS
19709 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19710 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19711 return 0;
19712 }
19713
19714 /* NVME_FCREQ and NVME_ABTS requests */
19715 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19716 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19717 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
19718
19719 spin_lock_irqsave(&pring->ring_lock, iflags);
19720 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19721 bf_set(wqe_cqid, &wqe->generic.wqe_com,
19722 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
cd22d605
DK
19723 ret = lpfc_sli4_wq_put(wq, wqe);
19724 if (ret) {
895427bd 19725 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19726 return ret;
895427bd
JS
19727 }
19728 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19729 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19730 return 0;
19731 }
19732
f358dd0c
JS
19733 /* NVMET requests */
19734 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19735 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19736 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
19737
19738 spin_lock_irqsave(&pring->ring_lock, iflags);
19739 ctxp = pwqe->context2;
6c621a22 19740 sglq = ctxp->ctxbuf->sglq;
f358dd0c
JS
19741 if (pwqe->sli4_xritag == NO_XRI) {
19742 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19743 pwqe->sli4_xritag = sglq->sli4_xritag;
19744 }
19745 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19746 pwqe->sli4_xritag);
19747 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19748 bf_set(wqe_cqid, &wqe->generic.wqe_com,
19749 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
cd22d605
DK
19750 ret = lpfc_sli4_wq_put(wq, wqe);
19751 if (ret) {
f358dd0c 19752 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19753 return ret;
f358dd0c
JS
19754 }
19755 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19756 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19757 return 0;
19758 }
895427bd
JS
19759 return WQE_ERROR;
19760}
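/*
 * Illustrative sketch only (not driver code): an NVME LS submitter checks
 * for WQE_BUSY (no XRI or WQ slot) and retries later; the ring number
 * argument shown is an assumption:
 *
 *	ret = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, pwqe);
 *	if (ret == WQE_BUSY)
 *		... (defer and retry later)
 *	else if (ret)
 *		... (hard failure, e.g. WQE_ERROR)
 */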