scsi: lpfc: Migrate to %px and %pf in kernel print calls
[linux-block.git] / drivers / scsi / lpfc / lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

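/*
 * Usage sketch (illustrative only, not driver code): per the kernel-doc
 * above, callers hold the hbalock around lpfc_sli4_wq_put() and must be
 * prepared for -EBUSY when the work queue is full, e.g.:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		... defer the WQE and retry after the HBA consumes entries ...
 */
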
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

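/*
 * Worked example (illustrative): with entry_count = 256, hba_index = 250
 * and index = 2, the loop above advances hba_index through 251, 252, ...,
 * 255, 0, 1, 2 and returns 8 -- the modulo arithmetic handles the wrap of
 * the circular queue.
 */
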
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

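/*
 * The sequence above is the standard check-then-fence pattern for DMA'd
 * ring entries (illustrative summary, assuming the usual memory model):
 *
 *	if (!entry_valid(qe))	// read the valid bit first
 *		return NULL;
 *	mb();			// order the check before any payload reads
 *	consume(qe);		// payload reads now observe fresh data
 *
 * Without the barrier the CPU may speculatively load the payload before
 * the valid-bit load completes and observe stale queue-entry contents.
 */
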
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

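/*
 * Worked example (illustrative): on an eqav-capable EQ with
 * entry_count = 4, host_index cycles 0 -> 1 -> 2 -> 3 -> 0; on the wrap
 * back to 0, qe_valid flips (1 -> 0 or 0 -> 1), so EQEs written by the
 * HBA on the next pass are recognized by their toggled valid bit rather
 * than by the host re-zeroing every consumed entry.
 */
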
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}

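/*
 * Note (illustrative summary; the actual interval and limit values are
 * configuration-dependent): the loop above batches doorbell writes. With,
 * say, notify_interval = 16 and max_proc_limit = 256, the EQ doorbell is
 * rung (NOARM) after every 16 consumed EQEs, the handler yields the EQ
 * after 256 EQEs, and the rearm_and_exit path issues one final doorbell
 * write that both reports any remainder and rearms the EQ.
 */
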
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

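/*
 * Note (illustrative): the header and data RQs advance in lockstep -- the
 * hq_put_index/dq_put_index equality check above rejects a post unless
 * both queues point at the same slot, so every received frame always has
 * a matched header/payload buffer pair.
 */
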
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap);
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

919
920/**
921 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
922 * @vport: Pointer to vport context object.
923 * @xri: The xri used in the exchange.
924 * @did: The targets DID for this exchange.
925 *
926 * returns NULL = rrq not found in the phba->active_rrq_list.
927 * rrq = rrq for this xri and target.
928 **/
929struct lpfc_node_rrq *
930lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
931{
932 struct lpfc_hba *phba = vport->phba;
933 struct lpfc_node_rrq *rrq;
934 struct lpfc_node_rrq *nextrrq;
935 unsigned long iflags;
936
937 if (phba->sli_rev != LPFC_SLI_REV4)
938 return NULL;
939 spin_lock_irqsave(&phba->hbalock, iflags);
940 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
941 if (rrq->vport == vport && rrq->xritag == xri &&
942 rrq->nlp_DID == did){
943 list_del(&rrq->list);
944 spin_unlock_irqrestore(&phba->hbalock, iflags);
945 return rrq;
946 }
947 }
948 spin_unlock_irqrestore(&phba->hbalock, iflags);
949 return NULL;
950}
951
952/**
953 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
954 * @vport: Pointer to vport context object.
1151e3ec
JS
955 * @ndlp: Pointer to the lpfc_node_list structure.
956 * If ndlp is NULL Remove all active RRQs for this vport from the
957 * phba->active_rrq_list and clear the rrq.
958 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
19ca7609
JS
959 **/
960void
1151e3ec 961lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
19ca7609
JS
962
963{
964 struct lpfc_hba *phba = vport->phba;
965 struct lpfc_node_rrq *rrq;
966 struct lpfc_node_rrq *nextrrq;
967 unsigned long iflags;
1151e3ec 968 LIST_HEAD(rrq_list);
19ca7609
JS
969
970 if (phba->sli_rev != LPFC_SLI_REV4)
971 return;
1151e3ec
JS
972 if (!ndlp) {
973 lpfc_sli4_vport_delete_els_xri_aborted(vport);
974 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
19ca7609 975 }
1151e3ec
JS
976 spin_lock_irqsave(&phba->hbalock, iflags);
977 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
978 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
979 list_move(&rrq->list, &rrq_list);
19ca7609 980 spin_unlock_irqrestore(&phba->hbalock, iflags);
1151e3ec
JS
981
982 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
983 list_del(&rrq->list);
984 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
985 }
19ca7609
JS
986}
987
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns   0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

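/*
 * Worked example (illustrative): with fc_ratov = 2, rrq_stop_time is set
 * to jiffies + msecs_to_jiffies(1000 * 3), i.e. the XRI stays quarantined
 * in the bitmap for RATOV + 1 = 3 seconds before lpfc_handle_rrq_active()
 * may clear it or send the RRQ ELS.
 */
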
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, the allocation succeeds and a pointer to the newly allocated
 * sglq object is returned; otherwise NULL is returned.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, the allocation succeeds and a pointer to the newly allocated
 * sglq object is returned; otherwise NULL is returned.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

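/*
 * Usage sketch (illustrative only, not driver code): iocbq objects come
 * from and return to the same pool, and the unlocked wrappers pair up as:
 *
 *	piocb = lpfc_sli_get_iocbq(phba);	// takes/releases hbalock
 *	if (!piocb)
 *		return -ENOMEM;
 *	... build and issue the iocb ...
 *	lpfc_sli_release_iocbq(phba, piocb);	// on error/completion paths
 */
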
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

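/*
 * Usage sketch (illustrative only, not driver code): ring event handlers
 * dispatch on the returned type, e.g.:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
 *	case LPFC_SOL_IOCB:
 *		... match against txcmplq and invoke the completion ...
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		... hand the sequence to the upper layer protocol ...
 *		break;
 *	default:
 *		break;
 *	}
 */
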
e59058c4 1523/**
3621a710 1524 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
e59058c4
JS
1525 * @phba: Pointer to HBA context object.
1526 *
1527 * This function is called from SLI initialization code
1528 * to configure every ring of the HBA's SLI interface. The
1529 * caller is not required to hold any lock. This function issues
1530 * a config_ring mailbox command for each ring.
1531 * This function returns zero if successful, else it returns a negative
1532 * error code.
1533 **/
dea3101e 1534static int
ed957684 1535lpfc_sli_ring_map(struct lpfc_hba *phba)
dea3101e 1536{
1537 struct lpfc_sli *psli = &phba->sli;
ed957684
JS
1538 LPFC_MBOXQ_t *pmb;
1539 MAILBOX_t *pmbox;
1540 int i, rc, ret = 0;
dea3101e 1541
ed957684
JS
1542 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1543 if (!pmb)
1544 return -ENOMEM;
04c68496 1545 pmbox = &pmb->u.mb;
ed957684 1546 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 1547 for (i = 0; i < psli->num_rings; i++) {
dea3101e 1548 lpfc_config_ring(phba, i, pmb);
1549 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1550 if (rc != MBX_SUCCESS) {
92d7f7b0 1551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1552 "0446 Adapter failed to init (%d), "
dea3101e 1553 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1554 "ring %d\n",
e8b62011
JS
1555 rc, pmbox->mbxCommand,
1556 pmbox->mbxStatus, i);
2e0fef85 1557 phba->link_state = LPFC_HBA_ERROR;
ed957684
JS
1558 ret = -ENXIO;
1559 break;
dea3101e 1560 }
1561 }
ed957684
JS
1562 mempool_free(pmb, phba->mbox_mem_pool);
1563 return ret;
dea3101e 1564}
1565
e59058c4 1566/**
3621a710 1567 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
e59058c4
JS
1568 * @phba: Pointer to HBA context object.
1569 * @pring: Pointer to driver SLI ring object.
1570 * @piocb: Pointer to the driver iocb object.
1571 *
e2a8be56
JS
1572 * The driver calls this function with the hbalock held for SLI3 ports or
1573 * the ring lock held for SLI4 ports. The function adds the
e59058c4
JS
1574 * new iocb to the txcmplq of the given ring and always returns
1575 * 0. If the iocb is destined for the ELS ring, the function verifies
1576 * that a vport is associated with the ELS command and also
1577 * starts the els_tmofunc timer for that command.
1578 **/
dea3101e 1579static int
2e0fef85
JS
1580lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1581 struct lpfc_iocbq *piocb)
dea3101e 1582{
e2a8be56
JS
1583 if (phba->sli_rev == LPFC_SLI_REV4)
1584 lockdep_assert_held(&pring->ring_lock);
1585 else
1586 lockdep_assert_held(&phba->hbalock);
1c2ba475 1587
2319f847 1588 BUG_ON(!piocb);
22466da5 1589
dea3101e 1590 list_add_tail(&piocb->list, &pring->txcmplq);
4f2e66c6 1591 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
c490850a 1592 pring->txcmplq_cnt++;
2a9bf3d0 1593
92d7f7b0
JS
1594 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1595 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2319f847
MFO
1596 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1597 BUG_ON(!piocb->vport);
1598 if (!(piocb->vport->load_flag & FC_UNLOADING))
1599 mod_timer(&piocb->vport->els_tmofunc,
1600 jiffies +
1601 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1602 }
dea3101e 1603
2e0fef85 1604 return 0;
dea3101e 1605}
1606
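/*
 * Editor's note: the ELS timeout armed above is twice the fabric R_A_TOV,
 * converted from seconds to jiffies. A standalone restatement of that
 * arithmetic; lpfc_example_els_timeout is a hypothetical helper.
 */
static unsigned long lpfc_example_els_timeout(uint32_t fc_ratov_seconds)
{
	/* 2 * R_A_TOV seconds, expressed as an absolute jiffies deadline */
	return jiffies + msecs_to_jiffies(1000 * (fc_ratov_seconds << 1));
}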
e59058c4 1607/**
3621a710 1608 * lpfc_sli_ringtx_get - Get first element of the txq
e59058c4
JS
1609 * @phba: Pointer to HBA context object.
1610 * @pring: Pointer to driver SLI ring object.
1611 *
1612 * This function is called with hbalock held to get the next
1613 * iocb in the txq of the given ring. If there is any iocb in
1614 * the txq, the function returns the first iocb in the list after
1615 * removing it from the list, else it returns NULL.
1616 **/
2a9bf3d0 1617struct lpfc_iocbq *
2e0fef85 1618lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1619{
dea3101e 1620 struct lpfc_iocbq *cmd_iocb;
1621
1c2ba475
JT
1622 lockdep_assert_held(&phba->hbalock);
1623
858c9f6c 1624 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
2e0fef85 1625 return cmd_iocb;
dea3101e 1626}
1627
e59058c4 1628/**
3621a710 1629 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
e59058c4
JS
1630 * @phba: Pointer to HBA context object.
1631 * @pring: Pointer to driver SLI ring object.
1632 *
1633 * This function is called with hbalock held and the caller must post the
1634 * iocb without releasing the lock. If the caller releases the lock,
1635 * the iocb slot returned by the function is not guaranteed to be available.
1636 * The function returns pointer to the next available iocb slot if there
1637 * is available slot in the ring, else it returns NULL.
1638 * If the get index of the ring is ahead of the put index, the function
1639 * will post an error attention event to the worker thread to take the
1640 * HBA to offline state.
1641 **/
dea3101e 1642static IOCB_t *
1643lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1644{
34b02dcd 1645 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
7e56aa25 1646 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1c2ba475
JT
1647
1648 lockdep_assert_held(&phba->hbalock);
1649
7e56aa25
JS
1650 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1651 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1652 pring->sli.sli3.next_cmdidx = 0;
dea3101e 1653
7e56aa25
JS
1654 if (unlikely(pring->sli.sli3.local_getidx ==
1655 pring->sli.sli3.next_cmdidx)) {
dea3101e 1656
7e56aa25 1657 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 1658
7e56aa25 1659 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea3101e 1660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1661 "0315 Ring %d issue: portCmdGet %d "
025dfdaf 1662 "is bigger than cmd ring %d\n",
e8b62011 1663 pring->ringno,
7e56aa25
JS
1664 pring->sli.sli3.local_getidx,
1665 max_cmd_idx);
dea3101e 1666
2e0fef85 1667 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1668 /*
1669 * All error attention handlers are posted to
1670 * worker thread
1671 */
1672 phba->work_ha |= HA_ERATT;
1673 phba->work_hs = HS_FFER3;
92d7f7b0 1674
5e9d9b82 1675 lpfc_worker_wake_up(phba);
dea3101e 1676
1677 return NULL;
1678 }
1679
7e56aa25 1680 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea3101e 1681 return NULL;
1682 }
1683
ed957684 1684 return lpfc_cmd_iocb(phba, pring);
dea3101e 1685}
1686
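/*
 * Editor's illustration: the SLI-3 command ring is a circular buffer, so the
 * next_cmdidx bookkeeping above reduces to a wrap-around increment. A hedged
 * restatement with a hypothetical helper name:
 */
static uint32_t lpfc_example_ring_advance(uint32_t idx, uint32_t entries)
{
	return (idx + 1 >= entries) ? 0 : idx + 1;
}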
e59058c4 1687/**
3621a710 1688 * lpfc_sli_next_iotag - Get an iotag for the iocb
e59058c4
JS
1689 * @phba: Pointer to HBA context object.
1690 * @iocbq: Pointer to driver iocb object.
1691 *
1692 * This function gets an iotag for the iocb. If there is no unused iotag and
1693 * the iocbq_lookup_len is below 0xffff, this function allocates a bigger
1694 * iocbq_lookup array and assigns a new iotag.
1695 * The function returns the allocated iotag if successful, else returns zero.
1696 * Zero is not a valid iotag.
1697 * The caller is not required to hold any lock.
1698 **/
604a3e30 1699uint16_t
2e0fef85 1700lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 1701{
2e0fef85
JS
1702 struct lpfc_iocbq **new_arr;
1703 struct lpfc_iocbq **old_arr;
604a3e30
JB
1704 size_t new_len;
1705 struct lpfc_sli *psli = &phba->sli;
1706 uint16_t iotag;
dea3101e 1707
2e0fef85 1708 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1709 iotag = psli->last_iotag;
1710 if (++iotag < psli->iocbq_lookup_len) {
1711 psli->last_iotag = iotag;
1712 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1713 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1714 iocbq->iotag = iotag;
1715 return iotag;
2e0fef85 1716 } else if (psli->iocbq_lookup_len < (0xffff
604a3e30
JB
1717 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1718 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2e0fef85 1719 spin_unlock_irq(&phba->hbalock);
6396bb22 1720 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
604a3e30
JB
1721 GFP_KERNEL);
1722 if (new_arr) {
2e0fef85 1723 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1724 old_arr = psli->iocbq_lookup;
1725 if (new_len <= psli->iocbq_lookup_len) {
1726 /* highly improbable case */
1727 kfree(new_arr);
1728 iotag = psli->last_iotag;
1729 if (++iotag < psli->iocbq_lookup_len) {
1730 psli->last_iotag = iotag;
1731 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1732 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1733 iocbq->iotag = iotag;
1734 return iotag;
1735 }
2e0fef85 1736 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1737 return 0;
1738 }
1739 if (psli->iocbq_lookup)
1740 memcpy(new_arr, old_arr,
1741 ((psli->last_iotag + 1) *
311464ec 1742 sizeof (struct lpfc_iocbq *)));
604a3e30
JB
1743 psli->iocbq_lookup = new_arr;
1744 psli->iocbq_lookup_len = new_len;
1745 psli->last_iotag = iotag;
1746 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1747 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1748 iocbq->iotag = iotag;
1749 kfree(old_arr);
1750 return iotag;
1751 }
8f6d98d2 1752 } else
2e0fef85 1753 spin_unlock_irq(&phba->hbalock);
dea3101e 1754
bc73905a 1755 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
1756 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1757 psli->last_iotag);
dea3101e 1758
604a3e30 1759 return 0;
dea3101e 1760}
1761
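/*
 * Editor's sketch of the growth policy used above: the iocbq_lookup table is
 * extended by LPFC_IOCBQ_LOOKUP_INCREMENT entries at a time while the 16-bit
 * iotag space (with 0 reserved as invalid) is not exhausted. The helper name
 * is hypothetical.
 */
static int lpfc_example_lookup_can_grow(size_t len, size_t increment)
{
	/* mirrors the iocbq_lookup_len < (0xffff - increment) test above */
	return len < (0xffff - increment);
}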
e59058c4 1762/**
3621a710 1763 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
e59058c4
JS
1764 * @phba: Pointer to HBA context object.
1765 * @pring: Pointer to driver SLI ring object.
1766 * @iocb: Pointer to iocb slot in the ring.
1767 * @nextiocb: Pointer to the driver iocb object which needs to be
1768 * posted to firmware.
1769 *
1770 * This function is called with hbalock held to post a new iocb to
1771 * the firmware. This function copies the new iocb to the ring iocb slot and
1772 * updates the ring pointers. It adds the new iocb to the txcmplq if there is
1773 * a completion callback for this iocb, else the function will free the
1774 * iocb object.
1775 **/
dea3101e 1776static void
1777lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1778 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1779{
1c2ba475 1780 lockdep_assert_held(&phba->hbalock);
dea3101e 1781 /*
604a3e30 1782 * Set up an iotag
dea3101e 1783 */
604a3e30 1784 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 1785
e2a0a9d6 1786
a58cbd52
JS
1787 if (pring->ringno == LPFC_ELS_RING) {
1788 lpfc_debugfs_slow_ring_trc(phba,
1789 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1790 *(((uint32_t *) &nextiocb->iocb) + 4),
1791 *(((uint32_t *) &nextiocb->iocb) + 6),
1792 *(((uint32_t *) &nextiocb->iocb) + 7));
1793 }
1794
dea3101e 1795 /*
1796 * Issue iocb command to adapter
1797 */
92d7f7b0 1798 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e 1799 wmb();
1800 pring->stats.iocb_cmd++;
1801
1802 /*
1803 * If there is no completion routine to call, we can release the
1804 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1805 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1806 */
1807 if (nextiocb->iocb_cmpl)
1808 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 1809 else
2e0fef85 1810 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e 1811
1812 /*
1813 * Let the HBA know what IOCB slot will be the next one the
1814 * driver will put a command into.
1815 */
7e56aa25
JS
1816 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1817 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e 1818}
1819
e59058c4 1820/**
3621a710 1821 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1822 * @phba: Pointer to HBA context object.
1823 * @pring: Pointer to driver SLI ring object.
1824 *
1825 * The caller is not required to hold any lock for calling this function.
1826 * This function updates the chip attention bits for the ring to inform firmware
1827 * that there is pending work to be done for this ring and requests an
1828 * interrupt when there is space available in the ring. This function is
1829 * called when the driver is unable to post more iocbs to the ring due
1830 * to unavailability of space in the ring.
1831 **/
dea3101e 1832static void
2e0fef85 1833lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1834{
1835 int ringno = pring->ringno;
1836
1837 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1838
1839 wmb();
1840
1841 /*
1842 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1843 * The HBA will tell us when an IOCB entry is available.
1844 */
1845 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1846 readl(phba->CAregaddr); /* flush */
1847
1848 pring->stats.iocb_cmd_full++;
1849}
1850
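/*
 * Editor's note: each ring owns a 4-bit field in the Chip Attention
 * register, so attention bits for ring N are shifted left by 4 * N, as in
 * the writel() above. A hedged restatement; the helper name is hypothetical.
 */
static uint32_t lpfc_example_ca_bits(uint32_t bits, int ringno)
{
	return bits << (ringno * 4);
}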
e59058c4 1851/**
3621a710 1852 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1853 * @phba: Pointer to HBA context object.
1854 * @pring: Pointer to driver SLI ring object.
1855 *
1856 * This function updates the chip attention register bit for the
1857 * given ring to inform HBA that there is more work to be done
1858 * in this ring. The caller is not required to hold any lock.
1859 **/
dea3101e 1860static void
2e0fef85 1861lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1862{
1863 int ringno = pring->ringno;
1864
1865 /*
1866 * Tell the HBA that there is work to do in this ring.
1867 */
34b02dcd
JS
1868 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1869 wmb();
1870 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1871 readl(phba->CAregaddr); /* flush */
1872 }
dea3101e 1873}
1874
e59058c4 1875/**
3621a710 1876 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1877 * @phba: Pointer to HBA context object.
1878 * @pring: Pointer to driver SLI ring object.
1879 *
1880 * This function is called with hbalock held to post pending iocbs
1881 * in the txq to the firmware. This function is called when the driver
1882 * detects space available in the ring.
1883 **/
dea3101e 1884static void
2e0fef85 1885lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1886{
1887 IOCB_t *iocb;
1888 struct lpfc_iocbq *nextiocb;
1889
1c2ba475
JT
1890 lockdep_assert_held(&phba->hbalock);
1891
dea3101e 1892 /*
1893 * Check to see if:
1894 * (a) there is anything on the txq to send
1895 * (b) link is up
1896 * (c) link attention events can be processed (fcp ring only)
1897 * (d) IOCB processing is not blocked by the outstanding mbox command.
1898 */
0e9bb8d7
JS
1899
1900 if (lpfc_is_link_up(phba) &&
1901 (!list_empty(&pring->txq)) &&
895427bd 1902 (pring->ringno != LPFC_FCP_RING ||
0b727fea 1903 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1904
1905 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1906 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1907 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1908
1909 if (iocb)
1910 lpfc_sli_update_ring(phba, pring);
1911 else
1912 lpfc_sli_update_full_ring(phba, pring);
1913 }
1914
1915 return;
1916}
1917
e59058c4 1918/**
3621a710 1919 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1920 * @phba: Pointer to HBA context object.
1921 * @hbqno: HBQ number.
1922 *
1923 * This function is called with hbalock held to get the next
1924 * available slot for the given HBQ. If there is a free slot
1925 * available for the HBQ it will return a pointer to the next available
1926 * HBQ entry, else it will return NULL.
1927 **/
a6ababd2 1928static struct lpfc_hbq_entry *
ed957684
JS
1929lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1930{
1931 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1932
1c2ba475
JT
1933 lockdep_assert_held(&phba->hbalock);
1934
ed957684
JS
1935 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1936 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1937 hbqp->next_hbqPutIdx = 0;
1938
1939 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1940 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1941 uint32_t getidx = le32_to_cpu(raw_index);
1942
1943 hbqp->local_hbqGetIdx = getidx;
1944
1945 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1946 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1947 LOG_SLI | LOG_VPORT,
e8b62011 1948 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1949 "%u is > than hbqp->entry_count %u\n",
e8b62011 1950 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1951 hbqp->entry_count);
1952
1953 phba->link_state = LPFC_HBA_ERROR;
1954 return NULL;
1955 }
1956
1957 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1958 return NULL;
1959 }
1960
51ef4c26
JS
1961 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1962 hbqp->hbqPutIdx;
ed957684
JS
1963}
1964
e59058c4 1965/**
3621a710 1966 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1967 * @phba: Pointer to HBA context object.
1968 *
1969 * This function is called with no lock held to free all the
1970 * hbq buffers while uninitializing the SLI interface. It also
1971 * frees the HBQ buffers returned by the firmware but not yet
1972 * processed by the upper layers.
1973 **/
ed957684
JS
1974void
1975lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1976{
92d7f7b0
JS
1977 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1978 struct hbq_dmabuf *hbq_buf;
3163f725 1979 unsigned long flags;
51ef4c26 1980 int i, hbq_count;
ed957684 1981
51ef4c26 1982 hbq_count = lpfc_sli_hbq_count();
ed957684 1983 /* Return all memory used by all HBQs */
3163f725 1984 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1985 for (i = 0; i < hbq_count; ++i) {
1986 list_for_each_entry_safe(dmabuf, next_dmabuf,
1987 &phba->hbqs[i].hbq_buffer_list, list) {
1988 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1989 list_del(&hbq_buf->dbuf.list);
1990 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1991 }
a8adb832 1992 phba->hbqs[i].buffer_count = 0;
ed957684 1993 }
3163f725
JS
1994
1995 /* Mark the HBQs not in use */
1996 phba->hbq_in_use = 0;
1997 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
1998}
1999
e59058c4 2000/**
3621a710 2001 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
2002 * @phba: Pointer to HBA context object.
2003 * @hbqno: HBQ number.
2004 * @hbq_buf: Pointer to HBQ buffer.
2005 *
2006 * This function is called with the hbalock held to post a
2007 * hbq buffer to the firmware. If the function finds an empty
2008 * slot in the HBQ, it will post the buffer. The function returns
2009 * zero if it successfully posts the buffer, else it returns an
2010 * error code.
2011 **/
3772a991 2012static int
ed957684 2013lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 2014 struct hbq_dmabuf *hbq_buf)
3772a991 2015{
1c2ba475 2016 lockdep_assert_held(&phba->hbalock);
3772a991
JS
2017 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2018}
2019
2020/**
2021 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2022 * @phba: Pointer to HBA context object.
2023 * @hbqno: HBQ number.
2024 * @hbq_buf: Pointer to HBQ buffer.
2025 *
2026 * This function is called with the hbalock held to post a hbq buffer to the
2027 * firmware. If the function finds an empty slot in the HBQ, it will post the
2028 * buffer and place it on the hbq_buffer_list. The function will return zero if
2029 * it successfully post the buffer else it will return an error.
2030 **/
2031static int
2032lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2033 struct hbq_dmabuf *hbq_buf)
ed957684
JS
2034{
2035 struct lpfc_hbq_entry *hbqe;
92d7f7b0 2036 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684 2037
1c2ba475 2038 lockdep_assert_held(&phba->hbalock);
ed957684
JS
2039 /* Get next HBQ entry slot to use */
2040 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2041 if (hbqe) {
2042 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2043
92d7f7b0
JS
2044 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2045 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
895427bd 2046 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
ed957684 2047 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
2048 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2049 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2050 /* Sync SLIM */
ed957684
JS
2051 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2052 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 2053 /* flush */
ed957684 2054 readl(phba->hbq_put + hbqno);
51ef4c26 2055 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
2056 return 0;
2057 } else
2058 return -ENOMEM;
ed957684
JS
2059}
2060
4f774513
JS
2061/**
2062 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2063 * @phba: Pointer to HBA context object.
2064 * @hbqno: HBQ number.
2065 * @hbq_buf: Pointer to HBQ buffer.
2066 *
2067 * This function is called with the hbalock held to post an RQE to the SLI4
2068 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2069 * the hbq_buffer_list and return zero, otherwise it will return an error.
2070 **/
2071static int
2072lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2073 struct hbq_dmabuf *hbq_buf)
2074{
2075 int rc;
2076 struct lpfc_rqe hrqe;
2077 struct lpfc_rqe drqe;
895427bd
JS
2078 struct lpfc_queue *hrq;
2079 struct lpfc_queue *drq;
2080
2081 if (hbqno != LPFC_ELS_HBQ)
2082 return 1;
2083 hrq = phba->sli4_hba.hdr_rq;
2084 drq = phba->sli4_hba.dat_rq;
4f774513 2085
1c2ba475 2086 lockdep_assert_held(&phba->hbalock);
4f774513
JS
2087 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2088 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2089 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2090 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
895427bd 2091 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
4f774513
JS
2092 if (rc < 0)
2093 return rc;
895427bd 2094 hbq_buf->tag = (rc | (hbqno << 16));
4f774513
JS
2095 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2096 return 0;
2097}
2098
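/*
 * Editor's illustration of the buffer tag layout used by both posting paths
 * above: the HBQ number occupies the upper 16 bits and the buffer/RQE index
 * the lower 16 bits. Both helper names are hypothetical.
 */
static inline uint32_t lpfc_example_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}

static inline uint32_t lpfc_example_tag_to_index(uint32_t tag)
{
	return tag & 0xffff;
}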
e59058c4 2099/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
2100static struct lpfc_hbq_init lpfc_els_hbq = {
2101 .rn = 1,
def9c7a9 2102 .entry_count = 256,
92d7f7b0
JS
2103 .mask_count = 0,
2104 .profile = 0,
51ef4c26 2105 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 2106 .buffer_count = 0,
a257bf90
JS
2107 .init_count = 40,
2108 .add_count = 40,
92d7f7b0 2109};
ed957684 2110
e59058c4 2111/* Array of HBQs */
78b2d852 2112struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0
JS
2113 &lpfc_els_hbq,
2114};
ed957684 2115
e59058c4 2116/**
3621a710 2117 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
2118 * @phba: Pointer to HBA context object.
2119 * @hbqno: HBQ number.
2120 * @count: Number of HBQ buffers to be posted.
2121 *
d7c255b2
JS
2122 * This function is called with no lock held to post more hbq buffers to the
2123 * given HBQ. The function returns the number of HBQ buffers successfully
2124 * posted.
e59058c4 2125 **/
311464ec 2126static int
92d7f7b0 2127lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 2128{
d7c255b2 2129 uint32_t i, posted = 0;
3163f725 2130 unsigned long flags;
92d7f7b0 2131 struct hbq_dmabuf *hbq_buffer;
d7c255b2 2132 LIST_HEAD(hbq_buf_list);
eafe1df9 2133 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 2134 return 0;
51ef4c26 2135
d7c255b2
JS
2136 if ((phba->hbqs[hbqno].buffer_count + count) >
2137 lpfc_hbq_defs[hbqno]->entry_count)
2138 count = lpfc_hbq_defs[hbqno]->entry_count -
2139 phba->hbqs[hbqno].buffer_count;
2140 if (!count)
2141 return 0;
2142 /* Allocate HBQ entries */
2143 for (i = 0; i < count; i++) {
2144 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2145 if (!hbq_buffer)
2146 break;
2147 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2148 }
3163f725
JS
2149 /* Check whether HBQ is still in use */
2150 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 2151 if (!phba->hbq_in_use)
d7c255b2
JS
2152 goto err;
2153 while (!list_empty(&hbq_buf_list)) {
2154 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2155 dbuf.list);
2156 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2157 (hbqno << 16));
3772a991 2158 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 2159 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
2160 posted++;
2161 } else
51ef4c26 2162 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 2163 }
3163f725 2164 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2165 return posted;
2166err:
eafe1df9 2167 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2168 while (!list_empty(&hbq_buf_list)) {
2169 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2170 dbuf.list);
2171 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2172 }
2173 return 0;
ed957684
JS
2174}
2175
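/*
 * Editor's note on the pattern above: buffers are allocated with no lock
 * held, then posted under the hbalock so that a concurrent teardown (which
 * clears hbq_in_use) is observed before any staged buffer reaches the
 * firmware. A minimal restatement; the helper name is hypothetical.
 */
static bool lpfc_example_hbq_still_in_use(struct lpfc_hba *phba)
{
	unsigned long flags;
	bool in_use;

	spin_lock_irqsave(&phba->hbalock, flags);
	in_use = phba->hbq_in_use;	/* re-check under the lock */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return in_use;
}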
e59058c4 2176/**
3621a710 2177 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
2178 * @phba: Pointer to HBA context object.
2179 * @qno: HBQ number.
2180 *
2181 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
2182 * is called with no lock held. The function returns the number of HBQ entries
2183 * successfully allocated.
e59058c4 2184 **/
92d7f7b0
JS
2185int
2186lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 2187{
def9c7a9
JS
2188 if (phba->sli_rev == LPFC_SLI_REV4)
2189 return 0;
2190 else
2191 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2192 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 2193}
ed957684 2194
e59058c4 2195/**
3621a710 2196 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
2197 * @phba: Pointer to HBA context object.
2198 * @qno: HBQ queue number.
2199 *
2200 * This function is called from SLI initialization code path with
2201 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 2202 * function returns the number of HBQ entries successfully allocated.
e59058c4 2203 **/
a6ababd2 2204static int
92d7f7b0
JS
2205lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2206{
def9c7a9
JS
2207 if (phba->sli_rev == LPFC_SLI_REV4)
2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 2209 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
2210 else
2211 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2212 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
2213}
2214
3772a991
JS
2215/**
2216 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2217 * @rb_list: Pointer to the hbq buffer list from which the first
2218 * buffer is removed.
2219 *
2220 * This function removes the first hbq buffer on an hbq list and returns a
2221 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2222 **/
2223static struct hbq_dmabuf *
2224lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2225{
2226 struct lpfc_dmabuf *d_buf;
2227
2228 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2229 if (!d_buf)
2230 return NULL;
2231 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2232}
2233
2d7dbc4c
JS
2234/**
2235 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2236 * @phba: Pointer to HBA context object.
2237 * @hrq: Pointer to the header receive queue.
2238 *
2239 * This function removes the first RQ buffer on an RQ buffer list and returns a
2240 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2241 **/
2242static struct rqb_dmabuf *
2243lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2244{
2245 struct lpfc_dmabuf *h_buf;
2246 struct lpfc_rqb *rqbp;
2247
2248 rqbp = hrq->rqbp;
2249 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2250 struct lpfc_dmabuf, list);
2251 if (!h_buf)
2252 return NULL;
2253 rqbp->buffer_count--;
2254 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2255}
2256
e59058c4 2257/**
3621a710 2258 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
2259 * @phba: Pointer to HBA context object.
2260 * @tag: Tag of the hbq buffer.
2261 *
71892418
SH
2262 * This function searches for the hbq buffer associated with the given tag in
2263 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer,
2264 * otherwise it returns NULL.
e59058c4 2265 **/
a6ababd2 2266static struct hbq_dmabuf *
92d7f7b0 2267lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 2268{
92d7f7b0
JS
2269 struct lpfc_dmabuf *d_buf;
2270 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
2271 uint32_t hbqno;
2272
2273 hbqno = tag >> 16;
a0a74e45 2274 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 2275 return NULL;
ed957684 2276
3772a991 2277 spin_lock_irq(&phba->hbalock);
51ef4c26 2278 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 2279 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 2280 if (hbq_buf->tag == tag) {
3772a991 2281 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2282 return hbq_buf;
ed957684
JS
2283 }
2284 }
3772a991 2285 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2287 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2288 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2289 return NULL;
ed957684
JS
2290}
2291
e59058c4 2292/**
3621a710 2293 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2294 * @phba: Pointer to HBA context object.
2295 * @hbq_buffer: Pointer to HBQ buffer.
2296 *
2297 * This function is called with the hbalock held. It gives back
2298 * the hbq buffer to firmware. If the HBQ does not have space to
2299 * post the buffer, it will free the buffer.
2300 **/
ed957684 2301void
51ef4c26 2302lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2303{
2304 uint32_t hbqno;
2305
51ef4c26
JS
2306 if (hbq_buffer) {
2307 hbqno = hbq_buffer->tag >> 16;
3772a991 2308 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2309 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2310 }
2311}
2312
e59058c4 2313/**
3621a710 2314 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2315 * @mbxCommand: mailbox command code.
2316 *
2317 * This function is called by the mailbox event handler function to verify
2318 * that the completed mailbox command is a legitimate mailbox command. If the
2319 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2320 * and the mailbox event handler will take the HBA offline.
2321 **/
dea3101e 2322static int
2323lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2324{
2325 uint8_t ret;
2326
2327 switch (mbxCommand) {
2328 case MBX_LOAD_SM:
2329 case MBX_READ_NV:
2330 case MBX_WRITE_NV:
a8adb832 2331 case MBX_WRITE_VPARMS:
dea3101e 2332 case MBX_RUN_BIU_DIAG:
2333 case MBX_INIT_LINK:
2334 case MBX_DOWN_LINK:
2335 case MBX_CONFIG_LINK:
2336 case MBX_CONFIG_RING:
2337 case MBX_RESET_RING:
2338 case MBX_READ_CONFIG:
2339 case MBX_READ_RCONFIG:
2340 case MBX_READ_SPARM:
2341 case MBX_READ_STATUS:
2342 case MBX_READ_RPI:
2343 case MBX_READ_XRI:
2344 case MBX_READ_REV:
2345 case MBX_READ_LNK_STAT:
2346 case MBX_REG_LOGIN:
2347 case MBX_UNREG_LOGIN:
dea3101e 2348 case MBX_CLEAR_LA:
2349 case MBX_DUMP_MEMORY:
2350 case MBX_DUMP_CONTEXT:
2351 case MBX_RUN_DIAGS:
2352 case MBX_RESTART:
2353 case MBX_UPDATE_CFG:
2354 case MBX_DOWN_LOAD:
2355 case MBX_DEL_LD_ENTRY:
2356 case MBX_RUN_PROGRAM:
2357 case MBX_SET_MASK:
09372820 2358 case MBX_SET_VARIABLE:
dea3101e 2359 case MBX_UNREG_D_ID:
41415862 2360 case MBX_KILL_BOARD:
dea3101e 2361 case MBX_CONFIG_FARP:
41415862 2362 case MBX_BEACON:
dea3101e 2363 case MBX_LOAD_AREA:
2364 case MBX_RUN_BIU_DIAG64:
2365 case MBX_CONFIG_PORT:
2366 case MBX_READ_SPARM64:
2367 case MBX_READ_RPI64:
2368 case MBX_REG_LOGIN64:
76a95d75 2369 case MBX_READ_TOPOLOGY:
09372820 2370 case MBX_WRITE_WWN:
dea3101e 2371 case MBX_SET_DEBUG:
2372 case MBX_LOAD_EXP_ROM:
57127f15 2373 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2374 case MBX_REG_VPI:
2375 case MBX_UNREG_VPI:
858c9f6c 2376 case MBX_HEARTBEAT:
84774a4d
JS
2377 case MBX_PORT_CAPABILITIES:
2378 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2379 case MBX_SLI4_CONFIG:
2380 case MBX_SLI4_REQ_FTRS:
2381 case MBX_REG_FCFI:
2382 case MBX_UNREG_FCFI:
2383 case MBX_REG_VFI:
2384 case MBX_UNREG_VFI:
2385 case MBX_INIT_VPI:
2386 case MBX_INIT_VFI:
2387 case MBX_RESUME_RPI:
c7495937
JS
2388 case MBX_READ_EVENT_LOG_STATUS:
2389 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2390 case MBX_SECURITY_MGMT:
2391 case MBX_AUTH_PORT:
940eb687 2392 case MBX_ACCESS_VDATA:
dea3101e 2393 ret = mbxCommand;
2394 break;
2395 default:
2396 ret = MBX_SHUTDOWN;
2397 break;
2398 }
2e0fef85 2399 return ret;
dea3101e 2400}
e59058c4
JS
2401
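/*
 * Editor's illustration: callers treat an MBX_SHUTDOWN return as "unknown
 * mailbox command, take the HBA offline". The helper name is hypothetical.
 */
static bool lpfc_example_mbox_known(uint8_t cmd)
{
	return lpfc_sli_chk_mbx_command(cmd) != MBX_SHUTDOWN;
}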
2402/**
3621a710 2403 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2404 * @phba: Pointer to HBA context object.
2405 * @pmboxq: Pointer to mailbox command.
2406 *
2407 * This is completion handler function for mailbox commands issued from
2408 * lpfc_sli_issue_mbox_wait function. This function is called by the
2409 * mailbox event handler function with no lock held. This function
2410 * will wake up the thread waiting on the completion pointed to by
2411 * context3 of the mailbox.
2412 **/
04c68496 2413void
2e0fef85 2414lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2415{
858c9f6c 2416 unsigned long drvr_flag;
e29d74f8 2417 struct completion *pmbox_done;
dea3101e 2418
2419 /*
e29d74f8 2420 * If pmbox_done is empty, the driver thread gave up waiting and
dea3101e 2421 * continued running.
2422 */
7054a606 2423 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2424 spin_lock_irqsave(&phba->hbalock, drvr_flag);
e29d74f8
JS
2425 pmbox_done = (struct completion *)pmboxq->context3;
2426 if (pmbox_done)
2427 complete(pmbox_done);
858c9f6c 2428 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2429 return;
2430}
2431
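/*
 * Editor's sketch of the waiting side this handler pairs with: the issuer
 * stores a struct completion in context3 of the mailbox and blocks on it
 * until the wake-up above. Apart from the standard completion API, the
 * names here are hypothetical.
 */
static void lpfc_example_wait_side(LPFC_MBOXQ_t *pmboxq,
				   struct completion *mbox_done,
				   unsigned long timeout)
{
	pmboxq->context3 = mbox_done;
	wait_for_completion_timeout(mbox_done, timeout);
}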
b95b2119
JS
2432static void
2433__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2434{
2435 unsigned long iflags;
2436
2437 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2438 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2439 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2440 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2441 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2442 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2443 }
2444 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2445}
e59058c4
JS
2446
2447/**
3621a710 2448 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2449 * @phba: Pointer to HBA context object.
2450 * @pmb: Pointer to mailbox object.
2451 *
2452 * This function is the default mailbox completion handler. It
2453 * frees the memory resources associated with the completed mailbox
2454 * command. If the completed command is a REG_LOGIN mailbox command,
2455 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2456 **/
dea3101e 2457void
2e0fef85 2458lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2459{
d439d286 2460 struct lpfc_vport *vport = pmb->vport;
dea3101e 2461 struct lpfc_dmabuf *mp;
d439d286 2462 struct lpfc_nodelist *ndlp;
5af5eee7 2463 struct Scsi_Host *shost;
04c68496 2464 uint16_t rpi, vpi;
7054a606
JS
2465 int rc;
2466
3e1f0718 2467 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
7054a606 2468
dea3101e 2469 if (mp) {
2470 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2471 kfree(mp);
2472 }
7054a606
JS
2473
2474 /*
2475 * If a REG_LOGIN succeeded after the node was destroyed or the node
2476 * is in re-discovery, the driver needs to clean up the RPI.
2477 */
2e0fef85 2478 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2479 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2480 !pmb->u.mb.mbxStatus) {
2481 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2482 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2483 lpfc_unreg_login(phba, vpi, rpi, pmb);
de96e9c5 2484 pmb->vport = vport;
92d7f7b0 2485 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2486 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2487 if (rc != MBX_NOT_FINISHED)
2488 return;
2489 }
2490
695a814e
JS
2491 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2492 !(phba->pport->load_flag & FC_UNLOADING) &&
2493 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2494 shost = lpfc_shost_from_vport(vport);
2495 spin_lock_irq(shost->host_lock);
2496 vport->vpi_state |= LPFC_VPI_REGISTERED;
2497 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2498 spin_unlock_irq(shost->host_lock);
695a814e
JS
2499 }
2500
d439d286 2501 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 2502 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
d439d286 2503 lpfc_nlp_put(ndlp);
dea16bda
JS
2504 pmb->ctx_buf = NULL;
2505 pmb->ctx_ndlp = NULL;
2506 }
2507
2508 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2509 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2510
2511 /* Check to see if there are any deferred events to process */
2512 if (ndlp) {
2513 lpfc_printf_vlog(
2514 vport,
2515 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2516 "1438 UNREG cmpl deferred mbox x%x "
32350664 2517 "on NPort x%x Data: x%x x%x %px\n",
dea16bda
JS
2518 ndlp->nlp_rpi, ndlp->nlp_DID,
2519 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2520
2521 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2522 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
00292e03 2523 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2524 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2525 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
00292e03 2526 } else {
b95b2119 2527 __lpfc_sli_rpi_release(vport, ndlp);
dea16bda 2528 }
9b164068 2529 pmb->ctx_ndlp = NULL;
dea16bda 2530 }
d439d286
JS
2531 }
2532
dcf2a4e0
JS
2533 /* Check security permission status on INIT_LINK mailbox command */
2534 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2535 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2536 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2537 "2860 SLI authentication is required "
2538 "for INIT_LINK but has not done yet\n");
2539
04c68496
JS
2540 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2541 lpfc_sli4_mbox_cmd_free(phba, pmb);
2542 else
2543 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2544}
be6bb941
JS
2545/**
2546 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2547 * @phba: Pointer to HBA context object.
2548 * @pmb: Pointer to mailbox object.
2549 *
2550 * This function is the unreg rpi mailbox completion handler. It
2551 * frees the memory resources associated with the completed mailbox
2552 * command. An additional reference is put on the ndlp to prevent
2553 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2554 * the unreg mailbox command completes, this routine puts the
2555 * reference back.
2556 *
2557 **/
2558void
2559lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2560{
2561 struct lpfc_vport *vport = pmb->vport;
2562 struct lpfc_nodelist *ndlp;
2563
3e1f0718 2564 ndlp = pmb->ctx_ndlp;
be6bb941
JS
2565 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2566 if (phba->sli_rev == LPFC_SLI_REV4 &&
2567 (bf_get(lpfc_sli_intf_if_type,
27d6ac0a 2568 &phba->sli4_hba.sli_intf) >=
be6bb941
JS
2569 LPFC_SLI_INTF_IF_TYPE_2)) {
2570 if (ndlp) {
dea16bda
JS
2571 lpfc_printf_vlog(
2572 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2573 "0010 UNREG_LOGIN vpi:%x "
2574 "rpi:%x DID:%x defer x%x flg x%x "
32350664 2575 "map:%x %px\n",
dea16bda
JS
2576 vport->vpi, ndlp->nlp_rpi,
2577 ndlp->nlp_DID, ndlp->nlp_defer_did,
2578 ndlp->nlp_flag,
2579 ndlp->nlp_usg_map, ndlp);
7c5e518c 2580 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
be6bb941 2581 lpfc_nlp_put(ndlp);
dea16bda
JS
2582
2583 /* Check to see if there are any deferred
2584 * events to process
2585 */
2586 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2587 (ndlp->nlp_defer_did !=
2588 NLP_EVT_NOTHING_PENDING)) {
2589 lpfc_printf_vlog(
2590 vport, KERN_INFO, LOG_DISCOVERY,
2591 "4111 UNREG cmpl deferred "
2592 "clr x%x on "
32350664 2593 "NPort x%x Data: x%x x%px\n",
dea16bda
JS
2594 ndlp->nlp_rpi, ndlp->nlp_DID,
2595 ndlp->nlp_defer_did, ndlp);
00292e03 2596 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2597 ndlp->nlp_defer_did =
2598 NLP_EVT_NOTHING_PENDING;
2599 lpfc_issue_els_plogi(
2600 vport, ndlp->nlp_DID, 0);
00292e03 2601 } else {
b95b2119 2602 __lpfc_sli_rpi_release(vport, ndlp);
dea16bda 2603 }
be6bb941
JS
2604 }
2605 }
2606 }
2607
2608 mempool_free(pmb, phba->mbox_mem_pool);
2609}
dea3101e 2610
e59058c4 2611/**
3621a710 2612 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2613 * @phba: Pointer to HBA context object.
2614 *
2615 * This function is called with no lock held. This function processes all
2616 * the completed mailbox commands and gives them to the upper layers. The
2617 * interrupt service routine processes the mailbox completion interrupt and
2618 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2619 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2620 * returns the completed mailbox commands in the mboxq_cmpl queue to the
2621 * upper layers. This function returns the mailbox commands to the upper
2622 * layer by calling the completion handler function of each mailbox.
2623 **/
dea3101e 2624int
2e0fef85 2625lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2626{
92d7f7b0 2627 MAILBOX_t *pmbox;
dea3101e 2628 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2629 int rc;
2630 LIST_HEAD(cmplq);
dea3101e 2631
2632 phba->sli.slistat.mbox_event++;
2633
92d7f7b0
JS
2634 /* Get all completed mailbox buffers into the cmplq */
2635 spin_lock_irq(&phba->hbalock);
2636 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2637 spin_unlock_irq(&phba->hbalock);
dea3101e 2638
92d7f7b0
JS
2639 /* Get a mailbox buffer to set up mailbox commands for callback */
2640 do {
2641 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2642 if (pmb == NULL)
2643 break;
2e0fef85 2644
04c68496 2645 pmbox = &pmb->u.mb;
dea3101e 2646
858c9f6c
JS
2647 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2648 if (pmb->vport) {
2649 lpfc_debugfs_disc_trc(pmb->vport,
2650 LPFC_DISC_TRC_MBOX_VPORT,
2651 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2652 (uint32_t)pmbox->mbxCommand,
2653 pmbox->un.varWords[0],
2654 pmbox->un.varWords[1]);
2655 }
2656 else {
2657 lpfc_debugfs_disc_trc(phba->pport,
2658 LPFC_DISC_TRC_MBOX,
2659 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2660 (uint32_t)pmbox->mbxCommand,
2661 pmbox->un.varWords[0],
2662 pmbox->un.varWords[1]);
2663 }
2664 }
2665
dea3101e 2666 /*
2667 * It is a fatal error if unknown mbox command completion.
2668 */
2669 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2670 MBX_SHUTDOWN) {
af901ca1 2671 /* Unknown mailbox command compl */
92d7f7b0 2672 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2673 "(%d):0323 Unknown Mailbox command "
a183a15f 2674 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2675 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2676 pmbox->mbxCommand,
a183a15f
JS
2677 lpfc_sli_config_mbox_subsys_get(phba,
2678 pmb),
2679 lpfc_sli_config_mbox_opcode_get(phba,
2680 pmb));
2e0fef85 2681 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2682 phba->work_hs = HS_FFER3;
2683 lpfc_handle_eratt(phba);
92d7f7b0 2684 continue;
dea3101e 2685 }
2686
dea3101e 2687 if (pmbox->mbxStatus) {
2688 phba->sli.slistat.mbox_stat_err++;
2689 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2690 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2691 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2692 LOG_MBOX | LOG_SLI,
2693 "(%d):0305 Mbox cmd cmpl "
2694 "error - RETRYing Data: x%x "
2695 "(x%x/x%x) x%x x%x x%x\n",
2696 pmb->vport ? pmb->vport->vpi : 0,
2697 pmbox->mbxCommand,
2698 lpfc_sli_config_mbox_subsys_get(phba,
2699 pmb),
2700 lpfc_sli_config_mbox_opcode_get(phba,
2701 pmb),
2702 pmbox->mbxStatus,
2703 pmbox->un.varWords[0],
2704 pmb->vport->port_state);
dea3101e 2705 pmbox->mbxStatus = 0;
2706 pmbox->mbxOwner = OWN_HOST;
dea3101e 2707 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2708 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2709 continue;
dea3101e 2710 }
2711 }
2712
2713 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2714 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
32350664 2715 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %pf "
e74c03c8
JS
2716 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2717 "x%x x%x x%x\n",
92d7f7b0 2718 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2719 pmbox->mbxCommand,
a183a15f
JS
2720 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2721 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2722 pmb->mbox_cmpl,
2723 *((uint32_t *) pmbox),
2724 pmbox->un.varWords[0],
2725 pmbox->un.varWords[1],
2726 pmbox->un.varWords[2],
2727 pmbox->un.varWords[3],
2728 pmbox->un.varWords[4],
2729 pmbox->un.varWords[5],
2730 pmbox->un.varWords[6],
e74c03c8
JS
2731 pmbox->un.varWords[7],
2732 pmbox->un.varWords[8],
2733 pmbox->un.varWords[9],
2734 pmbox->un.varWords[10]);
dea3101e 2735
92d7f7b0 2736 if (pmb->mbox_cmpl)
dea3101e 2737 pmb->mbox_cmpl(phba, pmb);
92d7f7b0
JS
2738 } while (1);
2739 return 0;
2740}
dea3101e 2741
e59058c4 2742/**
3621a710 2743 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2744 * @phba: Pointer to HBA context object.
2745 * @pring: Pointer to driver SLI ring object.
2746 * @tag: buffer tag.
2747 *
2748 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2749 * is set in the tag, the buffer was posted for a particular exchange and
2750 * the function will return the buffer without replacing it.
2751 * If the buffer is for unsolicited ELS or CT traffic, this function
2752 * returns the buffer and also posts another buffer to the firmware.
2753 **/
76bb24ef
JS
2754static struct lpfc_dmabuf *
2755lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2756 struct lpfc_sli_ring *pring,
2757 uint32_t tag)
76bb24ef 2758{
9f1e1b50
JS
2759 struct hbq_dmabuf *hbq_entry;
2760
76bb24ef
JS
2761 if (tag & QUE_BUFTAG_BIT)
2762 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2763 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2764 if (!hbq_entry)
2765 return NULL;
2766 return &hbq_entry->dbuf;
76bb24ef 2767}
57127f15 2768
3772a991
JS
2769/**
2770 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2771 * @phba: Pointer to HBA context object.
2772 * @pring: Pointer to driver SLI ring object.
2773 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2774 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2775 * @fch_type: the type for the first frame of the sequence.
2776 *
2777 * This function is called with no lock held. This function uses the r_ctl and
2778 * type of the received sequence to find the correct callback function to call
2779 * to process the sequence.
2780 **/
2781static int
2782lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2783 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2784 uint32_t fch_type)
2785{
2786 int i;
2787
f358dd0c
JS
2788 switch (fch_type) {
2789 case FC_TYPE_NVME:
d613b6a7 2790 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
f358dd0c
JS
2791 return 1;
2792 default:
2793 break;
2794 }
2795
3772a991
JS
2796 /* unSolicited Responses */
2797 if (pring->prt[0].profile) {
2798 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2799 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2800 saveq);
2801 return 1;
2802 }
2803 /* We must search, based on rctl / type, for the right routine */
2805 for (i = 0; i < pring->num_mask; i++) {
2806 if ((pring->prt[i].rctl == fch_r_ctl) &&
2807 (pring->prt[i].type == fch_type)) {
2808 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2809 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2810 (phba, pring, saveq);
2811 return 1;
2812 }
2813 }
2814 return 0;
2815}
e59058c4
JS
2816
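/*
 * Editor's illustration of the dispatch above: mask entry 0 may act as a
 * catch-all profile; otherwise the first mask whose rctl and type both match
 * the first frame of the sequence wins. The helper name is hypothetical.
 */
static bool lpfc_example_mask_match(struct lpfc_sli_ring *pring, int i,
				    uint32_t fch_r_ctl, uint32_t fch_type)
{
	return pring->prt[i].rctl == fch_r_ctl &&
	       pring->prt[i].type == fch_type;
}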
2817/**
3621a710 2818 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2819 * @phba: Pointer to HBA context object.
2820 * @pring: Pointer to driver SLI ring object.
2821 * @saveq: Pointer to the unsolicited iocb.
2822 *
2823 * This function is called with no lock held by the ring event handler
2824 * when there is an unsolicited iocb posted to the response ring by the
2825 * firmware. This function gets the buffer associated with the iocbs
2826 * and calls the event handler for the ring. This function handles both
2827 * qring buffers and hbq buffers.
2828 * When the function returns 1, the caller can free the iocb object;
2829 * otherwise, upper layer functions will free the iocb objects.
2830 **/
dea3101e 2831static int
2832lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2833 struct lpfc_iocbq *saveq)
2834{
2835 IOCB_t * irsp;
2836 WORD5 * w5p;
2837 uint32_t Rctl, Type;
76bb24ef 2838 struct lpfc_iocbq *iocbq;
3163f725 2839 struct lpfc_dmabuf *dmzbuf;
dea3101e 2840
dea3101e 2841 irsp = &(saveq->iocb);
57127f15
JS
2842
2843 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2844 if (pring->lpfc_sli_rcv_async_status)
2845 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2846 else
2847 lpfc_printf_log(phba,
2848 KERN_WARNING,
2849 LOG_SLI,
2850 "0316 Ring %d handler: unexpected "
2851 "ASYNC_STATUS iocb received evt_code "
2852 "0x%x\n",
2853 pring->ringno,
2854 irsp->un.asyncstat.evt_code);
2855 return 1;
2856 }
2857
3163f725
JS
2858 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2859 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2860 if (irsp->ulpBdeCount > 0) {
2861 dmzbuf = lpfc_sli_get_buff(phba, pring,
2862 irsp->un.ulpWord[3]);
2863 lpfc_in_buf_free(phba, dmzbuf);
2864 }
2865
2866 if (irsp->ulpBdeCount > 1) {
2867 dmzbuf = lpfc_sli_get_buff(phba, pring,
2868 irsp->unsli3.sli3Words[3]);
2869 lpfc_in_buf_free(phba, dmzbuf);
2870 }
2871
2872 if (irsp->ulpBdeCount > 2) {
2873 dmzbuf = lpfc_sli_get_buff(phba, pring,
2874 irsp->unsli3.sli3Words[7]);
2875 lpfc_in_buf_free(phba, dmzbuf);
2876 }
2877
2878 return 1;
2879 }
2880
92d7f7b0 2881 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2882 if (irsp->ulpBdeCount != 0) {
2883 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2884 irsp->un.ulpWord[3]);
2885 if (!saveq->context2)
2886 lpfc_printf_log(phba,
2887 KERN_ERR,
2888 LOG_SLI,
2889 "0341 Ring %d Cannot find buffer for "
2890 "an unsolicited iocb. tag 0x%x\n",
2891 pring->ringno,
2892 irsp->un.ulpWord[3]);
76bb24ef
JS
2893 }
2894 if (irsp->ulpBdeCount == 2) {
2895 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2896 irsp->unsli3.sli3Words[7]);
2897 if (!saveq->context3)
2898 lpfc_printf_log(phba,
2899 KERN_ERR,
2900 LOG_SLI,
2901 "0342 Ring %d Cannot find buffer for an"
2902 " unsolicited iocb. tag 0x%x\n",
2903 pring->ringno,
2904 irsp->unsli3.sli3Words[7]);
2905 }
2906 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2907 irsp = &(iocbq->iocb);
76bb24ef
JS
2908 if (irsp->ulpBdeCount != 0) {
2909 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2910 irsp->un.ulpWord[3]);
9c2face6 2911 if (!iocbq->context2)
76bb24ef
JS
2912 lpfc_printf_log(phba,
2913 KERN_ERR,
2914 LOG_SLI,
2915 "0343 Ring %d Cannot find "
2916 "buffer for an unsolicited iocb"
2917 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2918 irsp->un.ulpWord[3]);
76bb24ef
JS
2919 }
2920 if (irsp->ulpBdeCount == 2) {
2921 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2922 irsp->unsli3.sli3Words[7]);
9c2face6 2923 if (!iocbq->context3)
76bb24ef
JS
2924 lpfc_printf_log(phba,
2925 KERN_ERR,
2926 LOG_SLI,
2927 "0344 Ring %d Cannot find "
2928 "buffer for an unsolicited "
2929 "iocb. tag 0x%x\n",
2930 pring->ringno,
2931 irsp->unsli3.sli3Words[7]);
2932 }
2933 }
92d7f7b0 2934 }
9c2face6
JS
2935 if (irsp->ulpBdeCount != 0 &&
2936 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2937 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2938 int found = 0;
2939
2940 /* search continue save q for same XRI */
2941 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2942 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2943 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2944 list_add_tail(&saveq->list, &iocbq->list);
2945 found = 1;
2946 break;
2947 }
2948 }
2949 if (!found)
2950 list_add_tail(&saveq->clist,
2951 &pring->iocb_continue_saveq);
2952 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2953 list_del_init(&iocbq->clist);
2954 saveq = iocbq;
2955 irsp = &(saveq->iocb);
2956 } else
2957 return 0;
2958 }
2959 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2960 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2961 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2962 Rctl = FC_RCTL_ELS_REQ;
2963 Type = FC_TYPE_ELS;
9c2face6
JS
2964 } else {
2965 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2966 Rctl = w5p->hcsw.Rctl;
2967 Type = w5p->hcsw.Type;
2968
2969 /* Firmware Workaround */
2970 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2971 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2972 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2973 Rctl = FC_RCTL_ELS_REQ;
2974 Type = FC_TYPE_ELS;
9c2face6
JS
2975 w5p->hcsw.Rctl = Rctl;
2976 w5p->hcsw.Type = Type;
2977 }
2978 }
92d7f7b0 2979
3772a991 2980 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2981 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2982 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2983 "Type x%x received\n",
e8b62011 2984 pring->ringno, Rctl, Type);
3772a991 2985
92d7f7b0 2986 return 1;
dea3101e 2987}
2988
e59058c4 2989/**
3621a710 2990 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2991 * @phba: Pointer to HBA context object.
2992 * @pring: Pointer to driver SLI ring object.
2993 * @prspiocb: Pointer to response iocb object.
2994 *
2995 * This function looks up the iocb_lookup table to get the command iocb
2996 * corresponding to the given response iocb using the iotag of the
e2a8be56
JS
2997 * response iocb. The driver calls this function with the hbalock held
2998 * for SLI3 ports or the ring lock held for SLI4 ports.
e59058c4
JS
2999 * This function returns the command iocb object if it finds the command
3000 * iocb else returns NULL.
3001 **/
dea3101e 3002static struct lpfc_iocbq *
2e0fef85
JS
3003lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3004 struct lpfc_sli_ring *pring,
3005 struct lpfc_iocbq *prspiocb)
dea3101e 3006{
dea3101e 3007 struct lpfc_iocbq *cmd_iocb = NULL;
3008 uint16_t iotag;
e2a8be56
JS
3009 spinlock_t *temp_lock = NULL;
3010 unsigned long iflag = 0;
3011
3012 if (phba->sli_rev == LPFC_SLI_REV4)
3013 temp_lock = &pring->ring_lock;
3014 else
3015 temp_lock = &phba->hbalock;
dea3101e 3016
e2a8be56 3017 spin_lock_irqsave(temp_lock, iflag);
604a3e30
JB
3018 iotag = prspiocb->iocb.ulpIoTag;
3019
3020 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3021 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6 3022 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
89533e9b
JS
3023 /* remove from txcmpl queue list */
3024 list_del_init(&cmd_iocb->list);
4f2e66c6 3025 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
c490850a 3026 pring->txcmplq_cnt--;
e2a8be56 3027 spin_unlock_irqrestore(temp_lock, iflag);
89533e9b 3028 return cmd_iocb;
2a9bf3d0 3029 }
dea3101e 3030 }
3031
e2a8be56 3032 spin_unlock_irqrestore(temp_lock, iflag);
dea3101e 3033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
89533e9b 3034 "0317 iotag x%x is out of "
604a3e30 3035 "range: max iotag x%x wd0 x%x\n",
e8b62011 3036 iotag, phba->sli.last_iotag,
604a3e30 3037 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 3038 return NULL;
3039}
3040
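/*
 * Editor's sketch of the lookup invariant shared by this helper and the
 * by-tag variant below: a usable iotag is non-zero and no greater than
 * last_iotag, i.e. it indexes a live entry in iocbq_lookup. The helper
 * name is hypothetical.
 */
static bool lpfc_example_iotag_in_range(struct lpfc_sli *psli, uint16_t iotag)
{
	return iotag != 0 && iotag <= psli->last_iotag;
}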
3772a991
JS
3041/**
3042 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3043 * @phba: Pointer to HBA context object.
3044 * @pring: Pointer to driver SLI ring object.
3045 * @iotag: IOCB tag.
3046 *
3047 * This function looks up the iocb_lookup table to get the command iocb
e2a8be56
JS
3048 * corresponding to the given iotag. The driver calls this function with
3049 * the ring lock held because this function is an SLI4 port only helper.
3772a991
JS
3050 * This function returns the command iocb object if it finds the command
3051 * iocb else returns NULL.
3052 **/
3053static struct lpfc_iocbq *
3054lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3055 struct lpfc_sli_ring *pring, uint16_t iotag)
3056{
895427bd 3057 struct lpfc_iocbq *cmd_iocb = NULL;
e2a8be56
JS
3058 spinlock_t *temp_lock = NULL;
3059 unsigned long iflag = 0;
3772a991 3060
e2a8be56
JS
3061 if (phba->sli_rev == LPFC_SLI_REV4)
3062 temp_lock = &pring->ring_lock;
3063 else
3064 temp_lock = &phba->hbalock;
3065
3066 spin_lock_irqsave(temp_lock, iflag);
3772a991
JS
3067 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3068 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
3069 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3070 /* remove from txcmpl queue list */
3071 list_del_init(&cmd_iocb->list);
3072 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
c490850a 3073 pring->txcmplq_cnt--;
e2a8be56 3074 spin_unlock_irqrestore(temp_lock, iflag);
4f2e66c6 3075 return cmd_iocb;
2a9bf3d0 3076 }
3772a991 3077 }
89533e9b 3078
e2a8be56 3079 spin_unlock_irqrestore(temp_lock, iflag);
3772a991 3080 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd
JS
3081 "0372 iotag x%x lookup error: max iotag (x%x) "
3082 "iocb_flag x%x\n",
3083 iotag, phba->sli.last_iotag,
3084 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3772a991
JS
3085 return NULL;
3086}
3087
e59058c4 3088/**
3621a710 3089 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
3090 * @phba: Pointer to HBA context object.
3091 * @pring: Pointer to driver SLI ring object.
3092 * @saveq: Pointer to the response iocb to be processed.
3093 *
3094 * This function is called by the ring event handler for non-fcp
3095 * rings when there is a new response iocb in the response ring.
3096 * The caller is not required to hold any locks. This function
3097 * gets the command iocb associated with the response iocb and
3098 * calls the completion handler for the command iocb. If there
3099 * is no completion handler, the function will free the resources
3100 * associated with command iocb. If the response iocb is for
3101 * an already aborted command iocb, the status of the completion
3102 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3103 * This function always returns 1.
3104 **/
dea3101e 3105static int
2e0fef85 3106lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 3107 struct lpfc_iocbq *saveq)
3108{
2e0fef85 3109 struct lpfc_iocbq *cmdiocbp;
dea3101e 3110 int rc = 1;
3111 unsigned long iflag;
3112
604a3e30 3113 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
dea3101e 3114 if (cmdiocbp) {
3115 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
3116 /*
3117 * If an ELS command failed send an event to mgmt
3118 * application.
3119 */
3120 if (saveq->iocb.ulpStatus &&
3121 (pring->ringno == LPFC_ELS_RING) &&
3122 (cmdiocbp->iocb.ulpCommand ==
3123 CMD_ELS_REQUEST64_CR))
3124 lpfc_send_els_failure_event(phba,
3125 cmdiocbp, saveq);
3126
dea3101e 3127 /*
3128 * Post all ELS completions to the worker thread.
3129 * All other are passed to the completion callback.
3130 */
3131 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
3132 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3133 (cmdiocbp->iocb_flag &
3134 LPFC_DRIVER_ABORTED)) {
3135 spin_lock_irqsave(&phba->hbalock,
3136 iflag);
07951076
JS
3137 cmdiocbp->iocb_flag &=
3138 ~LPFC_DRIVER_ABORTED;
341af102
JS
3139 spin_unlock_irqrestore(&phba->hbalock,
3140 iflag);
07951076
JS
3141 saveq->iocb.ulpStatus =
3142 IOSTAT_LOCAL_REJECT;
3143 saveq->iocb.un.ulpWord[4] =
3144 IOERR_SLI_ABORTED;
0ff10d46
JS
3145
3146 /* Firmware could still be in progress
3147 * of DMAing payload, so don't free data
3148 * buffer till after a hbeat.
3149 */
341af102
JS
3150 spin_lock_irqsave(&phba->hbalock,
3151 iflag);
0ff10d46 3152 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
3153 spin_unlock_irqrestore(&phba->hbalock,
3154 iflag);
3155 }
0f65ff68
JS
3156 if (phba->sli_rev == LPFC_SLI_REV4) {
3157 if (saveq->iocb_flag &
3158 LPFC_EXCHANGE_BUSY) {
3159 /* Set cmdiocb flag for the
3160 * exchange busy so sgl (xri)
3161 * will not be released until
3162 * the abort xri is received
3163 * from hba.
3164 */
3165 spin_lock_irqsave(
3166 &phba->hbalock, iflag);
3167 cmdiocbp->iocb_flag |=
3168 LPFC_EXCHANGE_BUSY;
3169 spin_unlock_irqrestore(
3170 &phba->hbalock, iflag);
3171 }
3172 if (cmdiocbp->iocb_flag &
3173 LPFC_DRIVER_ABORTED) {
3174 /*
3175 * Clear LPFC_DRIVER_ABORTED
3176 * bit in case it was driver
3177 * initiated abort.
3178 */
3179 spin_lock_irqsave(
3180 &phba->hbalock, iflag);
3181 cmdiocbp->iocb_flag &=
3182 ~LPFC_DRIVER_ABORTED;
3183 spin_unlock_irqrestore(
3184 &phba->hbalock, iflag);
3185 cmdiocbp->iocb.ulpStatus =
3186 IOSTAT_LOCAL_REJECT;
3187 cmdiocbp->iocb.un.ulpWord[4] =
3188 IOERR_ABORT_REQUESTED;
3189 /*
3190 * For SLI4, irsiocb contains
3191 * NO_XRI in sli_xritag, it
3192 * shall not affect releasing
3193 * sgl (xri) process.
3194 */
3195 saveq->iocb.ulpStatus =
3196 IOSTAT_LOCAL_REJECT;
3197 saveq->iocb.un.ulpWord[4] =
3198 IOERR_SLI_ABORTED;
3199 spin_lock_irqsave(
3200 &phba->hbalock, iflag);
3201 saveq->iocb_flag |=
3202 LPFC_DELAY_MEM_FREE;
3203 spin_unlock_irqrestore(
3204 &phba->hbalock, iflag);
3205 }
07951076 3206 }
dea3101e 3207 }
2e0fef85 3208 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
3209 } else
3210 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 3211 } else {
3212 /*
3213 * Unknown initiating command based on the response iotag.
3214 * This could be the case on the ELS ring because of
3215 * lpfc_els_abort().
3216 */
3217 if (pring->ringno != LPFC_ELS_RING) {
3218 /*
3219 * Ring <ringno> handler: unexpected completion IoTag
3220 * <IoTag>
3221 */
a257bf90 3222 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
3223 "0322 Ring %d handler: "
3224 "unexpected completion IoTag x%x "
3225 "Data: x%x x%x x%x x%x\n",
3226 pring->ringno,
3227 saveq->iocb.ulpIoTag,
3228 saveq->iocb.ulpStatus,
3229 saveq->iocb.un.ulpWord[4],
3230 saveq->iocb.ulpCommand,
3231 saveq->iocb.ulpContext);
dea3101e 3232 }
3233 }
68876920 3234
dea3101e 3235 return rc;
3236}
3237
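lpfc_sli_process_sol_iocb ends in a simple dispatch: invoke the command's registered completion handler if one exists, otherwise release the iocb immediately. A hedged sketch of that pattern, with stand-in types rather than the driver's own symbols:

#include <stdlib.h>

struct iocb;
typedef void (*iocb_cmpl_t)(struct iocb *cmd, struct iocb *rsp);

struct iocb {
        iocb_cmpl_t iocb_cmpl;          /* NULL when no completion is registered */
};

static void complete_iocb(struct iocb *cmd, struct iocb *rsp)
{
        if (!cmd)
                return;                 /* unknown iotag: nothing to complete */
        if (cmd->iocb_cmpl)
                cmd->iocb_cmpl(cmd, rsp);       /* handler owns the resources */
        else
                free(cmd);                      /* no handler: release directly */
}

int main(void)
{
        struct iocb *cmd = calloc(1, sizeof(*cmd));

        complete_iocb(cmd, NULL);       /* no handler registered: freed here */
        return 0;
}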
e59058c4 3238/**
3621a710 3239 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
3240 * @phba: Pointer to HBA context object.
3241 * @pring: Pointer to driver SLI ring object.
3242 *
3243 * This function is called from the iocb ring event handlers when
3244 * the put pointer is ahead of the get pointer for a ring. This function signals
3245 * an error attention condition to the worker thread and the worker
3246 * thread will transition the HBA to offline state.
3247 **/
2e0fef85
JS
3248static void
3249lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 3250{
34b02dcd 3251 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 3252 /*
025dfdaf 3253 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
3254 * rsp ring <portRspMax>
3255 */
3256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3257 "0312 Ring %d handler: portRspPut %d "
025dfdaf 3258 "is bigger than rsp ring %d\n",
e8b62011 3259 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 3260 pring->sli.sli3.numRiocb);
875fbdfe 3261
2e0fef85 3262 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
3263
3264 /*
3265 * All error attention handlers are posted to
3266 * worker thread
3267 */
3268 phba->work_ha |= HA_ERATT;
3269 phba->work_hs = HS_FFER3;
92d7f7b0 3270
5e9d9b82 3271 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
3272
3273 return;
3274}
3275
9399627f 3276/**
3621a710 3277 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
3278 * @t: Pointer to the eratt_poll timer embedded in the HBA context object.
3279 *
3280 * This function is invoked by the Error Attention polling timer when the
3281 * timer times out. It will check the SLI Error Attention register for
3282 * possible attention events. If so, it will post an Error Attention event
3283 * and wake up worker thread to process it. Otherwise, it will set up the
3284 * Error Attention polling timer for the next poll.
3285 **/
f22eb4d3 3286void lpfc_poll_eratt(struct timer_list *t)
9399627f
JS
3287{
3288 struct lpfc_hba *phba;
eb016566 3289 uint32_t eratt = 0;
aa6fbb75 3290 uint64_t sli_intr, cnt;
9399627f 3291
f22eb4d3 3292 phba = from_timer(phba, t, eratt_poll);
9399627f 3293
aa6fbb75
JS
3294 /* Here we will also keep track of interrupts per sec of the hba */
3295 sli_intr = phba->sli.slistat.sli_intr;
3296
3297 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3298 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3299 sli_intr);
3300 else
3301 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3302
65791f1f
JS
3303 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3304 do_div(cnt, phba->eratt_poll_interval);
aa6fbb75
JS
3305 phba->sli.slistat.sli_ips = cnt;
3306
3307 phba->sli.slistat.sli_prev_intr = sli_intr;
3308
9399627f
JS
3309 /* Check chip HA register for error event */
3310 eratt = lpfc_sli_check_eratt(phba);
3311
3312 if (eratt)
3313 /* Tell the worker thread there is work to do */
3314 lpfc_worker_wake_up(phba);
3315 else
3316 /* Restart the timer for next eratt poll */
256ec0d0
JS
3317 mod_timer(&phba->eratt_poll,
3318 jiffies +
65791f1f 3319 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9399627f
JS
3320 return;
3321}
3322
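The interrupts-per-interval bookkeeping in lpfc_poll_eratt treats the counter as free-running, so a smaller current value means it wrapped; the kernel side then uses do_div() because 64-bit division is not native on 32-bit x86. A userspace model of the same arithmetic, with assumed names, mirroring the driver's wrap formula:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t intr_rate(uint64_t prev, uint64_t cur, uint64_t interval_s)
{
        uint64_t cnt;

        if (prev > cur)                 /* counter wrapped past 2^64 - 1 */
                cnt = (UINT64_MAX - prev) + cur;
        else
                cnt = cur - prev;
        return cnt / interval_s;        /* kernel side: do_div(cnt, interval) */
}

int main(void)
{
        /* wrap case: prev near the top of the range, cur restarted low */
        printf("%" PRIu64 "\n", intr_rate(UINT64_MAX - 5, 10, 1));     /* 15 */
        return 0;
}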
875fbdfe 3323
e59058c4 3324/**
3621a710 3325 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
3326 * @phba: Pointer to HBA context object.
3327 * @pring: Pointer to driver SLI ring object.
3328 * @mask: Host attention register mask for this ring.
3329 *
3330 * This function is called from the interrupt context when there is a ring
3331 * event for the fcp ring. The caller does not hold any lock.
3332 * The function processes each response iocb in the response ring until it
25985edc 3333 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
e59058c4
JS
3334 * LE bit set. The function will call the completion handler of the command iocb
3335 * if the response iocb indicates a completion for a command iocb or it is
3336 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3337 * function if this is an unsolicited iocb.
dea3101e 3338 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
3339 * to check it explicitly.
3340 */
3341int
2e0fef85
JS
3342lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3343 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3344{
34b02dcd 3345 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 3346 IOCB_t *irsp = NULL;
87f6eaff 3347 IOCB_t *entry = NULL;
dea3101e 3348 struct lpfc_iocbq *cmdiocbq = NULL;
3349 struct lpfc_iocbq rspiocbq;
dea3101e 3350 uint32_t status;
3351 uint32_t portRspPut, portRspMax;
3352 int rc = 1;
3353 lpfc_iocb_type type;
3354 unsigned long iflag;
3355 uint32_t rsp_cmpl = 0;
dea3101e 3356
2e0fef85 3357 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3358 pring->stats.iocb_event++;
3359
dea3101e 3360 /*
3361 * The next available response entry should never exceed the maximum
3362 * entries. If it does, treat it as an adapter hardware error.
3363 */
7e56aa25 3364 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3365 portRspPut = le32_to_cpu(pgp->rspPutInx);
3366 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 3367 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 3368 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3369 return 1;
3370 }
45ed1190
JS
3371 if (phba->fcp_ring_in_use) {
3372 spin_unlock_irqrestore(&phba->hbalock, iflag);
3373 return 1;
3374 } else
3375 phba->fcp_ring_in_use = 1;
dea3101e 3376
3377 rmb();
7e56aa25 3378 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
3379 /*
3380 * Fetch an entry off the ring and copy it into a local data
3381 * structure. The copy involves a byte-swap since the
3382 * network byte order and pci byte orders are different.
3383 */
ed957684 3384 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 3385 phba->last_completion_time = jiffies;
875fbdfe 3386
7e56aa25
JS
3387 if (++pring->sli.sli3.rspidx >= portRspMax)
3388 pring->sli.sli3.rspidx = 0;
875fbdfe 3389
87f6eaff
JSEC
3390 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3391 (uint32_t *) &rspiocbq.iocb,
ed957684 3392 phba->iocb_rsp_size);
a4bc3379 3393 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
3394 irsp = &rspiocbq.iocb;
3395
dea3101e 3396 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3397 pring->stats.iocb_rsp++;
3398 rsp_cmpl++;
3399
3400 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3401 /*
3402 * If resource errors reported from HBA, reduce
3403 * queuedepths of the SCSI device.
3404 */
3405 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3406 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3407 IOERR_NO_RESOURCES)) {
92d7f7b0 3408 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3409 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3410 spin_lock_irqsave(&phba->hbalock, iflag);
3411 }
3412
dea3101e 3413 /* Rsp ring <ringno> error: IOCB */
3414 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3415 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3416 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3417 pring->ringno,
92d7f7b0
JS
3418 irsp->un.ulpWord[0],
3419 irsp->un.ulpWord[1],
3420 irsp->un.ulpWord[2],
3421 irsp->un.ulpWord[3],
3422 irsp->un.ulpWord[4],
3423 irsp->un.ulpWord[5],
d7c255b2
JS
3424 *(uint32_t *)&irsp->un1,
3425 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3426 }
3427
3428 switch (type) {
3429 case LPFC_ABORT_IOCB:
3430 case LPFC_SOL_IOCB:
3431 /*
3432 * Idle exchange closed via ABTS from port. No iocb
3433 * resources need to be recovered.
3434 */
3435 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3436 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3437 "0333 IOCB cmd 0x%x"
dca9479b 3438 " processed. Skipping"
92d7f7b0 3439 " completion\n",
dca9479b 3440 irsp->ulpCommand);
dea3101e 3441 break;
3442 }
3443
e2a8be56 3444 spin_unlock_irqrestore(&phba->hbalock, iflag);
604a3e30
JB
3445 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3446 &rspiocbq);
e2a8be56 3447 spin_lock_irqsave(&phba->hbalock, iflag);
0f65ff68
JS
3448 if (unlikely(!cmdiocbq))
3449 break;
3450 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3451 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3452 if (cmdiocbq->iocb_cmpl) {
3453 spin_unlock_irqrestore(&phba->hbalock, iflag);
3454 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3455 &rspiocbq);
3456 spin_lock_irqsave(&phba->hbalock, iflag);
3457 }
dea3101e 3458 break;
a4bc3379 3459 case LPFC_UNSOL_IOCB:
2e0fef85 3460 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3461 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3462 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3463 break;
dea3101e 3464 default:
3465 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3466 char adaptermsg[LPFC_MAX_ADPTMSG];
3467 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3468 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3469 MAX_MSG_DATA);
898eb71c
JP
3470 dev_warn(&((phba->pcidev)->dev),
3471 "lpfc%d: %s\n",
dea3101e 3472 phba->brd_no, adaptermsg);
3473 } else {
3474 /* Unknown IOCB command */
3475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3476 "0334 Unknown IOCB command "
92d7f7b0 3477 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3478 type, irsp->ulpCommand,
92d7f7b0
JS
3479 irsp->ulpStatus,
3480 irsp->ulpIoTag,
3481 irsp->ulpContext);
dea3101e 3482 }
3483 break;
3484 }
3485
3486 /*
3487 * The response IOCB has been processed. Update the ring
3488 * pointer in SLIM. If the port response put pointer has not
3489 * been updated, sync the pgp->rspPutInx and fetch the new port
3490 * response put pointer.
3491 */
7e56aa25
JS
3492 writel(pring->sli.sli3.rspidx,
3493 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3494
7e56aa25 3495 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3496 portRspPut = le32_to_cpu(pgp->rspPutInx);
3497 }
3498
3499 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3500 pring->stats.iocb_rsp_full++;
3501 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3502 writel(status, phba->CAregaddr);
3503 readl(phba->CAregaddr);
3504 }
3505 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3506 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3507 pring->stats.iocb_cmd_empty++;
3508
3509 /* Force update of the local copy of cmdGetInx */
7e56aa25 3510 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3511 lpfc_sli_resume_iocb(phba, pring);
3512
3513 if ((pring->lpfc_sli_cmd_available))
3514 (pring->lpfc_sli_cmd_available) (phba, pring);
3515
3516 }
3517
45ed1190 3518 phba->fcp_ring_in_use = 0;
2e0fef85 3519 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3520 return rc;
3521}
3522
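lpfc_sli_handle_fast_ring_event is a classic single-consumer ring walk: advance the local get index (rspidx) toward the port's put index, wrapping at the ring size, and write the get index back so the port can reuse slots. A standalone sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

static uint32_t ring[RING_SIZE];

static void consume_responses(uint32_t *rspidx, uint32_t put)
{
        while (*rspidx != put) {
                uint32_t slot = *rspidx;

                if (++(*rspidx) >= RING_SIZE)   /* wrap the get index */
                        *rspidx = 0;
                printf("consumed entry 0x%x at slot %u\n", ring[slot], slot);
                /* real driver: writel() the new index back to rspGetInx in SLIM */
        }
}

int main(void)
{
        uint32_t get = 6;

        consume_responses(&get, 2);     /* walks slots 6, 7, 0, 1, then stops */
        return 0;
}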
e59058c4 3523/**
3772a991
JS
3524 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3525 * @phba: Pointer to HBA context object.
3526 * @pring: Pointer to driver SLI ring object.
3527 * @rspiocbp: Pointer to driver response IOCB object.
3528 *
3529 * This function is called from the worker thread when there is a slow-path
3530 * response IOCB to process. This function chains all the response iocbs until
3531 * seeing the iocb with the LE bit set. The function will call
3532 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3533 * completion of a command iocb. The function will call the
3534 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3535 * The function frees the resources or calls the completion handler if this
3536 * iocb is an abort completion. The function returns NULL when the response
3537 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3538 * this function shall chain the iocb on to the iocb_continueq and return the
3539 * response iocb passed in.
3540 **/
3541static struct lpfc_iocbq *
3542lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3543 struct lpfc_iocbq *rspiocbp)
3544{
3545 struct lpfc_iocbq *saveq;
3546 struct lpfc_iocbq *cmdiocbp;
3547 struct lpfc_iocbq *next_iocb;
3548 IOCB_t *irsp = NULL;
3549 uint32_t free_saveq;
3550 uint8_t iocb_cmd_type;
3551 lpfc_iocb_type type;
3552 unsigned long iflag;
3553 int rc;
3554
3555 spin_lock_irqsave(&phba->hbalock, iflag);
3556 /* First add the response iocb to the continueq list */
3557 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3558 pring->iocb_continueq_cnt++;
3559
70f23fd6 3560 /* Now, determine whether the list is completed for processing */
3772a991
JS
3561 irsp = &rspiocbp->iocb;
3562 if (irsp->ulpLe) {
3563 /*
3564 * By default, the driver expects to free all resources
3565 * associated with this iocb completion.
3566 */
3567 free_saveq = 1;
3568 saveq = list_get_first(&pring->iocb_continueq,
3569 struct lpfc_iocbq, list);
3570 irsp = &(saveq->iocb);
3571 list_del_init(&pring->iocb_continueq);
3572 pring->iocb_continueq_cnt = 0;
3573
3574 pring->stats.iocb_rsp++;
3575
3576 /*
3577 * If resource errors reported from HBA, reduce
3578 * queuedepths of the SCSI device.
3579 */
3580 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3581 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3582 IOERR_NO_RESOURCES)) {
3772a991
JS
3583 spin_unlock_irqrestore(&phba->hbalock, iflag);
3584 phba->lpfc_rampdown_queue_depth(phba);
3585 spin_lock_irqsave(&phba->hbalock, iflag);
3586 }
3587
3588 if (irsp->ulpStatus) {
3589 /* Rsp ring <ringno> error: IOCB */
3590 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3591 "0328 Rsp Ring %d error: "
3592 "IOCB Data: "
3593 "x%x x%x x%x x%x "
3594 "x%x x%x x%x x%x "
3595 "x%x x%x x%x x%x "
3596 "x%x x%x x%x x%x\n",
3597 pring->ringno,
3598 irsp->un.ulpWord[0],
3599 irsp->un.ulpWord[1],
3600 irsp->un.ulpWord[2],
3601 irsp->un.ulpWord[3],
3602 irsp->un.ulpWord[4],
3603 irsp->un.ulpWord[5],
3604 *(((uint32_t *) irsp) + 6),
3605 *(((uint32_t *) irsp) + 7),
3606 *(((uint32_t *) irsp) + 8),
3607 *(((uint32_t *) irsp) + 9),
3608 *(((uint32_t *) irsp) + 10),
3609 *(((uint32_t *) irsp) + 11),
3610 *(((uint32_t *) irsp) + 12),
3611 *(((uint32_t *) irsp) + 13),
3612 *(((uint32_t *) irsp) + 14),
3613 *(((uint32_t *) irsp) + 15));
3614 }
3615
3616 /*
3617 * Fetch the IOCB command type and call the correct completion
3618 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3619 * get freed back to the lpfc_iocb_list by the discovery
3620 * kernel thread.
3621 */
3622 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3623 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3624 switch (type) {
3625 case LPFC_SOL_IOCB:
3626 spin_unlock_irqrestore(&phba->hbalock, iflag);
3627 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3628 spin_lock_irqsave(&phba->hbalock, iflag);
3629 break;
3630
3631 case LPFC_UNSOL_IOCB:
3632 spin_unlock_irqrestore(&phba->hbalock, iflag);
3633 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3634 spin_lock_irqsave(&phba->hbalock, iflag);
3635 if (!rc)
3636 free_saveq = 0;
3637 break;
3638
3639 case LPFC_ABORT_IOCB:
3640 cmdiocbp = NULL;
e2a8be56
JS
3641 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3642 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991
JS
3643 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3644 saveq);
e2a8be56
JS
3645 spin_lock_irqsave(&phba->hbalock, iflag);
3646 }
3772a991
JS
3647 if (cmdiocbp) {
3648 /* Call the specified completion routine */
3649 if (cmdiocbp->iocb_cmpl) {
3650 spin_unlock_irqrestore(&phba->hbalock,
3651 iflag);
3652 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3653 saveq);
3654 spin_lock_irqsave(&phba->hbalock,
3655 iflag);
3656 } else
3657 __lpfc_sli_release_iocbq(phba,
3658 cmdiocbp);
3659 }
3660 break;
3661
3662 case LPFC_UNKNOWN_IOCB:
3663 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3664 char adaptermsg[LPFC_MAX_ADPTMSG];
3665 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3666 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3667 MAX_MSG_DATA);
3668 dev_warn(&((phba->pcidev)->dev),
3669 "lpfc%d: %s\n",
3670 phba->brd_no, adaptermsg);
3671 } else {
3672 /* Unknown IOCB command */
3673 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3674 "0335 Unknown IOCB "
3675 "command Data: x%x "
3676 "x%x x%x x%x\n",
3677 irsp->ulpCommand,
3678 irsp->ulpStatus,
3679 irsp->ulpIoTag,
3680 irsp->ulpContext);
3681 }
3682 break;
3683 }
3684
3685 if (free_saveq) {
3686 list_for_each_entry_safe(rspiocbp, next_iocb,
3687 &saveq->list, list) {
61f35bff 3688 list_del_init(&rspiocbp->list);
3772a991
JS
3689 __lpfc_sli_release_iocbq(phba, rspiocbp);
3690 }
3691 __lpfc_sli_release_iocbq(phba, saveq);
3692 }
3693 rspiocbp = NULL;
3694 }
3695 spin_unlock_irqrestore(&phba->hbalock, iflag);
3696 return rspiocbp;
3697}
3698
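lpfc_sli_sp_handle_rspiocb accumulates response iocbs on iocb_continueq until one carries the LE (last entry) bit, then hands the whole chain off at once. The same idea as a minimal singly linked list, with assumed names:

#include <stddef.h>

struct rsp {
        int le;                         /* models irsp->ulpLe */
        struct rsp *next;
};

static struct rsp *contq_head;
static struct rsp **contq_tail = &contq_head;

/* Returns the finished chain when LE is seen, else NULL (still chaining). */
static struct rsp *chain_rsp(struct rsp *r)
{
        struct rsp *chain;

        r->next = NULL;
        *contq_tail = r;                /* append in O(1) via tail pointer */
        contq_tail = &r->next;
        if (!r->le)
                return NULL;
        chain = contq_head;             /* LE set: detach and reset the queue */
        contq_head = NULL;
        contq_tail = &contq_head;
        return chain;
}

int main(void)
{
        struct rsp a = { .le = 0 }, b = { .le = 1 };

        return chain_rsp(&a) == NULL && chain_rsp(&b) == &a ? 0 : 1;
}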
3699/**
3700 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3701 * @phba: Pointer to HBA context object.
3702 * @pring: Pointer to driver SLI ring object.
3703 * @mask: Host attention register mask for this ring.
3704 *
3772a991
JS
3705 * This routine wraps the actual slow_ring event process routine through the
3706 * API jump table function pointer in the lpfc_hba struct.
e59058c4 3707 **/
3772a991 3708void
2e0fef85
JS
3709lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3710 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3711{
3712 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3713}
3714
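The one-line wrapper above is the driver's SLI3/SLI4 abstraction: hot paths call through a function pointer in the hba struct that was bound once at init. A simplified sketch of the idiom, with assumed types rather than lpfc's own:

struct hba;
typedef void (*ring_event_fn)(struct hba *h, unsigned int mask);

struct hba {
        int sli_rev;
        ring_event_fn handle_slow_ring_event;   /* bound once at init time */
};

static void handle_s3(struct hba *h, unsigned int mask) { /* SLI3 path */ }
static void handle_s4(struct hba *h, unsigned int mask) { /* SLI4 path */ }

static void bind_api(struct hba *h)
{
        h->handle_slow_ring_event = (h->sli_rev >= 4) ? handle_s4 : handle_s3;
}

int main(void)
{
        struct hba h = { .sli_rev = 4 };

        bind_api(&h);
        h.handle_slow_ring_event(&h, 0);        /* revision-neutral call site */
        return 0;
}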
3715/**
3716 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3717 * @phba: Pointer to HBA context object.
3718 * @pring: Pointer to driver SLI ring object.
3719 * @mask: Host attention register mask for this ring.
3720 *
3721 * This function is called from the worker thread when there is a ring event
3722 * for non-fcp rings. The caller does not hold any lock. The function
3723 * removes each response iocb from the response ring and calls the handle
3724 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3725 **/
3726static void
3727lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3728 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3729{
34b02dcd 3730 struct lpfc_pgp *pgp;
dea3101e 3731 IOCB_t *entry;
3732 IOCB_t *irsp = NULL;
3733 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3734 uint32_t portRspPut, portRspMax;
dea3101e 3735 unsigned long iflag;
3772a991 3736 uint32_t status;
dea3101e 3737
34b02dcd 3738 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3739 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3740 pring->stats.iocb_event++;
3741
dea3101e 3742 /*
3743 * The next available response entry should never exceed the maximum
3744 * entries. If it does, treat it as an adapter hardware error.
3745 */
7e56aa25 3746 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3747 portRspPut = le32_to_cpu(pgp->rspPutInx);
3748 if (portRspPut >= portRspMax) {
3749 /*
025dfdaf 3750 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3751 * rsp ring <portRspMax>
3752 */
ed957684 3753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3754 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3755 "is bigger than rsp ring %d\n",
e8b62011 3756 pring->ringno, portRspPut, portRspMax);
dea3101e 3757
2e0fef85
JS
3758 phba->link_state = LPFC_HBA_ERROR;
3759 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3760
3761 phba->work_hs = HS_FFER3;
3762 lpfc_handle_eratt(phba);
3763
3772a991 3764 return;
dea3101e 3765 }
3766
3767 rmb();
7e56aa25 3768 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3769 /*
3770 * Build a completion list and call the appropriate handler.
3771 * The process is to get the next available response iocb, get
3772 * a free iocb from the list, copy the response data into the
3773 * free iocb, insert to the continuation list, and update the
3774 * next response index to slim. This process makes response
3775 * iocb's in the ring available to DMA as fast as possible but
3776 * pays a penalty for a copy operation. Since the iocb is
3777 * only 32 bytes, this penalty is considered small relative to
3778 * the PCI reads for register values and a slim write. When
3779 * the ulpLe field is set, the entire Command has been
3780 * received.
3781 */
ed957684
JS
3782 entry = lpfc_resp_iocb(phba, pring);
3783
858c9f6c 3784 phba->last_completion_time = jiffies;
2e0fef85 3785 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3786 if (rspiocbp == NULL) {
3787 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3788 "completion.\n", __func__);
dea3101e 3789 break;
3790 }
3791
ed957684
JS
3792 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3793 phba->iocb_rsp_size);
dea3101e 3794 irsp = &rspiocbp->iocb;
3795
7e56aa25
JS
3796 if (++pring->sli.sli3.rspidx >= portRspMax)
3797 pring->sli.sli3.rspidx = 0;
dea3101e 3798
a58cbd52
JS
3799 if (pring->ringno == LPFC_ELS_RING) {
3800 lpfc_debugfs_slow_ring_trc(phba,
3801 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3802 *(((uint32_t *) irsp) + 4),
3803 *(((uint32_t *) irsp) + 6),
3804 *(((uint32_t *) irsp) + 7));
3805 }
3806
7e56aa25
JS
3807 writel(pring->sli.sli3.rspidx,
3808 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3809
3772a991
JS
3810 spin_unlock_irqrestore(&phba->hbalock, iflag);
3811 /* Handle the response IOCB */
3812 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3813 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3814
3815 /*
3816 * If the port response put pointer has not been updated, sync
3817 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3818 * response put pointer.
3819 */
7e56aa25 3820 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3821 portRspPut = le32_to_cpu(pgp->rspPutInx);
3822 }
7e56aa25 3823 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3824
92d7f7b0 3825 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3826 /* At least one response entry has been freed */
3827 pring->stats.iocb_rsp_full++;
3828 /* SET RxRE_RSP in Chip Att register */
3829 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3830 writel(status, phba->CAregaddr);
3831 readl(phba->CAregaddr); /* flush */
3832 }
3833 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3834 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3835 pring->stats.iocb_cmd_empty++;
3836
3837 /* Force update of the local copy of cmdGetInx */
7e56aa25 3838 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3839 lpfc_sli_resume_iocb(phba, pring);
3840
3841 if ((pring->lpfc_sli_cmd_available))
3842 (pring->lpfc_sli_cmd_available) (phba, pring);
3843
3844 }
3845
2e0fef85 3846 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3847 return;
dea3101e 3848}
3849
4f774513
JS
3850/**
3851 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3852 * @phba: Pointer to HBA context object.
3853 * @pring: Pointer to driver SLI ring object.
3854 * @mask: Host attention register mask for this ring.
3855 *
3856 * This function is called from the worker thread when there is a pending
3857 * ELS response iocb on the driver internal slow-path response iocb worker
3858 * queue. The caller does not hold any lock. The function removes each
3859 * response iocb from the response worker queue and calls the handle
3860 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3861 **/
3862static void
3863lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3864 struct lpfc_sli_ring *pring, uint32_t mask)
3865{
3866 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3867 struct hbq_dmabuf *dmabuf;
3868 struct lpfc_cq_event *cq_event;
4f774513 3869 unsigned long iflag;
0ef01a2d 3870 int count = 0;
4f774513 3871
45ed1190
JS
3872 spin_lock_irqsave(&phba->hbalock, iflag);
3873 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3874 spin_unlock_irqrestore(&phba->hbalock, iflag);
3875 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3876 /* Get the response iocb from the head of work queue */
3877 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3878 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3879 cq_event, struct lpfc_cq_event, list);
4f774513 3880 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3881
3882 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3883 case CQE_CODE_COMPL_WQE:
3884 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3885 cq_event);
45ed1190
JS
3886 /* Translate ELS WCQE to response IOCBQ */
3887 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3888 irspiocbq);
3889 if (irspiocbq)
3890 lpfc_sli_sp_handle_rspiocb(phba, pring,
3891 irspiocbq);
0ef01a2d 3892 count++;
4d9ab994
JS
3893 break;
3894 case CQE_CODE_RECEIVE:
7851fe2c 3895 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3896 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3897 cq_event);
3898 lpfc_sli4_handle_received_buffer(phba, dmabuf);
0ef01a2d 3899 count++;
4d9ab994
JS
3900 break;
3901 default:
3902 break;
3903 }
0ef01a2d
JS
3904
3905 /* Limit the number of events to 64 to avoid soft lockups */
3906 if (count == 64)
3907 break;
4f774513
JS
3908 }
3909}
3910
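The 64-event cap in the s4 handler above is a standard bounded-drain guard: service at most a fixed batch per invocation so one busy queue cannot monopolize the worker. A sketch with an illustrative list type:

#include <stddef.h>

#define EVENT_BUDGET 64

struct event {
        struct event *next;
};

static void handle_event(struct event *e) { /* dispatch by CQE code */ }

static void drain_events(struct event **head)
{
        int count = 0;

        while (*head) {
                struct event *e = *head;

                *head = e->next;        /* pop from the head of the queue */
                handle_event(e);
                if (++count == EVENT_BUDGET)
                        break;          /* leave the rest for the next pass */
        }
}

int main(void)
{
        struct event e[3] = { { &e[1] }, { &e[2] }, { NULL } };
        struct event *head = &e[0];

        drain_events(&head);    /* drains all three; budget not reached */
        return head == NULL ? 0 : 1;
}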
e59058c4 3911/**
3621a710 3912 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3913 * @phba: Pointer to HBA context object.
3914 * @pring: Pointer to driver SLI ring object.
3915 *
3916 * This function aborts all iocbs in the given ring and frees all the iocb
3917 * objects in txq. This function issues an abort iocb for all the iocb commands
3918 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3919 * the return of this function. The caller is not required to hold any locks.
3920 **/
2e0fef85 3921void
dea3101e 3922lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3923{
2534ba75 3924 LIST_HEAD(completions);
dea3101e 3925 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3926
92d7f7b0
JS
3927 if (pring->ringno == LPFC_ELS_RING) {
3928 lpfc_fabric_abort_hba(phba);
3929 }
3930
dea3101e 3931 /* Error everything on txq and txcmplq
3932 * First do the txq.
3933 */
db55fba8
JS
3934 if (phba->sli_rev >= LPFC_SLI_REV4) {
3935 spin_lock_irq(&pring->ring_lock);
3936 list_splice_init(&pring->txq, &completions);
3937 pring->txq_cnt = 0;
3938 spin_unlock_irq(&pring->ring_lock);
dea3101e 3939
db55fba8
JS
3940 spin_lock_irq(&phba->hbalock);
3941 /* Next issue ABTS for everything on the txcmplq */
3942 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3943 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3944 spin_unlock_irq(&phba->hbalock);
3945 } else {
3946 spin_lock_irq(&phba->hbalock);
3947 list_splice_init(&pring->txq, &completions);
3948 pring->txq_cnt = 0;
dea3101e 3949
db55fba8
JS
3950 /* Next issue ABTS for everything on the txcmplq */
3951 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3952 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3953 spin_unlock_irq(&phba->hbalock);
3954 }
dea3101e 3955
a257bf90
JS
3956 /* Cancel all the IOCBs from the completions list */
3957 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3958 IOERR_SLI_ABORTED);
dea3101e 3959}
3960
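lpfc_sli_abort_iocb_ring uses the splice-then-cancel idiom: move the whole txq onto a private list in O(1) while the lock is held, then fail each entry with no lock held. A userspace sketch with a pthread mutex standing in for the ring lock (illustrative names throughout):

#include <pthread.h>
#include <stddef.h>

struct io {
        struct io *next;
};

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static struct io *txq;

static void cancel_io(struct io *i) { /* complete with LOCAL_REJECT */ }

static void abort_ring(void)
{
        struct io *batch;

        pthread_mutex_lock(&ring_lock);
        batch = txq;                    /* splice the queue off in O(1) */
        txq = NULL;
        pthread_mutex_unlock(&ring_lock);

        while (batch) {                 /* cancel outside the lock */
                struct io *i = batch;

                batch = i->next;
                cancel_io(i);
        }
}

int main(void)
{
        struct io a = { NULL }, b = { &a };

        txq = &b;                       /* queue: b -> a */
        abort_ring();
        return txq == NULL ? 0 : 1;
}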
db55fba8
JS
3961/**
3962 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3963 * @phba: Pointer to HBA context object.
3965 *
3966 * This function aborts all iocbs in FCP rings and frees all the iocb
3967 * objects in txq. This function issues an abort iocb for all the iocb commands
3968 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3969 * the return of this function. The caller is not required to hold any locks.
3970 **/
3971void
3972lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3973{
3974 struct lpfc_sli *psli = &phba->sli;
3975 struct lpfc_sli_ring *pring;
3976 uint32_t i;
3977
3978 /* Look on all the FCP Rings for the iotag */
3979 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec
JS
3980 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3981 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
db55fba8
JS
3982 lpfc_sli_abort_iocb_ring(phba, pring);
3983 }
3984 } else {
895427bd 3985 pring = &psli->sli3_ring[LPFC_FCP_RING];
db55fba8
JS
3986 lpfc_sli_abort_iocb_ring(phba, pring);
3987 }
3988}
3989
a8e497d5 3990/**
3621a710 3991 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
3992 * @phba: Pointer to HBA context object.
3993 *
3994 * This function flushes all iocbs in the fcp ring and frees all the iocb
3995 * objects in txq and txcmplq. This function will not issue abort iocbs
3996 * for all the iocb commands in txcmplq; they will just be returned with
3997 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
3998 * slot has been permanently disabled.
3999 **/
4000void
4001lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4002{
4003 LIST_HEAD(txq);
4004 LIST_HEAD(txcmplq);
a8e497d5
JS
4005 struct lpfc_sli *psli = &phba->sli;
4006 struct lpfc_sli_ring *pring;
db55fba8 4007 uint32_t i;
c1dd9111 4008 struct lpfc_iocbq *piocb, *next_iocb;
a8e497d5
JS
4009
4010 spin_lock_irq(&phba->hbalock);
4f2e66c6
JS
4011 /* Indicate the I/O queues are flushed */
4012 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
4013 spin_unlock_irq(&phba->hbalock);
4014
db55fba8
JS
4015 /* Look on all the FCP Rings for the iotag */
4016 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec
JS
4017 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4018 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
db55fba8
JS
4019
4020 spin_lock_irq(&pring->ring_lock);
4021 /* Retrieve everything on txq */
4022 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4023 list_for_each_entry_safe(piocb, next_iocb,
4024 &pring->txcmplq, list)
4025 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4026 /* Retrieve everything on the txcmplq */
4027 list_splice_init(&pring->txcmplq, &txcmplq);
4028 pring->txq_cnt = 0;
4029 pring->txcmplq_cnt = 0;
4030 spin_unlock_irq(&pring->ring_lock);
4031
4032 /* Flush the txq */
4033 lpfc_sli_cancel_iocbs(phba, &txq,
4034 IOSTAT_LOCAL_REJECT,
4035 IOERR_SLI_DOWN);
4036 /* Flush the txcmplq */
4037 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4038 IOSTAT_LOCAL_REJECT,
4039 IOERR_SLI_DOWN);
4040 }
4041 } else {
895427bd 4042 pring = &psli->sli3_ring[LPFC_FCP_RING];
a8e497d5 4043
db55fba8
JS
4044 spin_lock_irq(&phba->hbalock);
4045 /* Retrieve everything on txq */
4046 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4047 list_for_each_entry_safe(piocb, next_iocb,
4048 &pring->txcmplq, list)
4049 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4050 /* Retrieve everything on the txcmplq */
4051 list_splice_init(&pring->txcmplq, &txcmplq);
4052 pring->txq_cnt = 0;
4053 pring->txcmplq_cnt = 0;
4054 spin_unlock_irq(&phba->hbalock);
4055
4056 /* Flush the txq */
4057 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4058 IOERR_SLI_DOWN);
4059 /* Flush the txcmplq */
4060 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4061 IOERR_SLI_DOWN);
4062 }
a8e497d5
JS
4063}
4064
895427bd
JS
4065/**
4066 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4067 * @phba: Pointer to HBA context object.
4068 *
4069 * This function flushes all wqes in the nvme rings and frees all resources
4070 * in the txcmplq. This function does not issue abort wqes for the IO
4071 * commands in txcmplq; they will just be returned with
4072 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
4073 * slot has been permanently disabled.
4074 **/
4075void
4076lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4077{
4078 LIST_HEAD(txcmplq);
4079 struct lpfc_sli_ring *pring;
4080 uint32_t i;
c1dd9111 4081 struct lpfc_iocbq *piocb, *next_iocb;
895427bd 4082
cdb42bec
JS
4083 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4084 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
895427bd
JS
4085 return;
4086
4087 /* Hint to other driver operations that a flush is in progress. */
4088 spin_lock_irq(&phba->hbalock);
4089 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4090 spin_unlock_irq(&phba->hbalock);
4091
4092 /* Cycle through all NVME rings and complete each IO with
4093 * a local driver reason code. This is a flush so no
4094 * abort exchange to FW.
4095 */
cdb42bec
JS
4096 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4097 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
895427bd 4098
895427bd 4099 spin_lock_irq(&pring->ring_lock);
c1dd9111
JS
4100 list_for_each_entry_safe(piocb, next_iocb,
4101 &pring->txcmplq, list)
4102 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4103 /* Retrieve everything on the txcmplq */
895427bd
JS
4104 list_splice_init(&pring->txcmplq, &txcmplq);
4105 pring->txcmplq_cnt = 0;
4106 spin_unlock_irq(&pring->ring_lock);
4107
4108 /* Flush the txcmplq */
4109 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4110 IOSTAT_LOCAL_REJECT,
4111 IOERR_SLI_DOWN);
4112 }
4113}
4114
e59058c4 4115/**
3772a991 4116 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
4117 * @phba: Pointer to HBA context object.
4118 * @mask: Bit mask to be checked.
4119 *
4120 * This function reads the host status register and compares
4121 * with the provided bit mask to check if HBA completed
4122 * the restart. This function will wait in a loop for the
4123 * HBA to complete restart. If the HBA does not restart within
4124 * 15 iterations, the function will reset the HBA again. The
4125 * function returns 1 when the HBA fails to restart; otherwise it returns
4126 * zero.
4127 **/
3772a991
JS
4128static int
4129lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 4130{
41415862
JW
4131 uint32_t status;
4132 int i = 0;
4133 int retval = 0;
dea3101e 4134
41415862 4135 /* Read the HBA Host Status Register */
9940b97b
JS
4136 if (lpfc_readl(phba->HSregaddr, &status))
4137 return 1;
dea3101e 4138
41415862
JW
4139 /*
4140 * Check status register every 100ms for 5 retries, then every
4141 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4142 * every 2.5 sec for 4.
4143 * Break out of the loop if errors occur during init.
4144 */
4145 while (((status & mask) != mask) &&
4146 !(status & HS_FFERM) &&
4147 i++ < 20) {
dea3101e 4148
41415862
JW
4149 if (i <= 5)
4150 msleep(10);
4151 else if (i <= 10)
4152 msleep(500);
4153 else
4154 msleep(2500);
dea3101e 4155
41415862 4156 if (i == 15) {
2e0fef85 4157 /* Do post */
92d7f7b0 4158 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
4159 lpfc_sli_brdrestart(phba);
4160 }
4161 /* Read the HBA Host Status Register */
9940b97b
JS
4162 if (lpfc_readl(phba->HSregaddr, &status)) {
4163 retval = 1;
4164 break;
4165 }
41415862 4166 }
dea3101e 4167
41415862
JW
4168 /* Check to see if any errors occurred during init */
4169 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
4170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4171 "2751 Adapter failed to restart, "
4172 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4173 status,
4174 readl(phba->MBslimaddr + 0xa8),
4175 readl(phba->MBslimaddr + 0xac));
2e0fef85 4176 phba->link_state = LPFC_HBA_ERROR;
41415862 4177 retval = 1;
dea3101e 4178 }
dea3101e 4179
41415862
JW
4180 return retval;
4181}
dea3101e 4182
da0436e9
JS
4183/**
4184 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4185 * @phba: Pointer to HBA context object.
4186 * @mask: Bit mask to be checked.
4187 *
4188 * This function checks the host status register to see if the HBA is
4189 * ready. This function will wait in a loop for the HBA to become ready.
4190 * If the HBA is not ready, the function will reset the HBA PCI
4191 * function again. The function returns 1 when the HBA fails to become
4192 * ready; otherwise it returns zero.
4193 **/
4194static int
4195lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4196{
4197 uint32_t status;
4198 int retval = 0;
4199
4200 /* Read the HBA Host Status Register */
4201 status = lpfc_sli4_post_status_check(phba);
4202
4203 if (status) {
4204 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4205 lpfc_sli_brdrestart(phba);
4206 status = lpfc_sli4_post_status_check(phba);
4207 }
4208
4209 /* Check to see if any errors occurred during init */
4210 if (status) {
4211 phba->link_state = LPFC_HBA_ERROR;
4212 retval = 1;
4213 } else
4214 phba->sli4_hba.intr_enable = 0;
4215
4216 return retval;
4217}
4218
4219/**
4220 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4221 * @phba: Pointer to HBA context object.
4222 * @mask: Bit mask to be checked.
4223 *
4224 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4225 * through the API jump table function pointer in the lpfc_hba struct.
4226 **/
4227int
4228lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4229{
4230 return phba->lpfc_sli_brdready(phba, mask);
4231}
4232
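lpfc_sli_brdready_s3 polls with an escalating delay (10ms, then 500ms, then 2.5s) and retries the whole restart once at iteration 15. A userspace model of that backoff; read_status() and reset_board() are stand-ins for the register accessors, not driver functions:

#include <unistd.h>

#define READY_MASK 0x3          /* stands in for HS_FFRDY | HS_MBRDY */

static unsigned int hw_status;  /* models the HS register */

static unsigned int read_status(void) { return hw_status; }
static void reset_board(void) { hw_status = READY_MASK; }

static int wait_ready(void)
{
        unsigned int status = read_status();
        int i = 0;

        while ((status & READY_MASK) != READY_MASK && i++ < 20) {
                usleep(i <= 5 ? 10000 : (i <= 10 ? 500000 : 2500000));
                if (i == 15)
                        reset_board();  /* one mid-course full restart */
                status = read_status();
        }
        return (status & READY_MASK) == READY_MASK ? 0 : 1;
}

int main(void)
{
        return wait_ready();    /* returns 0 once the board reports ready */
}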
9290831f
JS
4233#define BARRIER_TEST_PATTERN (0xdeadbeef)
4234
e59058c4 4235/**
3621a710 4236 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
4237 * @phba: Pointer to HBA context object.
4238 *
1b51197d
JS
4239 * This function is called with hbalock held before resetting an HBA; it
4240 * requests the HBA to quiesce DMAs before the reset.
e59058c4 4241 **/
2e0fef85 4242void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 4243{
65a29c16
JS
4244 uint32_t __iomem *resp_buf;
4245 uint32_t __iomem *mbox_buf;
9290831f 4246 volatile uint32_t mbox;
9940b97b 4247 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
4248 int i;
4249 uint8_t hdrtype;
4250
1c2ba475
JT
4251 lockdep_assert_held(&phba->hbalock);
4252
9290831f
JS
4253 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4254 if (hdrtype != 0x80 ||
4255 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4256 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4257 return;
4258
4259 /*
4260 * Tell the other part of the chip to suspend temporarily all
4261 * its DMA activity.
4262 */
65a29c16 4263 resp_buf = phba->MBslimaddr;
9290831f
JS
4264
4265 /* Disable the error attention */
9940b97b
JS
4266 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4267 return;
9290831f
JS
4268 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4269 readl(phba->HCregaddr); /* flush */
2e0fef85 4270 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 4271
9940b97b
JS
4272 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4273 return;
4274 if (ha_copy & HA_ERATT) {
9290831f
JS
4275 /* Clear Chip error bit */
4276 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4277 phba->pport->stopped = 1;
9290831f
JS
4278 }
4279
4280 mbox = 0;
4281 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4282 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4283
4284 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 4285 mbox_buf = phba->MBslimaddr;
9290831f
JS
4286 writel(mbox, mbox_buf);
4287
9940b97b
JS
4288 for (i = 0; i < 50; i++) {
4289 if (lpfc_readl((resp_buf + 1), &resp_data))
4290 return;
4291 if (resp_data != ~(BARRIER_TEST_PATTERN))
4292 mdelay(1);
4293 else
4294 break;
4295 }
4296 resp_data = 0;
4297 if (lpfc_readl((resp_buf + 1), &resp_data))
4298 return;
4299 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 4300 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 4301 phba->pport->stopped)
9290831f
JS
4302 goto restore_hc;
4303 else
4304 goto clear_errat;
4305 }
4306
4307 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
4308 resp_data = 0;
4309 for (i = 0; i < 500; i++) {
4310 if (lpfc_readl(resp_buf, &resp_data))
4311 return;
4312 if (resp_data != mbox)
4313 mdelay(1);
4314 else
4315 break;
4316 }
9290831f
JS
4317
4318clear_errat:
4319
9940b97b
JS
4320 while (++i < 500) {
4321 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4322 return;
4323 if (!(ha_copy & HA_ERATT))
4324 mdelay(1);
4325 else
4326 break;
4327 }
9290831f
JS
4328
4329 if (readl(phba->HAregaddr) & HA_ERATT) {
4330 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4331 phba->pport->stopped = 1;
9290831f
JS
4332 }
4333
4334restore_hc:
2e0fef85 4335 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
4336 writel(hc_copy, phba->HCregaddr);
4337 readl(phba->HCregaddr); /* flush */
4338}
4339
e59058c4 4340/**
3621a710 4341 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
4342 * @phba: Pointer to HBA context object.
4343 *
4344 * This function issues a kill_board mailbox command and waits for
4345 * the error attention interrupt. This function is called for stopping
4346 * the firmware processing. The caller is not required to hold any
4347 * locks. This function calls lpfc_hba_down_post function to free
4348 * any pending commands after the kill. The function will return 1 when it
4349 * fails to kill the board; otherwise it will return 0.
4350 **/
41415862 4351int
2e0fef85 4352lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
4353{
4354 struct lpfc_sli *psli;
4355 LPFC_MBOXQ_t *pmb;
4356 uint32_t status;
4357 uint32_t ha_copy;
4358 int retval;
4359 int i = 0;
dea3101e 4360
41415862 4361 psli = &phba->sli;
dea3101e 4362
41415862 4363 /* Kill HBA */
ed957684 4364 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
4365 "0329 Kill HBA Data: x%x x%x\n",
4366 phba->pport->port_state, psli->sli_flag);
41415862 4367
98c9ea5c
JS
4368 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4369 if (!pmb)
41415862 4370 return 1;
41415862
JW
4371
4372 /* Disable the error attention */
2e0fef85 4373 spin_lock_irq(&phba->hbalock);
9940b97b
JS
4374 if (lpfc_readl(phba->HCregaddr, &status)) {
4375 spin_unlock_irq(&phba->hbalock);
4376 mempool_free(pmb, phba->mbox_mem_pool);
4377 return 1;
4378 }
41415862
JW
4379 status &= ~HC_ERINT_ENA;
4380 writel(status, phba->HCregaddr);
4381 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
4382 phba->link_flag |= LS_IGNORE_ERATT;
4383 spin_unlock_irq(&phba->hbalock);
41415862
JW
4384
4385 lpfc_kill_board(phba, pmb);
4386 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4387 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4388
4389 if (retval != MBX_SUCCESS) {
4390 if (retval != MBX_BUSY)
4391 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
4392 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4393 "2752 KILL_BOARD command failed retval %d\n",
4394 retval);
2e0fef85
JS
4395 spin_lock_irq(&phba->hbalock);
4396 phba->link_flag &= ~LS_IGNORE_ERATT;
4397 spin_unlock_irq(&phba->hbalock);
41415862
JW
4398 return 1;
4399 }
4400
f4b4c68f
JS
4401 spin_lock_irq(&phba->hbalock);
4402 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4403 spin_unlock_irq(&phba->hbalock);
9290831f 4404
41415862
JW
4405 mempool_free(pmb, phba->mbox_mem_pool);
4406
4407 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4408 * attention every 100ms for 3 seconds. If we don't get ERATT after
4409 * 3 seconds we still set HBA_ERROR state because the status of the
4410 * board is now undefined.
4411 */
9940b97b
JS
4412 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4413 return 1;
41415862
JW
4414 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4415 mdelay(100);
9940b97b
JS
4416 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4417 return 1;
41415862
JW
4418 }
4419
4420 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
4421 if (ha_copy & HA_ERATT) {
4422 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4423 phba->pport->stopped = 1;
9290831f 4424 }
2e0fef85 4425 spin_lock_irq(&phba->hbalock);
41415862 4426 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 4427 psli->mbox_active = NULL;
2e0fef85
JS
4428 phba->link_flag &= ~LS_IGNORE_ERATT;
4429 spin_unlock_irq(&phba->hbalock);
41415862 4430
41415862 4431 lpfc_hba_down_post(phba);
2e0fef85 4432 phba->link_state = LPFC_HBA_ERROR;
41415862 4433
2e0fef85 4434 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 4435}
4436
e59058c4 4437/**
3772a991 4438 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
4439 * @phba: Pointer to HBA context object.
4440 *
4441 * This function resets the HBA by writing HC_INITFF to the control
4442 * register. After the HBA resets, this function resets all the iocb ring
4443 * indices. This function disables PCI layer parity checking during
4444 * the reset.
4445 * This function returns 0 always.
4446 * The caller is not required to hold any locks.
4447 **/
41415862 4448int
2e0fef85 4449lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 4450{
41415862 4451 struct lpfc_sli *psli;
dea3101e 4452 struct lpfc_sli_ring *pring;
41415862 4453 uint16_t cfg_value;
dea3101e 4454 int i;
dea3101e 4455
41415862 4456 psli = &phba->sli;
dea3101e 4457
41415862
JW
4458 /* Reset HBA */
4459 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4460 "0325 Reset HBA Data: x%x x%x\n",
4492b739
JS
4461 (phba->pport) ? phba->pport->port_state : 0,
4462 psli->sli_flag);
dea3101e 4463
4464 /* perform board reset */
4465 phba->fc_eventTag = 0;
4d9ab994 4466 phba->link_events = 0;
4492b739
JS
4467 if (phba->pport) {
4468 phba->pport->fc_myDID = 0;
4469 phba->pport->fc_prevDID = 0;
4470 }
dea3101e 4471
41415862 4472 /* Turn off parity checking and serr during the physical reset */
32a93100
JS
4473 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4474 return -EIO;
4475
41415862
JW
4476 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4477 (cfg_value &
4478 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4479
3772a991
JS
4480 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4481
41415862
JW
4482 /* Now toggle INITFF bit in the Host Control Register */
4483 writel(HC_INITFF, phba->HCregaddr);
4484 mdelay(1);
4485 readl(phba->HCregaddr); /* flush */
4486 writel(0, phba->HCregaddr);
4487 readl(phba->HCregaddr); /* flush */
4488
4489 /* Restore PCI cmd register */
4490 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 4491
4492 /* Initialize relevant SLI info */
41415862 4493 for (i = 0; i < psli->num_rings; i++) {
895427bd 4494 pring = &psli->sli3_ring[i];
dea3101e 4495 pring->flag = 0;
7e56aa25
JS
4496 pring->sli.sli3.rspidx = 0;
4497 pring->sli.sli3.next_cmdidx = 0;
4498 pring->sli.sli3.local_getidx = 0;
4499 pring->sli.sli3.cmdidx = 0;
dea3101e 4500 pring->missbufcnt = 0;
4501 }
dea3101e 4502
2e0fef85 4503 phba->link_state = LPFC_WARM_START;
41415862
JW
4504 return 0;
4505}
4506
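Both brdreset paths bracket the reset with a save/mask/restore of the PCI command word so parity and SERR reporting stay off while the reset can raise spurious errors. A sketch of that sequence; the config accessors here are stand-ins, not the kernel PCI API:

#include <stdint.h>

#define CMD_PARITY 0x0040       /* models PCI_COMMAND_PARITY */
#define CMD_SERR   0x0100       /* models PCI_COMMAND_SERR */

static uint16_t cfg_word = CMD_PARITY | CMD_SERR;       /* fake config space */

static uint16_t cfg_read(void) { return cfg_word; }
static void cfg_write(uint16_t v) { cfg_word = v; }
static void toggle_initff(void) { /* HC_INITFF pulse would go here */ }

static void board_reset(void)
{
        uint16_t saved = cfg_read();

        cfg_write(saved & (uint16_t)~(CMD_PARITY | CMD_SERR));
        toggle_initff();        /* reset may raise spurious parity errors */
        cfg_write(saved);       /* restore the original command word */
}

int main(void)
{
        board_reset();
        return cfg_read() == (CMD_PARITY | CMD_SERR) ? 0 : 1;
}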
e59058c4 4507/**
da0436e9
JS
4508 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4509 * @phba: Pointer to HBA context object.
4510 *
4511 * This function resets a SLI4 HBA. This function disables PCI layer parity
4512 * checking while it resets the device. The caller is not required to hold
4513 * any locks.
4514 *
8c24a4f6 4515 * This function returns 0 on success else returns negative error code.
da0436e9
JS
4516 **/
4517int
4518lpfc_sli4_brdreset(struct lpfc_hba *phba)
4519{
4520 struct lpfc_sli *psli = &phba->sli;
4521 uint16_t cfg_value;
0293635e 4522 int rc = 0;
da0436e9
JS
4523
4524 /* Reset HBA */
4525 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0293635e
JS
4526 "0295 Reset HBA Data: x%x x%x x%x\n",
4527 phba->pport->port_state, psli->sli_flag,
4528 phba->hba_flag);
da0436e9
JS
4529
4530 /* perform board reset */
4531 phba->fc_eventTag = 0;
4d9ab994 4532 phba->link_events = 0;
da0436e9
JS
4533 phba->pport->fc_myDID = 0;
4534 phba->pport->fc_prevDID = 0;
4535
da0436e9
JS
4536 spin_lock_irq(&phba->hbalock);
4537 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4538 phba->fcf.fcf_flag = 0;
da0436e9
JS
4539 spin_unlock_irq(&phba->hbalock);
4540
0293635e
JS
4541 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4542 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4543 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4544 return rc;
4545 }
4546
da0436e9
JS
4547 /* Now physically reset the device */
4548 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4549 "0389 Performing PCI function reset!\n");
be858b65
JS
4550
4551 /* Turn off parity checking and serr during the physical reset */
32a93100
JS
4552 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4553 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4554 "3205 PCI read Config failed\n");
4555 return -EIO;
4556 }
4557
be858b65
JS
4558 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4559 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4560
88318816 4561 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4562 rc = lpfc_pci_function_reset(phba);
da0436e9 4563
be858b65
JS
4564 /* Restore PCI cmd register */
4565 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4566
27b01b82 4567 return rc;
da0436e9
JS
4568}
4569
4570/**
4571 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4572 * @phba: Pointer to HBA context object.
4573 *
4574 * This function is called in the SLI initialization code path to
4575 * restart the HBA. The caller is not required to hold any lock.
4576 * This function writes MBX_RESTART mailbox command to the SLIM and
4577 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4578 * function to free any pending commands. The function enables
4579 * POST only during the first initialization. The function returns zero.
4580 * The function does not guarantee completion of MBX_RESTART mailbox
4581 * command before the return of this function.
4582 **/
da0436e9
JS
4583static int
4584lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4585{
4586 MAILBOX_t *mb;
4587 struct lpfc_sli *psli;
41415862
JW
4588 volatile uint32_t word0;
4589 void __iomem *to_slim;
0d878419 4590 uint32_t hba_aer_enabled;
41415862 4591
2e0fef85 4592 spin_lock_irq(&phba->hbalock);
41415862 4593
0d878419
JS
4594 /* Take PCIe device Advanced Error Reporting (AER) state */
4595 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4596
41415862
JW
4597 psli = &phba->sli;
4598
4599 /* Restart HBA */
4600 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4601 "0337 Restart HBA Data: x%x x%x\n",
4492b739
JS
4602 (phba->pport) ? phba->pport->port_state : 0,
4603 psli->sli_flag);
41415862
JW
4604
4605 word0 = 0;
4606 mb = (MAILBOX_t *) &word0;
4607 mb->mbxCommand = MBX_RESTART;
4608 mb->mbxHc = 1;
4609
9290831f
JS
4610 lpfc_reset_barrier(phba);
4611
41415862
JW
4612 to_slim = phba->MBslimaddr;
4613 writel(*(uint32_t *) mb, to_slim);
4614 readl(to_slim); /* flush */
4615
4616 /* Only skip post after fc_ffinit is completed */
4492b739 4617 if (phba->pport && phba->pport->port_state)
41415862 4618 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4619 else
41415862 4620 word0 = 0; /* This is really setting up word1 */
65a29c16 4621 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4622 writel(*(uint32_t *) mb, to_slim);
4623 readl(to_slim); /* flush */
dea3101e 4624
41415862 4625 lpfc_sli_brdreset(phba);
4492b739
JS
4626 if (phba->pport)
4627 phba->pport->stopped = 0;
2e0fef85 4628 phba->link_state = LPFC_INIT_START;
da0436e9 4629 phba->hba_flag = 0;
2e0fef85 4630 spin_unlock_irq(&phba->hbalock);
41415862 4631
64ba8818 4632 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4633 psli->stats_start = ktime_get_seconds();
64ba8818 4634
eaf15d5b
JS
4635 /* Give the INITFF and Post time to settle. */
4636 mdelay(100);
41415862 4637
0d878419
JS
4638 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4639 if (hba_aer_enabled)
4640 pci_disable_pcie_error_reporting(phba->pcidev);
4641
41415862 4642 lpfc_hba_down_post(phba);
dea3101e 4643
4644 return 0;
4645}
4646
da0436e9
JS
4647/**
4648 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4649 * @phba: Pointer to HBA context object.
4650 *
4651 * This function is called in the SLI initialization code path to restart
4652 * a SLI4 HBA. The caller is not required to hold any lock.
4653 * At the end of the function, it calls lpfc_hba_down_post function to
4654 * free any pending commands.
4655 **/
4656static int
4657lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4658{
4659 struct lpfc_sli *psli = &phba->sli;
75baf696 4660 uint32_t hba_aer_enabled;
27b01b82 4661 int rc;
da0436e9
JS
4662
4663 /* Restart HBA */
4664 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4665 "0296 Restart HBA Data: x%x x%x\n",
4666 phba->pport->port_state, psli->sli_flag);
4667
75baf696
JS
4668 /* Take PCIe device Advanced Error Reporting (AER) state */
4669 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4670
27b01b82 4671 rc = lpfc_sli4_brdreset(phba);
5a9eeff5 4672 if (rc)
8c24a4f6 4673 goto error;
da0436e9
JS
4674
4675 spin_lock_irq(&phba->hbalock);
4676 phba->pport->stopped = 0;
4677 phba->link_state = LPFC_INIT_START;
4678 phba->hba_flag = 0;
4679 spin_unlock_irq(&phba->hbalock);
4680
4681 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4682 psli->stats_start = ktime_get_seconds();
da0436e9 4683
75baf696
JS
4684 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4685 if (hba_aer_enabled)
4686 pci_disable_pcie_error_reporting(phba->pcidev);
4687
8c24a4f6
JS
4688error:
4689 phba->link_state = LPFC_HBA_ERROR;
da0436e9 4690 lpfc_hba_down_post(phba);
569dbe84 4691 lpfc_sli4_queue_destroy(phba);
da0436e9 4692
27b01b82 4693 return rc;
da0436e9
JS
4694}
4695
4696/**
4697 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4698 * @phba: Pointer to HBA context object.
4699 *
4700 * This routine wraps the actual SLI3 or SLI4 hba restart routine through the
4701 * API jump table function pointer in the lpfc_hba struct.
4702**/
4703int
4704lpfc_sli_brdrestart(struct lpfc_hba *phba)
4705{
4706 return phba->lpfc_sli_brdrestart(phba);
4707}
4708
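/*
 * Illustrative sketch only (an assumption about where the binding
 * happens, modeled on lpfc_sli_api_table_setup()): the jump table slot
 * used above is bound once per adapter family, along these lines.
 */
#if 0
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI-3 capable adapter */
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI-4 capable adapter */
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		break;
	}
#endif
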
/**
 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * the HS_FFRDY and HS_MBRDY bits. If the HBA does not become ready
 * within roughly 60 seconds of polling (150 iterations), the function
 * will restart the HBA once more. The function returns zero if the HBA
 * successfully restarted, else it returns a negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries: ~60 seconds in
		 * total before resetting the board again, after which we
		 * check every 1 sec for another 50 retries. Waiting up to
		 * 60 seconds before the board is ready is required for
		 * Falcon FIPS zeroization to complete; any board reset in
		 * between would restart zeroization and further delay
		 * board readiness.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}

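/*
 * A minimal sketch of the polling schedule used above (the helper name
 * and placement are illustrative only, not driver code). The schedule
 * amounts to 10 * 10ms + 90 * 100ms + 50 * 1000ms, roughly 59 seconds,
 * before the board is restarted at i == 150.
 */
#if 0
static unsigned int lpfc_chipset_poll_msecs(uint32_t i)
{
	if (i <= 10)
		return 10;	/* poll quickly during the first ~100ms */
	if (i <= 100)
		return 100;	/* then back off for the next ~9 seconds */
	return 1000;		/* finally once per second */
}
#endif
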
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory required for all hbq
 * entries to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}

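/*
 * Worked example (hypothetical entry counts, for illustration only):
 * if lpfc_hbq_defs[] described two HBQs of 256 and 128 entries,
 * lpfc_sli_hbq_entry_count() would return 384 and lpfc_sli_hbq_size()
 * would return 384 * sizeof(struct lpfc_hbq_entry) bytes.
 */
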
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}

/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during SLI-4 initialization to configure the
 * ELS HBQ and post receive buffers to it. The caller is not required to
 * hold any locks. This function will return zero if successful else it
 * will return a negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[LPFC_ELS_HBQ].entry_count =
		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
	phba->hbq_count = 1;
	/* Initially populate or replenish the HBQ */
	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	return 0;
}

/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}


/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3, i;
	int longs;

	switch (phba->cfg_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override sli_mode "
				"parameter (%d) to auto (0).\n",
				phba->cfg_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized sli_mode parameter: %d.\n",
				phba->cfg_sli_mode);

		break;
	}
	phba->fcp_embed_io = 0;	/* SLI4 FC support only */

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && phba->cfg_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	else if (rc && mode == 2)
		rc = lpfc_sli_config_port(phba, 3);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kcalloc(longs,
						  sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
						sizeof(uint16_t),
						GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}

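/*
 * Worked example for the VPI bitmask sizing above (illustration only):
 * with max_vpi == 255 and BITS_PER_LONG == 64, longs is
 * (255 + 64) / 64 == 4, i.e. four unsigned longs (256 bits), which
 * covers VPIs 0 through 255 with the slack from the integer division.
 */
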
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region, and populates the driver's
 * data structures.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		   uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}

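/*
 * Hypothetical usage sketch for the in/out @vpd_size contract above
 * (the buffer size and the parse call are assumptions, not the literal
 * caller):
 */
#if 0
	uint32_t vpd_size = 1024;	/* bytes allocated for vpd data */
	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);

	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
		/* vpd_size now holds the number of valid bytes in vpd */
		lpfc_parse_vpd(phba, vpd, vpd_size);
	kfree(vpd);
#endif
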
/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device controller attributes (link
 * type, link number and BIOS version) for the port this PCI function
 * is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve controller attributes
 **/
static int
lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	int rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}

	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);

	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
		sizeof(phba->BIOSVersion));

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no,
			phba->BIOSVersion);
out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}

/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device physical port name for the port
 * this PCI function is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve physical port name
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_port_name *get_port_name;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	rc = lpfc_sli4_get_ctl_attr(phba);
	if (rc)
		goto out_free_mboxq;

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_PORT_NAME,
			 sizeof(struct lpfc_mbx_get_port_name) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
	       phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}

/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;

	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
					   LPFC_QUEUE_REARM);

	if (sli4_hba->hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			qp = &sli4_hba->hdwq[qidx];
			/* ARM the corresponding CQ */
			sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
						   LPFC_QUEUE_REARM);
			sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
						   LPFC_QUEUE_REARM);
		}

		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			eq = sli4_hba->hba_eq_hdl[qidx].eq;
			/* ARM the corresponding EQ */
			sli4_hba->sli4_write_eq_db(phba, eq,
						   0, LPFC_QUEUE_REARM);
		}
	}

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_write_cq_db(phba,
				sli4_hba->nvmet_cqset[qidx], 0,
				LPFC_QUEUE_REARM);
		}
	}
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

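/*
 * Hypothetical usage sketch (variable names are illustrative only):
 * ask the port how many XRI extents it offers; the total number of
 * XRIs that could be provisioned is then ext_cnt * ext_size.
 */
#if 0
	uint16_t ext_cnt, ext_size;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &ext_cnt, &ext_size);
	if (!rc)
		total_xri = ext_cnt * ext_size;	/* total_xri: example only */
#endif
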
/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *    1: Extent count or size has changed.
 *    0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}

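/*
 * Hypothetical usage sketch: after a port reset, callers re-provision
 * only when the port's extent layout actually changed.
 */
#if 0
	if (lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI) == 1)
		/* count or size changed: tear down and allocate again */
		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
#endif
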
/**
 * lpfc_sli4_cfg_post_extnts - Post an SLI4 extent allocation request
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *	-Error: Error value describes the condition found.
 *	0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}

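/*
 * Worked example for the embedded/non-embedded decision above (the
 * structure sizes are assumptions for illustration only): if
 * sizeof(MAILBOX_t) were 256 and sizeof(struct mbox_header) were 8,
 * emb_len would be 256 - 8 - 4 = 244 bytes, so a request for more than
 * 122 sixteen-bit extent IDs (req_len > 244) would be reconfigured to
 * the non-embedded, SGE-based mailbox form.
 */
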
/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number. Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.io_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word. */
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

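/*
 * Worked example for the ID unpacking loop above (illustration only):
 * if the port returned rsrc_cnt == 2 extents of rsrc_size == 64, the
 * loop reads the two 16-bit base IDs from one lpfc_id_range word
 * (word4_0, then word4_1) and fills ids[0..127] with 64 consecutive
 * resource IDs starting at each extent's base.
 */
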
/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends the
	 * resource type. All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

static void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
		  uint32_t feature)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_feature) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
			 LPFC_SLI4_MBX_EMBED);

	switch (feature) {
	case LPFC_SET_UE_RECOVERY:
		bf_set(lpfc_mbx_set_feature_UER,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_MDS_DIAGS:
		bf_set(lpfc_mbx_set_feature_mds,
		       &mbox->u.mqe.un.set_feature, 1);
		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	}
}

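/*
 * Hypothetical usage sketch (error handling trimmed): build and issue
 * a SET_FEATURES mailbox to enable MDS diagnostics.
 */
#if 0
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mboxq) {
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		if (lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL) != MBX_SUCCESS)
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
#endif
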
/**
 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	ras_fwlog->ras_active = false;

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
}

/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	ras_fwlog->ras_active = false;
}

/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data
 * [LWPD] and for the buffers posted to the adapter for FW log updates.
 * The buffer count is calculated from the module parameter
 * ras_fwlog_buffsize; the size of each buffer posted to FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

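/*
 * Worked example for the buffer count passed in above, as computed by
 * lpfc_sli4_ras_fwlog_init() below (the 256KB value for
 * LPFC_RAS_MIN_BUFF_POST_SIZE is an assumption for illustration): with
 * cfg_ras_fwlog_buffsize == 4, fwlog_buffsize is 4 * 256KB == 1MB, so
 * fwlog_buff_count == 1MB / LPFC_RAS_MAX_ENTRY_SIZE (64KB) == 16.
 */
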
6347/**
6348 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6349 * @phba: pointer to lpfc hba data structure.
6350 * @pmboxq: pointer to the driver internal queue element for mailbox command.
6351 *
6352 * Completion handler for driver's RAS MBX command to the device.
6353 **/
6354static void
6355lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6356{
6357 MAILBOX_t *mb;
6358 union lpfc_sli4_cfg_shdr *shdr;
6359 uint32_t shdr_status, shdr_add_status;
6360 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6361
6362 mb = &pmb->u.mb;
6363
6364 shdr = (union lpfc_sli4_cfg_shdr *)
6365 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6366 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6367 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6368
6369 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
cb34990b 6370 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6371 "6188 FW LOG mailbox "
6372 "completed with status x%x add_status x%x,"
6373 " mbx status x%x\n",
6374 shdr_status, shdr_add_status, mb->mbxStatus);
6375
6376 ras_fwlog->ras_hwsupport = false;
6377 goto disable_ras;
6378 }
6379
6380 ras_fwlog->ras_active = true;
6381 mempool_free(pmb, phba->mbox_mem_pool);
6382
6383 return;
6384
6385disable_ras:
6386 /* Free RAS DMA memory */
6387 lpfc_sli4_ras_dma_free(phba);
6388 mempool_free(pmb, phba->mbox_mem_pool);
6389}
6390
6391/**
6392 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6393 * @phba: pointer to lpfc hba data structure.
6394 * @fwlog_level: Logging verbosity level.
6395 * @fwlog_enable: Enable/Disable logging.
6396 *
6397 * Initialize memory and post mailbox command to enable FW logging in host
6398 * memory.
6399 **/
6400int
6401lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6402 uint32_t fwlog_level,
6403 uint32_t fwlog_enable)
6404{
6405 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6406 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6407 struct lpfc_dmabuf *dmabuf;
6408 LPFC_MBOXQ_t *mbox;
6409 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6410 int rc = 0;
6411
6412 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6413 phba->cfg_ras_fwlog_buffsize);
6414 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6415
6416 /*
6417 * If re-enabling FW logging support, use the earlier allocated
6418 * DMA buffers while posting the MBX command.
6419 */
6420 if (!ras_fwlog->lwpd.virt) {
6421 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6422 if (rc) {
6423 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
cb34990b 6424 "6189 FW Log Memory Allocation Failed\n");
6425 return rc;
6426 }
6427 }
6428
6429 /* Setup Mailbox command */
6430 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6431 if (!mbox) {
cb34990b 6432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6433 "6190 RAS MBX Alloc Failed");
6434 rc = -ENOMEM;
6435 goto mem_free;
6436 }
6437
6438 ras_fwlog->fw_loglevel = fwlog_level;
6439 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6440 sizeof(struct lpfc_sli4_cfg_mhdr));
6441
6442 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6443 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6444 len, LPFC_SLI4_MBX_EMBED);
6445
6446 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6447 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6448 fwlog_enable);
6449 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6450 ras_fwlog->fw_loglevel);
6451 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6452 ras_fwlog->fw_buffcount);
6453 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6454 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6455
6456 /* Update DMA buffer address */
6457 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6458 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6459
6460 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6461 putPaddrLow(dmabuf->phys);
6462
6463 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6464 putPaddrHigh(dmabuf->phys);
6465 }
6466
6467 /* Update LWPD address */
6468 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6469 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6470
6471 mbox->vport = phba->pport;
6472 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6473
6474 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6475
6476 if (rc == MBX_NOT_FINISHED) {
6477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6478 "6191 FW-Log Mailbox failed. "
6479 "status %d mbxStatus : x%x", rc,
6480 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6481 mempool_free(mbox, phba->mbox_mem_pool);
6482 rc = -EIO;
6483 goto mem_free;
6484 } else
6485 rc = 0;
6486mem_free:
6487 if (rc)
6488 lpfc_sli4_ras_dma_free(phba);
6489
6490 return rc;
6491}
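/*
 * Typical usage (as lpfc_sli4_ras_setup() does below): enable FW logging
 * at the verbosity configured through the module parameters.
 *
 *	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
 *				 LPFC_RAS_ENABLE_LOGGING);
 */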
6492
6493/**
6494 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6495 * @phba: Pointer to HBA context object.
6496 *
6497 * Check if RAS is supported on the adapter and initialize it.
6498 **/
6499void
6500lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6501{
6502 /* Check whether RAS FW Log needs to be enabled */
6503 if (lpfc_check_fwlog_support(phba))
6504 return;
6505
6506 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6507 LPFC_RAS_ENABLE_LOGGING);
6508}
6509
6510/**
6511 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6512 * @phba: Pointer to HBA context object.
6513 *
6514 * This function allocates all SLI4 resource identifiers.
6515 **/
6516int
6517lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6518{
6519 int i, rc, error = 0;
6520 uint16_t count, base;
6521 unsigned long longs;
6522
6523 if (!phba->sli4_hba.rpi_hdrs_in_use)
6524 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6525 if (phba->sli4_hba.extents_in_use) {
6526 /*
6527 * The port supports resource extents. The XRI, VPI, VFI, RPI
6528 * resource extent count must be read and allocated before
6529 * provisioning the resource id arrays.
6530 */
6531 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6532 LPFC_IDX_RSRC_RDY) {
6533 /*
6534 * Extent-based resources are set - the driver could
6535 * be in a port reset. Figure out if any corrective
6536 * actions need to be taken.
6537 */
6538 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6539 LPFC_RSC_TYPE_FCOE_VFI);
6540 if (rc != 0)
6541 error++;
6542 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6543 LPFC_RSC_TYPE_FCOE_VPI);
6544 if (rc != 0)
6545 error++;
6546 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6547 LPFC_RSC_TYPE_FCOE_XRI);
6548 if (rc != 0)
6549 error++;
6550 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6551 LPFC_RSC_TYPE_FCOE_RPI);
6552 if (rc != 0)
6553 error++;
6554
6555 /*
6556 * It's possible that the number of resources
6557 * provided to this port instance changed between
6558 * resets. Detect this condition and reallocate
6559 * resources. Otherwise, there is no action.
6560 */
6561 if (error) {
6562 lpfc_printf_log(phba, KERN_INFO,
6563 LOG_MBOX | LOG_INIT,
6564 "2931 Detected extent resource "
6565 "change. Reallocating all "
6566 "extents.\n");
6567 rc = lpfc_sli4_dealloc_extent(phba,
6568 LPFC_RSC_TYPE_FCOE_VFI);
6569 rc = lpfc_sli4_dealloc_extent(phba,
6570 LPFC_RSC_TYPE_FCOE_VPI);
6571 rc = lpfc_sli4_dealloc_extent(phba,
6572 LPFC_RSC_TYPE_FCOE_XRI);
6573 rc = lpfc_sli4_dealloc_extent(phba,
6574 LPFC_RSC_TYPE_FCOE_RPI);
6575 } else
6576 return 0;
6577 }
6578
6579 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6580 if (unlikely(rc))
6581 goto err_exit;
6582
6583 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6584 if (unlikely(rc))
6585 goto err_exit;
6586
6587 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6588 if (unlikely(rc))
6589 goto err_exit;
6590
6591 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6592 if (unlikely(rc))
6593 goto err_exit;
6594 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6595 LPFC_IDX_RSRC_RDY);
6596 return rc;
6597 } else {
6598 /*
6599 * The port does not support resource extents. The XRI, VPI,
6600 * VFI, RPI resource ids were determined from READ_CONFIG.
6601 * Just allocate the bitmasks and provision the resource id
6602 * arrays. If a port reset is active, the resources don't
6603 * need any action - just exit.
6604 */
6605 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6606 LPFC_IDX_RSRC_RDY) {
6607 lpfc_sli4_dealloc_resource_identifiers(phba);
6608 lpfc_sli4_remove_rpis(phba);
6609 }
6610 /* RPIs. */
6611 count = phba->sli4_hba.max_cfg_param.max_rpi;
6612 if (count <= 0) {
6613 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6614 "3279 Invalid provisioning of "
6615 "rpi:%d\n", count);
6616 rc = -EINVAL;
6617 goto err_exit;
6618 }
6619 base = phba->sli4_hba.max_cfg_param.rpi_base;
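		/*
		 * Round the bitmask up to whole longs; e.g. a hypothetical
		 * count of 4000 RPIs on a 64-bit host needs 63 longs.
		 */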
6620 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6621 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6622 sizeof(unsigned long),
6623 GFP_KERNEL);
6624 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6625 rc = -ENOMEM;
6626 goto err_exit;
6627 }
6396bb22 6628 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6629 GFP_KERNEL);
6630 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6631 rc = -ENOMEM;
6632 goto free_rpi_bmask;
6633 }
6634
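		/*
		 * Map logical index to port-relative id; e.g. a hypothetical
		 * rpi_base of 64 yields rpi_ids[] = { 64, 65, 66, ... }.
		 */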
6635 for (i = 0; i < count; i++)
6636 phba->sli4_hba.rpi_ids[i] = base + i;
6637
6638 /* VPIs. */
6639 count = phba->sli4_hba.max_cfg_param.max_vpi;
6640 if (count <= 0) {
6641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6642 "3280 Invalid provisioning of "
6643 "vpi:%d\n", count);
6644 rc = -EINVAL;
6645 goto free_rpi_ids;
6646 }
6647 base = phba->sli4_hba.max_cfg_param.vpi_base;
6648 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6649 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6650 GFP_KERNEL);
6651 if (unlikely(!phba->vpi_bmask)) {
6652 rc = -ENOMEM;
6653 goto free_rpi_ids;
6654 }
6396bb22 6655 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6656 GFP_KERNEL);
6657 if (unlikely(!phba->vpi_ids)) {
6658 rc = -ENOMEM;
6659 goto free_vpi_bmask;
6660 }
6661
6662 for (i = 0; i < count; i++)
6663 phba->vpi_ids[i] = base + i;
6664
6665 /* XRIs. */
6666 count = phba->sli4_hba.max_cfg_param.max_xri;
6667 if (count <= 0) {
6668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6669 "3281 Invalid provisioning of "
6670 "xri:%d\n", count);
6671 rc = -EINVAL;
6672 goto free_vpi_ids;
6673 }
6674 base = phba->sli4_hba.max_cfg_param.xri_base;
6675 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6676 phba->sli4_hba.xri_bmask = kcalloc(longs,
6677 sizeof(unsigned long),
6678 GFP_KERNEL);
6679 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6680 rc = -ENOMEM;
6681 goto free_vpi_ids;
6682 }
41899be7 6683 phba->sli4_hba.max_cfg_param.xri_used = 0;
6396bb22 6684 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6685 GFP_KERNEL);
6686 if (unlikely(!phba->sli4_hba.xri_ids)) {
6687 rc = -ENOMEM;
6688 goto free_xri_bmask;
6689 }
6690
6691 for (i = 0; i < count; i++)
6692 phba->sli4_hba.xri_ids[i] = base + i;
6693
6694 /* VFIs. */
6695 count = phba->sli4_hba.max_cfg_param.max_vfi;
6696 if (count <= 0) {
6697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6698 "3282 Invalid provisioning of "
6699 "vfi:%d\n", count);
6700 rc = -EINVAL;
6701 goto free_xri_ids;
6702 }
6703 base = phba->sli4_hba.max_cfg_param.vfi_base;
6704 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6705 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6706 sizeof(unsigned long),
6707 GFP_KERNEL);
6708 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6709 rc = -ENOMEM;
6710 goto free_xri_ids;
6711 }
6396bb22 6712 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6713 GFP_KERNEL);
6714 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6715 rc = -ENOMEM;
6716 goto free_vfi_bmask;
6717 }
6718
6719 for (i = 0; i < count; i++)
6720 phba->sli4_hba.vfi_ids[i] = base + i;
6721
6722 /*
6723 * Mark all resources ready. An HBA reset doesn't need
6724 * to reset the initialization.
6725 */
6726 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6727 LPFC_IDX_RSRC_RDY);
6728 return 0;
6729 }
6730
6731 free_vfi_bmask:
6732 kfree(phba->sli4_hba.vfi_bmask);
cd60be49 6733 phba->sli4_hba.vfi_bmask = NULL;
6734 free_xri_ids:
6735 kfree(phba->sli4_hba.xri_ids);
cd60be49 6736 phba->sli4_hba.xri_ids = NULL;
6737 free_xri_bmask:
6738 kfree(phba->sli4_hba.xri_bmask);
cd60be49 6739 phba->sli4_hba.xri_bmask = NULL;
6740 free_vpi_ids:
6741 kfree(phba->vpi_ids);
cd60be49 6742 phba->vpi_ids = NULL;
6743 free_vpi_bmask:
6744 kfree(phba->vpi_bmask);
cd60be49 6745 phba->vpi_bmask = NULL;
6746 free_rpi_ids:
6747 kfree(phba->sli4_hba.rpi_ids);
cd60be49 6748 phba->sli4_hba.rpi_ids = NULL;
6749 free_rpi_bmask:
6750 kfree(phba->sli4_hba.rpi_bmask);
cd60be49 6751 phba->sli4_hba.rpi_bmask = NULL;
6752 err_exit:
6753 return rc;
6754}
6755
6756/**
6757 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6758 * @phba: Pointer to HBA context object.
6759 *
6760 * This function frees the resource identifier bitmasks and arrays, or
6761 * releases the allocated resource extents, for all SLI4 resource types.
6762 **/
6763int
6764lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6765{
6766 if (phba->sli4_hba.extents_in_use) {
6767 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6768 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6769 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6770 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6771 } else {
6772 kfree(phba->vpi_bmask);
16a3a208 6773 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6774 kfree(phba->vpi_ids);
6775 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6776 kfree(phba->sli4_hba.xri_bmask);
6777 kfree(phba->sli4_hba.xri_ids);
6778 kfree(phba->sli4_hba.vfi_bmask);
6779 kfree(phba->sli4_hba.vfi_ids);
6780 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6781 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6782 }
6783
6784 return 0;
6785}
6786
6787/**
6788 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6789 * @phba: Pointer to HBA context object.
6790 * @type: The resource extent type.
6791 * @extnt_cnt: buffer to hold port extent count response
6792 * @extnt_size: buffer to hold port extent size response.
6793 *
6794 * This function calls the port to read the host allocated extents
6795 * for a particular type.
6796 **/
6797int
6798lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6799 uint16_t *extnt_cnt, uint16_t *extnt_size)
6800{
6801 bool emb;
6802 int rc = 0;
6803 uint16_t curr_blks = 0;
6804 uint32_t req_len, emb_len;
6805 uint32_t alloc_len, mbox_tmo;
6806 struct list_head *blk_list_head;
6807 struct lpfc_rsrc_blks *rsrc_blk;
6808 LPFC_MBOXQ_t *mbox;
6809 void *virtaddr = NULL;
6810 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6811 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6812 union lpfc_sli4_cfg_shdr *shdr;
6813
6814 switch (type) {
6815 case LPFC_RSC_TYPE_FCOE_VPI:
6816 blk_list_head = &phba->lpfc_vpi_blk_list;
6817 break;
6818 case LPFC_RSC_TYPE_FCOE_XRI:
6819 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6820 break;
6821 case LPFC_RSC_TYPE_FCOE_VFI:
6822 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6823 break;
6824 case LPFC_RSC_TYPE_FCOE_RPI:
6825 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6826 break;
6827 default:
6828 return -EIO;
6829 }
6830
6831 /* Count the number of extents currently allocated for this type. */
6832 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6833 if (curr_blks == 0) {
6834 /*
6835 * The GET_ALLOCATED mailbox does not return the size,
6836 * just the count. The size should be just the size
6837 * stored in the current allocated block and all sizes
6838 * for an extent type are the same so set the return
6839 * value now.
6840 */
6841 *extnt_size = rsrc_blk->rsrc_size;
6842 }
6843 curr_blks++;
6844 }
6845
6846 /*
6847 * Calculate the size of an embedded mailbox. The uint32_t
6848 * accounts for the extents-specific word.
6849 */
6850 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6851 sizeof(uint32_t);
6852
6853 /*
6854 * Presume the allocation and response will fit into an embedded
6855 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6856 */
6857 emb = LPFC_SLI4_MBX_EMBED;
6858 req_len = emb_len;
6859 if (req_len > emb_len) {
6860 req_len = curr_blks * sizeof(uint16_t) +
6861 sizeof(union lpfc_sli4_cfg_shdr) +
6862 sizeof(uint32_t);
6863 emb = LPFC_SLI4_MBX_NEMBED;
6864 }
6865
6866 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6867 if (!mbox)
6868 return -ENOMEM;
6869 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6870
6871 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6872 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6873 req_len, emb);
6874 if (alloc_len < req_len) {
6875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6876 "2983 Allocated DMA memory size (x%x) is "
6877 "less than the requested DMA memory "
6878 "size (x%x)\n", alloc_len, req_len);
6879 rc = -ENOMEM;
6880 goto err_exit;
6881 }
6882 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6883 if (unlikely(rc)) {
6884 rc = -EIO;
6885 goto err_exit;
6886 }
6887
6888 if (!phba->sli4_hba.intr_enable)
6889 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6890 else {
a183a15f 6891 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6892 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6893 }
6894
6895 if (unlikely(rc)) {
6896 rc = -EIO;
6897 goto err_exit;
6898 }
6899
6900 /*
6901 * Figure out where the response is located. Then get local pointers
6903 * to the response data. The port does not guarantee a response to
6904 * every extent count request, so update the local variable with the
6904 * allocated count from the port.
6905 */
6906 if (emb == LPFC_SLI4_MBX_EMBED) {
6907 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6908 shdr = &rsrc_ext->header.cfg_shdr;
6909 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6910 } else {
6911 virtaddr = mbox->sge_array->addr[0];
6912 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6913 shdr = &n_rsrc->cfg_shdr;
6914 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6915 }
6916
6917 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6918 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6919 "2984 Failed to read allocated resources "
6920 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6921 type,
6922 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6923 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6924 rc = -EIO;
6925 goto err_exit;
6926 }
6927 err_exit:
6928 lpfc_sli4_mbox_cmd_free(phba, mbox);
6929 return rc;
6930}
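/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * read back how many XRI extents the port allocated to this host and
 * the size of each extent.
 *
 *	uint16_t cnt, size;
 *
 *	if (!lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 *				"xri extents: %d of size %d\n", cnt, size);
 */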
6931
8a9d2e80 6932/**
0ef69968 6933 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
8a9d2e80 6934 * @phba: pointer to lpfc hba data structure.
6936 * @sgl_list: linked list of sgl buffers to post
6937 * @cnt: number of linked list buffers
8a9d2e80 6938 *
895427bd 6939 * This routine walks the list of buffers that have been allocated and
6940 * reposts them to the port by using SGL block post. This is needed after a
6941 * pci_function_reset/warm_start or start. It attempts to construct blocks
6942 * of buffer sgls which contain contiguous xris and uses the non-embedded
6943 * SGL block post mailbox commands to post them to the port. For a single
6944 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
6945 * mailbox command for posting.
6946 *
6947 * Returns: the number of XRIs actually posted, or -EIO on failure.
6948 **/
6949static int
6950lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6951 struct list_head *sgl_list, int cnt)
6952{
6953 struct lpfc_sglq *sglq_entry = NULL;
6954 struct lpfc_sglq *sglq_entry_next = NULL;
6955 struct lpfc_sglq *sglq_entry_first = NULL;
6956 int status, total_cnt;
6957 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6958 int last_xritag = NO_XRI;
6959 LIST_HEAD(prep_sgl_list);
6960 LIST_HEAD(blck_sgl_list);
6961 LIST_HEAD(allc_sgl_list);
6962 LIST_HEAD(post_sgl_list);
6963 LIST_HEAD(free_sgl_list);
6964
38c20673 6965 spin_lock_irq(&phba->hbalock);
6966 spin_lock(&phba->sli4_hba.sgl_list_lock);
6967 list_splice_init(sgl_list, &allc_sgl_list);
6968 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 6969 spin_unlock_irq(&phba->hbalock);
8a9d2e80 6970
895427bd 6971 total_cnt = cnt;
6972 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6973 &allc_sgl_list, list) {
6974 list_del_init(&sglq_entry->list);
6975 block_cnt++;
6976 if ((last_xritag != NO_XRI) &&
6977 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6978 /* a hole in xri block, form a sgl posting block */
6979 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6980 post_cnt = block_cnt - 1;
6981 /* prepare list for next posting block */
6982 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6983 block_cnt = 1;
6984 } else {
6985 /* prepare list for next posting block */
6986 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6987 /* enough sgls for non-embed sgl mbox command */
6988 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6989 list_splice_init(&prep_sgl_list,
6990 &blck_sgl_list);
6991 post_cnt = block_cnt;
6992 block_cnt = 0;
6993 }
6994 }
6995 num_posted++;
6996
6997 /* keep track of last sgl's xritag */
6998 last_xritag = sglq_entry->sli4_xritag;
6999
7000 /* end of repost sgl list condition for buffers */
7001 if (num_posted == total_cnt) {
7002 if (post_cnt == 0) {
7003 list_splice_init(&prep_sgl_list,
7004 &blck_sgl_list);
7005 post_cnt = block_cnt;
7006 } else if (block_cnt == 1) {
7007 status = lpfc_sli4_post_sgl(phba,
7008 sglq_entry->phys, 0,
7009 sglq_entry->sli4_xritag);
7010 if (!status) {
7011 /* successful, put sgl to posted list */
7012 list_add_tail(&sglq_entry->list,
7013 &post_sgl_list);
7014 } else {
7015 /* Failure, put sgl to free list */
7016 lpfc_printf_log(phba, KERN_WARNING,
7017 LOG_SLI,
895427bd 7018 "3159 Failed to post "
7019 "sgl, xritag:x%x\n",
7020 sglq_entry->sli4_xritag);
7021 list_add_tail(&sglq_entry->list,
7022 &free_sgl_list);
711ea882 7023 total_cnt--;
7024 }
7025 }
7026 }
7027
7028 /* continue until a nembed page worth of sgls */
7029 if (post_cnt == 0)
7030 continue;
7031
7032 /* post the buffer list sgls as a block */
7033 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7034 post_cnt);
7035
7036 if (!status) {
7037 /* success, put sgl list to posted sgl list */
7038 list_splice_init(&blck_sgl_list, &post_sgl_list);
7039 } else {
7040 /* Failure, put sgl list to free sgl list */
7041 sglq_entry_first = list_first_entry(&blck_sgl_list,
7042 struct lpfc_sglq,
7043 list);
7044 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
895427bd 7045 "3160 Failed to post sgl-list, "
7046 "xritag:x%x-x%x\n",
7047 sglq_entry_first->sli4_xritag,
7048 (sglq_entry_first->sli4_xritag +
7049 post_cnt - 1));
7050 list_splice_init(&blck_sgl_list, &free_sgl_list);
711ea882 7051 total_cnt -= post_cnt;
7052 }
7053
7054 /* don't reset xritag due to hole in xri block */
7055 if (block_cnt == 0)
7056 last_xritag = NO_XRI;
7057
895427bd 7058 /* reset sgl post count for next round of posting */
7059 post_cnt = 0;
7060 }
7061
895427bd 7062 /* free the sgls failed to post */
7063 lpfc_free_sgl_list(phba, &free_sgl_list);
7064
895427bd 7065 /* push sgls posted to the available list */
8a9d2e80 7066 if (!list_empty(&post_sgl_list)) {
38c20673 7067 spin_lock_irq(&phba->hbalock);
7068 spin_lock(&phba->sli4_hba.sgl_list_lock);
7069 list_splice_init(&post_sgl_list, sgl_list);
7070 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 7071 spin_unlock_irq(&phba->hbalock);
7072 } else {
7073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 7074 "3161 Failure to post sgl to port.\n");
7075 return -EIO;
7076 }
7077
7078 /* return the number of XRIs actually posted */
7079 return total_cnt;
7080}
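/*
 * Worked example (hypothetical xritags): for sgls with xritags 10, 11,
 * 12, 14, the loop above detects the hole at 13 and posts {10, 11, 12}
 * as one non-embedded SGL block, then starts a new block at 14; the
 * trailing single sgl 14 is posted with the embedded mailbox command.
 */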
7081
0794d601 7082/**
5e5b511d 7083 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7084 * @phba: pointer to lpfc hba data structure.
7085 *
7086 * This routine walks the list of nvme buffers that have been allocated and
7087 * repost them to the port by using SGL block post. This is needed after a
7088 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7089 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
5e5b511d 7090 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7091 *
7092 * Returns: 0 = success, non-zero failure.
7093 **/
3999df75 7094static int
5e5b511d 7095lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7096{
7097 LIST_HEAD(post_nblist);
7098 int num_posted, rc = 0;
7099
7100 /* get all NVME buffers that need to be reposted to a local list */
5e5b511d 7101 lpfc_io_buf_flush(phba, &post_nblist);
7102
7103 /* post the list of nvme buffer sgls to port if available */
7104 if (!list_empty(&post_nblist)) {
7105 num_posted = lpfc_sli4_post_io_sgl_list(
7106 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7107 /* failed to post any nvme buffer, return error */
7108 if (num_posted == 0)
7109 rc = -EIO;
7110 }
7111 return rc;
7112}
7113
3999df75 7114static void
7115lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7116{
7117 uint32_t len;
7118
7119 len = sizeof(struct lpfc_mbx_set_host_data) -
7120 sizeof(struct lpfc_sli4_cfg_mhdr);
7121 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7122 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7123 LPFC_SLI4_MBX_EMBED);
7124
7125 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7126 mbox->u.mqe.un.set_host_data.param_len =
7127 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7128 snprintf(mbox->u.mqe.un.set_host_data.data,
7129 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7130 "Linux %s v"LPFC_DRIVER_VERSION,
7131 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7132}
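/*
 * For example, with a hypothetical driver version of 12.4.0.0 an FC
 * (non-FCoE) adapter would report the host data string
 * "Linux FC v12.4.0.0" through the SET_HOST_DATA mailbox built above.
 */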
7133
a8cf5dfe 7134int
6c621a22 7135lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
a8cf5dfe 7136 struct lpfc_queue *drq, int count, int idx)
7137{
7138 int rc, i;
7139 struct lpfc_rqe hrqe;
7140 struct lpfc_rqe drqe;
7141 struct lpfc_rqb *rqbp;
411de511 7142 unsigned long flags;
7143 struct rqb_dmabuf *rqb_buffer;
7144 LIST_HEAD(rqb_buf_list);
7145
411de511 7146 spin_lock_irqsave(&phba->hbalock, flags);
7147 rqbp = hrq->rqbp;
7148 for (i = 0; i < count; i++) {
7149 /* If RQ is already full, don't bother */
7150 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7151 break;
7152 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7153 if (!rqb_buffer)
7154 break;
7155 rqb_buffer->hrq = hrq;
7156 rqb_buffer->drq = drq;
a8cf5dfe 7157 rqb_buffer->idx = idx;
7158 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7159 }
7160 while (!list_empty(&rqb_buf_list)) {
7161 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7162 hbuf.list);
7163
7164 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7165 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7166 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7167 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7168 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7169 if (rc < 0) {
7170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7171 "6421 Cannot post to HRQ %d: %x %x %x "
7172 "DRQ %x %x\n",
7173 hrq->queue_id,
7174 hrq->host_index,
7175 hrq->hba_index,
7176 hrq->entry_count,
7177 drq->host_index,
7178 drq->hba_index);
7179 rqbp->rqb_free_buffer(phba, rqb_buffer);
7180 } else {
7181 list_add_tail(&rqb_buffer->hbuf.list,
7182 &rqbp->rqb_buffer_list);
7183 rqbp->buffer_count++;
7184 }
7185 }
411de511 7186 spin_unlock_irqrestore(&phba->hbalock, flags);
7187 return 1;
7188}
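/*
 * Usage sketch (mirrors the nvmet MRQ setup in lpfc_sli4_hba_setup()
 * below); the count of 128 initial buffers is an assumption for the
 * example:
 *
 *	lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[0],
 *			    phba->sli4_hba.nvmet_mrq_data[0], 128, 0);
 */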
7189
da0436e9 7190/**
183b8021 7191 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7192 * @phba: Pointer to HBA context object.
7193 *
7194 * This function is the main SLI4 device initialization PCI function. This
7195 * function is called by the HBA initialization code, HBA reset code and
7196 * HBA error attention handler code. Caller is not required to hold any
7197 * locks.
7198 **/
7199int
7200lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7201{
c490850a 7202 int rc, i, cnt, len;
7203 LPFC_MBOXQ_t *mboxq;
7204 struct lpfc_mqe *mqe;
7205 uint8_t *vpd;
7206 uint32_t vpd_size;
7207 uint32_t ftr_rsp = 0;
7208 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7209 struct lpfc_vport *vport = phba->pport;
7210 struct lpfc_dmabuf *mp;
2d7dbc4c 7211 struct lpfc_rqb *rqbp;
7212
7213 /* Perform a PCI function reset to start from clean */
7214 rc = lpfc_pci_function_reset(phba);
7215 if (unlikely(rc))
7216 return -ENODEV;
7217
7218 /* Check the HBA Host Status Register for readiness */
7219 rc = lpfc_sli4_post_status_check(phba);
7220 if (unlikely(rc))
7221 return -ENODEV;
7222 else {
7223 spin_lock_irq(&phba->hbalock);
7224 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7225 spin_unlock_irq(&phba->hbalock);
7226 }
7227
7228 /*
7229 * Allocate a single mailbox container for initializing the
7230 * port.
7231 */
7232 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7233 if (!mboxq)
7234 return -ENOMEM;
7235
da0436e9 7236 /* Issue READ_REV to collect vpd and FW information. */
49198b37 7237 vpd_size = SLI4_PAGE_SIZE;
7238 vpd = kzalloc(vpd_size, GFP_KERNEL);
7239 if (!vpd) {
7240 rc = -ENOMEM;
7241 goto out_free_mbox;
7242 }
7243
7244 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7245 if (unlikely(rc)) {
7246 kfree(vpd);
7247 goto out_free_mbox;
7248 }
572709e2 7249
da0436e9 7250 mqe = &mboxq->u.mqe;
f1126688 7251 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
b5c53958 7252 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
76a95d75 7253 phba->hba_flag |= HBA_FCOE_MODE;
7254 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7255 } else {
76a95d75 7256 phba->hba_flag &= ~HBA_FCOE_MODE;
b5c53958 7257 }
7258
7259 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7260 LPFC_DCBX_CEE_MODE)
7261 phba->hba_flag |= HBA_FIP_SUPPORT;
7262 else
7263 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7264
7265 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7266
c31098ce 7267 if (phba->sli_rev != LPFC_SLI_REV4) {
7268 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7269 "0376 READ_REV Error. SLI Level %d "
7270 "FCoE enabled %d\n",
76a95d75 7271 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 7272 rc = -EIO;
7273 kfree(vpd);
7274 goto out_free_mbox;
da0436e9 7275 }
cd1c8301 7276
7277 /*
7278 * Continue initialization with default values even if driver failed
7279 * to read FCoE param config regions, only read parameters if the
7280 * board is FCoE
7281 */
7282 if (phba->hba_flag & HBA_FCOE_MODE &&
7283 lpfc_sli4_read_fcoe_params(phba))
7284 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7285 "2570 Failed to read FCoE parameters\n");
7286
7287 /*
7288 * Retrieve sli4 device physical port name, failure of doing it
7289 * is considered as non-fatal.
7290 */
7291 rc = lpfc_sli4_retrieve_pport_name(phba);
7292 if (!rc)
7293 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7294 "3080 Successful retrieving SLI4 device "
7295 "physical port name: %s.\n", phba->Port);
7296
7297 rc = lpfc_sli4_get_ctl_attr(phba);
7298 if (!rc)
7299 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7300 "8351 Successful retrieving SLI4 device "
7301 "CTL ATTR\n");
7302
7303 /*
7304 * Evaluate the read rev and vpd data. Populate the driver
7305 * state with the results. If this routine fails, the failure
7306 * is not fatal as the driver will use generic values.
7307 */
7308 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7309 if (unlikely(!rc)) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7311 "0377 Error %d parsing vpd. "
7312 "Using defaults.\n", rc);
7313 rc = 0;
7314 }
76a95d75 7315 kfree(vpd);
da0436e9 7316
7317 /* Save information as VPD data */
7318 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7319 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7320
7321 /*
7322 * Disable embedded NVME commands here because the first G7 ASIC
7323 * doesn't support the standard 0x5a NVME cmd descriptor type/subtype.
7324 */
7325 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7326 LPFC_SLI_INTF_IF_TYPE_6) &&
7327 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7328 (phba->vpd.rev.smRev == 0) &&
7329 (phba->cfg_nvme_embed_cmd == 1))
7330 phba->cfg_nvme_embed_cmd = 0;
7331
7332 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7333 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7334 &mqe->un.read_rev);
7335 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7336 &mqe->un.read_rev);
7337 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7338 &mqe->un.read_rev);
7339 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7340 &mqe->un.read_rev);
7341 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7342 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7343 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7344 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7345 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7346 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7347 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7348 "(%d):0380 READ_REV Status x%x "
7349 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7350 mboxq->vport ? mboxq->vport->vpi : 0,
7351 bf_get(lpfc_mqe_status, mqe),
7352 phba->vpd.rev.opFwName,
7353 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7354 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9 7355
7356 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7357 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7358 if (phba->pport->cfg_lun_queue_depth > rc) {
7359 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7360 "3362 LUN queue depth changed from %d to %d\n",
7361 phba->pport->cfg_lun_queue_depth, rc);
7362 phba->pport->cfg_lun_queue_depth = rc;
7363 }
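	/*
	 * e.g. a hypothetical max_xri of 1024 caps the default LUN queue
	 * depth above at 1024 >> 3 = 128.
	 */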
7364
65791f1f 7365 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7366 LPFC_SLI_INTF_IF_TYPE_0) {
7367 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7368 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7369 if (rc == MBX_SUCCESS) {
7370 phba->hba_flag |= HBA_RECOVERABLE_UE;
7371 /* Set 1Sec interval to detect UE */
7372 phba->eratt_poll_interval = 1;
7373 phba->sli4_hba.ue_to_sr = bf_get(
7374 lpfc_mbx_set_feature_UESR,
7375 &mboxq->u.mqe.un.set_feature);
7376 phba->sli4_hba.ue_to_rp = bf_get(
7377 lpfc_mbx_set_feature_UERP,
7378 &mboxq->u.mqe.un.set_feature);
7379 }
7380 }
7381
7382 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7383 /* Enable MDS Diagnostics only if the SLI Port supports it */
7384 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7385 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7386 if (rc != MBX_SUCCESS)
7387 phba->mds_diags_support = 0;
7388 }
572709e2 7389
7390 /*
7391 * Discover the port's supported feature set and match it against the
7392 * host's requests.
7393 */
7394 lpfc_request_features(phba, mboxq);
7395 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7396 if (unlikely(rc)) {
7397 rc = -EIO;
76a95d75 7398 goto out_free_mbox;
7399 }
7400
7401 /*
7402 * The port must support FCP initiator mode as this is the
7403 * only mode running in the host.
7404 */
7405 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7406 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7407 "0378 No support for fcpi mode.\n");
7408 ftr_rsp++;
7409 }
7410
7411 /* Performance Hints are ONLY for FCoE */
7412 if (phba->hba_flag & HBA_FCOE_MODE) {
7413 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7414 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7415 else
7416 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7417 }
7418
7419 /*
7420 * If the port cannot support the host's requested features
7421 * then turn off the global config parameters to disable the
7422 * feature in the driver. This is not a fatal error.
7423 */
7424 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7425 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7426 phba->cfg_enable_bg = 0;
7427 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
bf08611b 7428 ftr_rsp++;
f44ac12f 7429 }
bf08611b 7430 }
7431
7432 if (phba->max_vpi && phba->cfg_enable_npiv &&
7433 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7434 ftr_rsp++;
7435
7436 if (ftr_rsp) {
7437 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7438 "0379 Feature Mismatch Data: x%08x %08x "
7439 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7440 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7441 phba->cfg_enable_npiv, phba->max_vpi);
7442 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7443 phba->cfg_enable_bg = 0;
7444 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7445 phba->cfg_enable_npiv = 0;
7446 }
7447
7448 /* These SLI3 features are assumed in SLI4 */
7449 spin_lock_irq(&phba->hbalock);
7450 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7451 spin_unlock_irq(&phba->hbalock);
7452
7453 /*
7454 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7455 * calls depend on these resources to complete port setup.
7456 */
7457 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7458 if (rc) {
7459 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7460 "2920 Failed to alloc Resource IDs "
7461 "rc = x%x\n", rc);
7462 goto out_free_mbox;
7463 }
7464
7465 lpfc_set_host_data(phba, mboxq);
7466
7467 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7468 if (rc) {
7469 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7470 "2134 Failed to set host os driver version %x",
7471 rc);
7472 }
7473
da0436e9 7474 /* Read the port's service parameters. */
7475 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7476 if (rc) {
7477 phba->link_state = LPFC_HBA_ERROR;
7478 rc = -ENOMEM;
76a95d75 7479 goto out_free_mbox;
7480 }
7481
7482 mboxq->vport = vport;
7483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3e1f0718 7484 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7485 if (rc == MBX_SUCCESS) {
7486 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7487 rc = 0;
7488 }
7489
7490 /*
7491 * This memory was allocated by the lpfc_read_sparam routine. Release
7492 * it to the mbuf pool.
7493 */
7494 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7495 kfree(mp);
3e1f0718 7496 mboxq->ctx_buf = NULL;
7497 if (unlikely(rc)) {
7498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7499 "0382 READ_SPARAM command failed "
7500 "status %d, mbxStatus x%x\n",
7501 rc, bf_get(lpfc_mqe_status, mqe));
7502 phba->link_state = LPFC_HBA_ERROR;
7503 rc = -EIO;
76a95d75 7504 goto out_free_mbox;
7505 }
7506
0558056c 7507 lpfc_update_vport_wwn(vport);
7508
7509 /* Update the fc_host data structures with new wwn. */
7510 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7511 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7512
7513 /* Create all the SLI4 queues */
7514 rc = lpfc_sli4_queue_create(phba);
7515 if (rc) {
7516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7517 "3089 Failed to allocate queues\n");
7518 rc = -ENODEV;
7519 goto out_free_mbox;
7520 }
7521 /* Set up all the queues to the device */
7522 rc = lpfc_sli4_queue_setup(phba);
7523 if (unlikely(rc)) {
7524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7525 "0381 Error %d during queue setup.\n ", rc);
7526 goto out_stop_timers;
7527 }
7528 /* Initialize the driver internal SLI layer lists. */
7529 lpfc_sli4_setup(phba);
7530 lpfc_sli4_queue_init(phba);
7531
7532 /* update host els xri-sgl sizes and mappings */
7533 rc = lpfc_sli4_els_sgl_update(phba);
7534 if (unlikely(rc)) {
7535 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7536 "1400 Failed to update xri-sgl size and "
7537 "mapping: %d\n", rc);
895427bd 7538 goto out_destroy_queue;
7539 }
7540
8a9d2e80 7541 /* register the els sgl pool to the port */
7542 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7543 phba->sli4_hba.els_xri_cnt);
7544 if (unlikely(rc < 0)) {
7545 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7546 "0582 Error %d during els sgl post "
7547 "operation\n", rc);
7548 rc = -ENODEV;
895427bd 7549 goto out_destroy_queue;
8a9d2e80 7550 }
895427bd 7551 phba->sli4_hba.els_xri_cnt = rc;
8a9d2e80 7552
7553 if (phba->nvmet_support) {
7554 /* update host nvmet xri-sgl sizes and mappings */
7555 rc = lpfc_sli4_nvmet_sgl_update(phba);
7556 if (unlikely(rc)) {
7557 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7558 "6308 Failed to update nvmet-sgl size "
7559 "and mapping: %d\n", rc);
7560 goto out_destroy_queue;
7561 }
7562
7563 /* register the nvmet sgl pool to the port */
7564 rc = lpfc_sli4_repost_sgl_list(
7565 phba,
7566 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7567 phba->sli4_hba.nvmet_xri_cnt);
7568 if (unlikely(rc < 0)) {
7569 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7570 "3117 Error %d during nvmet "
7571 "sgl post\n", rc);
7572 rc = -ENODEV;
7573 goto out_destroy_queue;
7574 }
7575 phba->sli4_hba.nvmet_xri_cnt = rc;
7576
7577 cnt = phba->cfg_iocb_cnt * 1024;
7578 /* We need 1 iocbq for every SGL, for IO processing */
7579 cnt += phba->sli4_hba.nvmet_xri_cnt;
f358dd0c 7580 } else {
0794d601 7581 /* update host common xri-sgl sizes and mappings */
5e5b511d 7582 rc = lpfc_sli4_io_sgl_update(phba);
7583 if (unlikely(rc)) {
7584 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
0794d601 7585 "6082 Failed to update nvme-sgl size "
7586 "and mapping: %d\n", rc);
7587 goto out_destroy_queue;
7588 }
7589
0794d601 7590 /* register the allocated common sgl pool to the port */
5e5b511d 7591 rc = lpfc_sli4_repost_io_sgl_list(phba);
7592 if (unlikely(rc)) {
7593 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7594 "6116 Error %d during nvme sgl post "
7595 "operation\n", rc);
7596 /* Some NVME buffers were moved to abort nvme list */
7597 /* A pci function reset will repost them */
7598 rc = -ENODEV;
7599 goto out_destroy_queue;
7600 }
6c621a22 7601 cnt = phba->cfg_iocb_cnt * 1024;
7602 }
7603
7604 if (!phba->sli.iocbq_lookup) {
7605 /* Initialize and populate the iocb list per host */
7606 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11e644e2 7607 "2821 initialize iocb list %d total %d\n",
7608 phba->cfg_iocb_cnt, cnt);
7609 rc = lpfc_init_iocb_list(phba, cnt);
7610 if (rc) {
7611 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11e644e2 7612 "1413 Failed to init iocb list.\n");
7613 goto out_destroy_queue;
7614 }
7615 }
7616
7617 if (phba->nvmet_support)
7618 lpfc_nvmet_create_targetport(phba);
7619
2d7dbc4c 7620 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7621 /* Post initial buffers to all RQs created */
7622 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7623 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7624 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7625 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7626 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
61f3d4bf 7627 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7628 rqbp->buffer_count = 0;
7629
7630 lpfc_post_rq_buffer(
7631 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7632 phba->sli4_hba.nvmet_mrq_data[i],
2448e484 7633 phba->cfg_nvmet_mrq_post, i);
7634 }
7635 }
7636
7637 /* Post the rpi header region to the device. */
7638 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7639 if (unlikely(rc)) {
7640 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7641 "0393 Error %d during rpi post operation\n",
7642 rc);
7643 rc = -ENODEV;
895427bd 7644 goto out_destroy_queue;
da0436e9 7645 }
97f2ecf1 7646 lpfc_sli4_node_prep(phba);
da0436e9 7647
895427bd 7648 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2d7dbc4c 7649 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7650 /*
7651 * The FC Port needs to register FCFI (index 0)
7652 */
7653 lpfc_reg_fcfi(phba, mboxq);
7654 mboxq->vport = phba->pport;
7655 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7656 if (rc != MBX_SUCCESS)
7657 goto out_unset_queue;
7658 rc = 0;
7659 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7660 &mboxq->u.mqe.un.reg_fcfi);
7661 } else {
7662 /* We are in NVME Target mode with MRQ > 1 */
7663
7664 /* First register the FCFI */
7665 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7666 mboxq->vport = phba->pport;
7667 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7668 if (rc != MBX_SUCCESS)
7669 goto out_unset_queue;
7670 rc = 0;
7671 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7672 &mboxq->u.mqe.un.reg_fcfi_mrq);
7673
7674 /* Next register the MRQs */
7675 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7676 mboxq->vport = phba->pport;
7677 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7678 if (rc != MBX_SUCCESS)
7679 goto out_unset_queue;
7680 rc = 0;
7681 }
7682 /* Check if the port is configured to be disabled */
7683 lpfc_sli_read_link_ste(phba);
7684 }
7685
7686 /* Don't post more new bufs if repost already recovered
7687 * the nvme sgls.
7688 */
7689 if (phba->nvmet_support == 0) {
7690 if (phba->sli4_hba.io_xri_cnt == 0) {
7691 len = lpfc_new_io_buf(
7692 phba, phba->sli4_hba.io_xri_max);
7693 if (len == 0) {
7694 rc = -ENOMEM;
7695 goto out_unset_queue;
7696 }
7697
7698 if (phba->cfg_xri_rebalancing)
7699 lpfc_create_multixri_pools(phba);
7700 }
7701 } else {
7702 phba->cfg_xri_rebalancing = 0;
7703 }
7704
7705 /* Allow asynchronous mailbox command to go through */
7706 spin_lock_irq(&phba->hbalock);
7707 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7708 spin_unlock_irq(&phba->hbalock);
7709
7710 /* Post receive buffers to the device */
7711 lpfc_sli4_rb_setup(phba);
7712
7713 /* Reset HBA FCF states after HBA reset */
7714 phba->fcf.fcf_flag = 0;
7715 phba->fcf.current_rec.flag = 0;
7716
da0436e9 7717 /* Start the ELS watchdog timer */
8fa38513 7718 mod_timer(&vport->els_tmofunc,
256ec0d0 7719 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7720
7721 /* Start heart beat timer */
7722 mod_timer(&phba->hb_tmofunc,
256ec0d0 7723 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7724 phba->hb_outstanding = 0;
7725 phba->last_completion_time = jiffies;
7726
7727 /* start eq_delay heartbeat */
7728 if (phba->cfg_auto_imax)
7729 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7730 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7731
da0436e9 7732 /* Start error attention (ERATT) polling timer */
256ec0d0 7733 mod_timer(&phba->eratt_poll,
65791f1f 7734 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
da0436e9 7735
7736 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7737 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7738 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7739 if (!rc) {
7740 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7741 "2829 This device supports "
7742 "Advanced Error Reporting (AER)\n");
7743 spin_lock_irq(&phba->hbalock);
7744 phba->hba_flag |= HBA_AER_ENABLED;
7745 spin_unlock_irq(&phba->hbalock);
7746 } else {
7747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7748 "2830 This device does not support "
7749 "Advanced Error Reporting (AER)\n");
7750 phba->cfg_aer_support = 0;
7751 }
0a96e975 7752 rc = 0;
7753 }
7754
7755 /*
7756 * The port is ready, set the host's link state to LINK_DOWN
7757 * in preparation for link interrupts.
7758 */
7759 spin_lock_irq(&phba->hbalock);
7760 phba->link_state = LPFC_LINK_DOWN;
7761
7762 /* Check if physical ports are trunked */
7763 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7764 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7765 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7766 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7767 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7768 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7769 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7770 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
da0436e9 7771 spin_unlock_irq(&phba->hbalock);
1dc5ec24 7772
7773 /* Arm the CQs and then EQs on device */
7774 lpfc_sli4_arm_cqeq_intr(phba);
7775
7776 /* Indicate device interrupt mode */
7777 phba->sli4_hba.intr_enable = 1;
7778
7779 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7780 (phba->hba_flag & LINK_DISABLED)) {
7781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7782 "3103 Adapter Link is disabled.\n");
7783 lpfc_down_link(phba, mboxq);
7784 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7785 if (rc != MBX_SUCCESS) {
7786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7787 "3104 Adapter failed to issue "
7788 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
c490850a 7789 goto out_io_buff_free;
7790 }
7791 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7792 /* don't perform init_link on SLI4 FC port loopback test */
7793 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7794 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7795 if (rc)
c490850a 7796 goto out_io_buff_free;
1b51197d 7797 }
7798 }
7799 mempool_free(mboxq, phba->mbox_mem_pool);
7800 return rc;
7801out_io_buff_free:
7802 /* Free allocated IO Buffers */
7803 lpfc_io_free(phba);
76a95d75 7804out_unset_queue:
da0436e9 7805 /* Unset all the queues set up in this routine when error out */
7806 lpfc_sli4_queue_unset(phba);
7807out_destroy_queue:
6c621a22 7808 lpfc_free_iocb_list(phba);
5350d872 7809 lpfc_sli4_queue_destroy(phba);
da0436e9 7810out_stop_timers:
5350d872 7811 lpfc_stop_hba_timers(phba);
7812out_free_mbox:
7813 mempool_free(mboxq, phba->mbox_mem_pool);
7814 return rc;
7815}
7816
7817/**
7818 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7819 * @t: pointer to the timer_list embedded in the hba structure.
7820 *
7821 * This is the callback function for mailbox timer. The mailbox
7822 * timer is armed when a new mailbox command is issued and the timer
7823 * is deleted when the mailbox completes. The function is called by
7824 * the kernel timer code when a mailbox does not complete within
7825 * expected time. This function wakes up the worker thread to
7826 * process the mailbox timeout and returns. All the processing is
7827 * done by the worker thread function lpfc_mbox_timeout_handler.
7828 **/
7829void
f22eb4d3 7830lpfc_mbox_timeout(struct timer_list *t)
da0436e9 7831{
f22eb4d3 7832 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7833 unsigned long iflag;
7834 uint32_t tmo_posted;
7835
7836 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7837 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7838 if (!tmo_posted)
7839 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7840 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7841
7842 if (!tmo_posted)
7843 lpfc_worker_wake_up(phba);
7844 return;
7845}
7846
7847/**
7848 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7849 * are pending
7850 * @phba: Pointer to HBA context object.
7851 *
7852 * This function checks if any mailbox completions are present on the mailbox
7853 * completion queue.
7854 **/
3bb11fc5 7855static bool
7856lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7857{
7858
7859 uint32_t idx;
7860 struct lpfc_queue *mcq;
7861 struct lpfc_mcqe *mcqe;
7862 bool pending_completions = false;
7365f6fd 7863 uint8_t qe_valid;
7864
7865 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7866 return false;
7867
7868 /* Check for completions on mailbox completion queue */
7869
7870 mcq = phba->sli4_hba.mbx_cq;
7871 idx = mcq->hba_index;
7365f6fd 7872 qe_valid = mcq->qe_valid;
7873 while (bf_get_le32(lpfc_cqe_valid,
7874 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7875 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7876 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7877 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7878 pending_completions = true;
7879 break;
7880 }
7881 idx = (idx + 1) % mcq->entry_count;
7882 if (mcq->hba_index == idx)
7883 break;
7884
7885 /* if the index wrapped around, toggle the valid bit */
7886 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7887 qe_valid = (qe_valid) ? 0 : 1;
7888 }
7889 return pending_completions;
7890
7891}
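/*
 * Note on the scan above: on ports with pc_sli4_params.cqav, the valid
 * sense inverts each time the host wraps the CQ, so an entry is new only
 * while its valid bit matches the expected qe_valid phase; e.g. after one
 * wrap of a hypothetical 256-entry CQ, new CQEs carry valid == 0 instead
 * of valid == 1.
 */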
7892
7893/**
7894 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7895 * that were missed.
7896 * @phba: Pointer to HBA context object.
7897 *
7898 * For sli4, it is possible to miss an interrupt. As such mbox completions
7899 * may be missed, causing erroneous mailbox timeouts. This function
7900 * checks to see if mbox completions are on the mailbox completion queue
7901 * and will process all the completions associated with the eq for the
7902 * mailbox completion queue.
7903 **/
d7b761b0 7904static bool
7905lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7906{
b71413dd 7907 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7908 uint32_t eqidx;
7909 struct lpfc_queue *fpeq = NULL;
657add4e 7910 struct lpfc_queue *eq;
7911 bool mbox_pending;
7912
7913 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7914 return false;
7915
7916 /* Find the EQ associated with the mbox CQ */
7917 if (sli4_hba->hdwq) {
7918 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7919 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7920 if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7921 fpeq = eq;
7922 break;
7923 }
7924 }
7925 }
7926 if (!fpeq)
7927 return false;
7928
7929 /* Turn off interrupts from this EQ */
7930
b71413dd 7931 sli4_hba->sli4_eq_clr_intr(fpeq);
7932
7933 /* Check to see if a mbox completion is pending */
7934
7935 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7936
7937 /*
7938 * If a mbox completion is pending, process all the events on EQ
7939 * associated with the mbox completion queue (this could include
7940 * mailbox commands, async events, els commands, receive queue data
7941 * and fcp commands)
7942 */
7943
7944 if (mbox_pending)
7945 /* process and rearm the EQ */
7946 lpfc_sli4_process_eq(phba, fpeq);
7947 else
7948 /* Always clear and re-arm the EQ */
7949 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7950
7951 return mbox_pending;
7952
7953}
7954
7955/**
7956 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7957 * @phba: Pointer to HBA context object.
7958 *
7959 * This function is called from worker thread when a mailbox command times out.
7960 * The caller is not required to hold any locks. This function will reset the
7961 * HBA and recover all the pending commands.
7962 **/
7963void
7964lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7965{
7966 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7967 MAILBOX_t *mb = NULL;
7968
da0436e9 7969 struct lpfc_sli *psli = &phba->sli;
da0436e9 7970
7971 /* If the mailbox completed, process the completion and return */
7972 if (lpfc_sli4_process_missed_mbox_completions(phba))
7973 return;
7974
7975 if (pmbox != NULL)
7976 mb = &pmbox->u.mb;
7977 /* Check the pmbox pointer first. There is a race condition
7978 * between the mbox timeout handler getting executed in the
7979 * worklist and the mailbox actually completing. When this
7980 * race condition occurs, the mbox_active will be NULL.
7981 */
7982 spin_lock_irq(&phba->hbalock);
7983 if (pmbox == NULL) {
7984 lpfc_printf_log(phba, KERN_WARNING,
7985 LOG_MBOX | LOG_SLI,
7986 "0353 Active Mailbox cleared - mailbox timeout "
7987 "exiting\n");
7988 spin_unlock_irq(&phba->hbalock);
7989 return;
7990 }
7991
7992 /* Mbox cmd <mbxCommand> timeout */
7993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
32350664 7994 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
da0436e9
JS
7995 mb->mbxCommand,
7996 phba->pport->port_state,
7997 phba->sli.sli_flag,
7998 phba->sli.mbox_active);
7999 spin_unlock_irq(&phba->hbalock);
8000
8001 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8002 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 8003 * it to fail all outstanding SCSI IO.
da0436e9
JS
8004 */
8005 spin_lock_irq(&phba->pport->work_port_lock);
8006 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8007 spin_unlock_irq(&phba->pport->work_port_lock);
8008 spin_lock_irq(&phba->hbalock);
8009 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 8010 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
da0436e9
JS
8011 spin_unlock_irq(&phba->hbalock);
8012
db55fba8 8013 lpfc_sli_abort_fcp_rings(phba);
da0436e9
JS
8014
8015 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8016 "0345 Resetting board due to mailbox timeout\n");
8017
8018 /* Reset the HBA device */
8019 lpfc_reset_hba(phba);
8020}
8021
8022/**
8023 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8024 * @phba: Pointer to HBA context object.
8025 * @pmbox: Pointer to mailbox object.
8026 * @flag: Flag indicating how the mailbox needs to be processed.
8027 *
8028 * This function is called by discovery code and HBA management code
8029 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8030 * function gets the hbalock to protect the data structures.
8031 * The mailbox command can be submitted in polling mode, in which case
8032 * this function will wait in a polling loop for the completion of the
8033 * mailbox.
8034 * If the mailbox is submitted in no_wait mode (not polling) the
8035 * function will submit the command and return immediately without waiting
8036 * for the mailbox completion. The no_wait mode is supported only when the
8037 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
8038 * The SLI interface allows only one mailbox pending at a time. If the
8039 * mailbox is issued in polling mode and there is already a mailbox
8040 * pending, then the function will return an error. If the mailbox is issued
8041 * in NO_WAIT mode and there is a mailbox pending already, the function
8042 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8043 * The sli layer owns the mailbox object until the completion of the mailbox
8044 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8045 * return codes the caller owns the mailbox command after the return of
8046 * the function.
e59058c4 8047 **/
3772a991
JS
8048static int
8049lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8050 uint32_t flag)
dea3101e 8051{
bf07bdea 8052 MAILBOX_t *mbx;
2e0fef85 8053 struct lpfc_sli *psli = &phba->sli;
dea3101e 8054 uint32_t status, evtctr;
9940b97b 8055 uint32_t ha_copy, hc_copy;
dea3101e 8056 int i;
09372820 8057 unsigned long timeout;
dea3101e 8058 unsigned long drvr_flag = 0;
34b02dcd 8059 uint32_t word0, ldata;
dea3101e 8060 void __iomem *to_slim;
58da1ffb
JS
8061 int processing_queue = 0;
8062
8063 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8064 if (!pmbox) {
8568a4d2 8065 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 8066 /* processing mbox queue from intr_handler */
3772a991
JS
8067 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8068 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8069 return MBX_SUCCESS;
8070 }
58da1ffb 8071 processing_queue = 1;
58da1ffb
JS
8072 pmbox = lpfc_mbox_get(phba);
8073 if (!pmbox) {
8074 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8075 return MBX_SUCCESS;
8076 }
8077 }
dea3101e 8078
ed957684 8079 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 8080 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 8081 if(!pmbox->vport) {
58da1ffb 8082 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 8083 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 8084 LOG_MBOX | LOG_VPORT,
e8b62011 8085 "1806 Mbox x%x failed. No vport\n",
3772a991 8086 pmbox->u.mb.mbxCommand);
ed957684 8087 dump_stack();
58da1ffb 8088 goto out_not_finished;
ed957684
JS
8089 }
8090 }
8091
8d63f375 8092 /* If the PCI channel is in offline state, do not post mbox. */
58da1ffb
JS
8093 if (unlikely(pci_channel_offline(phba->pcidev))) {
8094 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8095 goto out_not_finished;
8096 }
8d63f375 8097
a257bf90
JS
8098 /* If HBA has a deferred error attention, fail the iocb. */
8099 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8100 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8101 goto out_not_finished;
8102 }
8103
dea3101e 8104 psli = &phba->sli;
92d7f7b0 8105
bf07bdea 8106 mbx = &pmbox->u.mb;
dea3101e 8107 status = MBX_SUCCESS;
8108
2e0fef85
JS
8109 if (phba->link_state == LPFC_HBA_ERROR) {
8110 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
8111
8112 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8113 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8114 "(%d):0311 Mailbox command x%x cannot "
8115 "issue Data: x%x x%x\n",
8116 pmbox->vport ? pmbox->vport->vpi : 0,
8117 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 8118 goto out_not_finished;
41415862
JW
8119 }
8120
bf07bdea 8121 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9940b97b
JS
8122 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8123 !(hc_copy & HC_MBINT_ENA)) {
8124 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8125 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3772a991
JS
8126 "(%d):2528 Mailbox command x%x cannot "
8127 "issue Data: x%x x%x\n",
8128 pmbox->vport ? pmbox->vport->vpi : 0,
8129 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9940b97b
JS
8130 goto out_not_finished;
8131 }
9290831f
JS
8132 }
8133
dea3101e 8134 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8135 /* Polling for a mbox command when another one is already active
8136 * is not allowed in SLI. Also, the driver must have established
8137 * SLI2 mode to queue and process multiple mbox commands.
8138 */
8139
8140 if (flag & MBX_POLL) {
2e0fef85 8141 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8142
8143 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8144 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8145 "(%d):2529 Mailbox command x%x "
8146 "cannot issue Data: x%x x%x\n",
8147 pmbox->vport ? pmbox->vport->vpi : 0,
8148 pmbox->u.mb.mbxCommand,
8149 psli->sli_flag, flag);
58da1ffb 8150 goto out_not_finished;
dea3101e 8151 }
8152
3772a991 8153 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 8154 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8155 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8157 "(%d):2530 Mailbox command x%x "
8158 "cannot issue Data: x%x x%x\n",
8159 pmbox->vport ? pmbox->vport->vpi : 0,
8160 pmbox->u.mb.mbxCommand,
8161 psli->sli_flag, flag);
58da1ffb 8162 goto out_not_finished;
dea3101e 8163 }
8164
dea3101e 8165 /* Another mailbox command is still being processed, queue this
8166 * command to be processed later.
8167 */
8168 lpfc_mbox_put(phba, pmbox);
8169
8170 /* Mbox cmd issue - BUSY */
ed957684 8171 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8172 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 8173 "x%x x%x x%x x%x\n",
92d7f7b0 8174 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
e92974f6
JS
8175 mbx->mbxCommand,
8176 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8177 psli->sli_flag, flag);
dea3101e 8178
8179 psli->slistat.mbox_busy++;
2e0fef85 8180 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8181
858c9f6c
JS
8182 if (pmbox->vport) {
8183 lpfc_debugfs_disc_trc(pmbox->vport,
8184 LPFC_DISC_TRC_MBOX_VPORT,
8185 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8186 (uint32_t)mbx->mbxCommand,
8187 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8188 }
8189 else {
8190 lpfc_debugfs_disc_trc(phba->pport,
8191 LPFC_DISC_TRC_MBOX,
8192 "MBOX Bsy: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8193 (uint32_t)mbx->mbxCommand,
8194 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8195 }
8196
2e0fef85 8197 return MBX_BUSY;
dea3101e 8198 }
8199
dea3101e 8200 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8201
8202 /* If we are not polling, we MUST be in SLI2 mode */
8203 if (flag != MBX_POLL) {
3772a991 8204 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 8205 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 8206 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8207 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8208 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8209 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8210 "(%d):2531 Mailbox command x%x "
8211 "cannot issue Data: x%x x%x\n",
8212 pmbox->vport ? pmbox->vport->vpi : 0,
8213 pmbox->u.mb.mbxCommand,
8214 psli->sli_flag, flag);
58da1ffb 8215 goto out_not_finished;
dea3101e 8216 }
8217 /* timeout active mbox command */
256ec0d0
JS
8218 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8219 1000);
8220 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e 8221 }
8222
8223 /* Mailbox cmd <cmd> issue */
ed957684 8224 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8225 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 8226 "x%x\n",
e8b62011 8227 pmbox->vport ? pmbox->vport->vpi : 0,
e92974f6
JS
8228 mbx->mbxCommand,
8229 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8230 psli->sli_flag, flag);
dea3101e 8231
bf07bdea 8232 if (mbx->mbxCommand != MBX_HEARTBEAT) {
858c9f6c
JS
8233 if (pmbox->vport) {
8234 lpfc_debugfs_disc_trc(pmbox->vport,
8235 LPFC_DISC_TRC_MBOX_VPORT,
8236 "MBOX Send vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8237 (uint32_t)mbx->mbxCommand,
8238 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8239 }
8240 else {
8241 lpfc_debugfs_disc_trc(phba->pport,
8242 LPFC_DISC_TRC_MBOX,
8243 "MBOX Send: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8244 (uint32_t)mbx->mbxCommand,
8245 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8246 }
8247 }
8248
dea3101e 8249 psli->slistat.mbox_cmd++;
8250 evtctr = psli->slistat.mbox_event;
8251
8252 /* next set own bit for the adapter and copy over command word */
bf07bdea 8253 mbx->mbxOwner = OWN_CHIP;
dea3101e 8254
3772a991 8255 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7a470277
JS
8256 /* Populate mbox extension offset word. */
8257 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 8258 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8259 = (uint8_t *)phba->mbox_ext
8260 - (uint8_t *)phba->mbox;
8261 }
8262
8263 /* Copy the mailbox extension data */
3e1f0718
JS
8264 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8265 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8266 (uint8_t *)phba->mbox_ext,
8267 pmbox->in_ext_byte_len);
7a470277
JS
8268 }
8269 /* Copy command data to host SLIM area */
bf07bdea 8270 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 8271 } else {
7a470277
JS
8272 /* Populate mbox extension offset word. */
8273 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 8274 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8275 = MAILBOX_HBA_EXT_OFFSET;
8276
8277 /* Copy the mailbox extension data */
3e1f0718 8278 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
7a470277
JS
8279 lpfc_memcpy_to_slim(phba->MBslimaddr +
8280 MAILBOX_HBA_EXT_OFFSET,
3e1f0718 8281 pmbox->ctx_buf, pmbox->in_ext_byte_len);
7a470277 8282
895427bd 8283 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8284 /* copy command data into host mbox for cmpl */
895427bd
JS
8285 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8286 MAILBOX_CMD_SIZE);
dea3101e 8287
8288 /* First copy mbox command data to HBA SLIM, skip past first
8289 word */
8290 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 8291 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e 8292 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8293
8294 /* Next copy over first word, with mbxOwner set */
bf07bdea 8295 ldata = *((uint32_t *)mbx);
dea3101e 8296 to_slim = phba->MBslimaddr;
8297 writel(ldata, to_slim);
8298 readl(to_slim); /* flush */
8299
895427bd 8300 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8301 /* switch over to host mailbox */
3772a991 8302 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 8303 }
8304
8305 wmb();
dea3101e 8306
8307 switch (flag) {
8308 case MBX_NOWAIT:
09372820 8309 /* Set up reference to mailbox command */
dea3101e 8310 psli->mbox_active = pmbox;
09372820
JS
8311 /* Interrupt board to do it */
8312 writel(CA_MBATT, phba->CAregaddr);
8313 readl(phba->CAregaddr); /* flush */
8314 /* Don't wait for it to finish, just return */
dea3101e 8315 break;
8316
8317 case MBX_POLL:
09372820 8318 /* Set up null reference to mailbox command */
dea3101e 8319 psli->mbox_active = NULL;
09372820
JS
8320 /* Interrupt board to do it */
8321 writel(CA_MBATT, phba->CAregaddr);
8322 readl(phba->CAregaddr); /* flush */
8323
3772a991 8324 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8325 /* First read mbox status word */
34b02dcd 8326 word0 = *((uint32_t *)phba->mbox);
dea3101e 8327 word0 = le32_to_cpu(word0);
8328 } else {
8329 /* First read mbox status word */
9940b97b
JS
8330 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8331 spin_unlock_irqrestore(&phba->hbalock,
8332 drvr_flag);
8333 goto out_not_finished;
8334 }
dea3101e 8335 }
8336
8337 /* Read the HBA Host Attention Register */
9940b97b
JS
8338 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8339 spin_unlock_irqrestore(&phba->hbalock,
8340 drvr_flag);
8341 goto out_not_finished;
8342 }
a183a15f
JS
8343 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8344 1000) + jiffies;
09372820 8345 i = 0;
dea3101e 8346 /* Wait for command to complete */
41415862
JW
8347 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8348 (!(ha_copy & HA_MBATT) &&
2e0fef85 8349 (phba->link_state > LPFC_WARM_START))) {
09372820 8350 if (time_after(jiffies, timeout)) {
dea3101e 8351 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8352 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 8353 drvr_flag);
58da1ffb 8354 goto out_not_finished;
dea3101e 8355 }
8356
8357 /* Check if we took a mbox interrupt while we were
8358 polling */
8359 if (((word0 & OWN_CHIP) != OWN_CHIP)
8360 && (evtctr != psli->slistat.mbox_event))
8361 break;
8362
09372820
JS
8363 if (i++ > 10) {
8364 spin_unlock_irqrestore(&phba->hbalock,
8365 drvr_flag);
8366 msleep(1);
8367 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8368 }
dea3101e 8369
3772a991 8370 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8371 /* First copy command data */
34b02dcd 8372 word0 = *((uint32_t *)phba->mbox);
dea3101e 8373 word0 = le32_to_cpu(word0);
bf07bdea 8374 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 8375 MAILBOX_t *slimmb;
34b02dcd 8376 uint32_t slimword0;
dea3101e 8377 /* Check real SLIM for any errors */
8378 slimword0 = readl(phba->MBslimaddr);
8379 slimmb = (MAILBOX_t *) & slimword0;
8380 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8381 && slimmb->mbxStatus) {
8382 psli->sli_flag &=
3772a991 8383 ~LPFC_SLI_ACTIVE;
dea3101e 8384 word0 = slimword0;
8385 }
8386 }
8387 } else {
8388 /* First copy command data */
8389 word0 = readl(phba->MBslimaddr);
8390 }
8391 /* Read the HBA Host Attention Register */
9940b97b
JS
8392 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8393 spin_unlock_irqrestore(&phba->hbalock,
8394 drvr_flag);
8395 goto out_not_finished;
8396 }
dea3101e 8397 }
8398
3772a991 8399 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8400 /* copy results back to user */
2ea259ee
JS
8401 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8402 MAILBOX_CMD_SIZE);
7a470277 8403 /* Copy the mailbox extension data */
3e1f0718 8404 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
7a470277 8405 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
3e1f0718 8406 pmbox->ctx_buf,
7a470277
JS
8407 pmbox->out_ext_byte_len);
8408 }
dea3101e 8409 } else {
8410 /* First copy command data */
bf07bdea 8411 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
2ea259ee 8412 MAILBOX_CMD_SIZE);
7a470277 8413 /* Copy the mailbox extension data */
3e1f0718
JS
8414 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8415 lpfc_memcpy_from_slim(
8416 pmbox->ctx_buf,
7a470277
JS
8417 phba->MBslimaddr +
8418 MAILBOX_HBA_EXT_OFFSET,
8419 pmbox->out_ext_byte_len);
dea3101e 8420 }
8421 }
8422
8423 writel(HA_MBATT, phba->HAregaddr);
8424 readl(phba->HAregaddr); /* flush */
8425
8426 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 8427 status = mbx->mbxStatus;
dea3101e 8428 }
8429
2e0fef85
JS
8430 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8431 return status;
58da1ffb
JS
8432
8433out_not_finished:
8434 if (processing_queue) {
da0436e9 8435 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
58da1ffb
JS
8436 lpfc_mbox_cmpl_put(phba, pmbox);
8437 }
8438 return MBX_NOT_FINISHED;
dea3101e 8439}
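
/*
 * Usage sketch (editor's illustration, not part of the driver): honoring
 * the ownership rules in the header comment of lpfc_sli_issue_mbox_s3().
 * mempool_alloc()/mempool_free() are the standard kernel mempool API;
 * "phba->mbox_mem_pool" and "my_prep_mbox()" are assumptions made only
 * for this example.
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	my_prep_mbox(phba, pmb);		// fill in pmb->u.mb
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		// Neither MBX_BUSY nor MBX_SUCCESS: ownership is back
 *		// with the caller, who must free the command.
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */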
8440
f1126688
JS
8441/**
8442 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8443 * @phba: Pointer to HBA context object.
8444 *
8445 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8446 * the driver internal pending mailbox queue. It will then try to wait out the
8447 * possible outstanding mailbox command before returning.
8448 *
8449 * Returns:
8450 * 0 - the outstanding mailbox command completed.
8451 * 1 - the wait for the outstanding mailbox command timed out.
8452 **/
8453static int
8454lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8455{
8456 struct lpfc_sli *psli = &phba->sli;
f1126688 8457 int rc = 0;
a183a15f 8458 unsigned long timeout = 0;
f1126688
JS
8459
8460 /* Mark the asynchronous mailbox command posting as blocked */
8461 spin_lock_irq(&phba->hbalock);
8462 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
f1126688
JS
8463 /* Determine how long we might wait for the active mailbox
8464 * command to be gracefully completed by firmware.
8465 */
a183a15f
JS
8466 if (phba->sli.mbox_active)
8467 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8468 phba->sli.mbox_active) *
8469 1000) + jiffies;
8470 spin_unlock_irq(&phba->hbalock);
8471
e8d3c3b1
JS
8472 /* Make sure the mailbox is really active */
8473 if (timeout)
8474 lpfc_sli4_process_missed_mbox_completions(phba);
8475
f1126688
JS
8476 /* Wait for the outstanding mailbox command to complete */
8477 while (phba->sli.mbox_active) {
8478 /* Check active mailbox complete status every 2ms */
8479 msleep(2);
8480 if (time_after(jiffies, timeout)) {
8481 /* Timeout, mark the outstanding cmd as not complete */
8482 rc = 1;
8483 break;
8484 }
8485 }
8486
8487 /* Could not cleanly block async mailbox command posting, fail it */
8488 if (rc) {
8489 spin_lock_irq(&phba->hbalock);
8490 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8491 spin_unlock_irq(&phba->hbalock);
8492 }
8493 return rc;
8494}
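
/*
 * Pairing sketch (editor's illustration): lpfc_sli4_async_mbox_block()
 * and lpfc_sli4_async_mbox_unblock() below are meant to bracket a
 * synchronous (polled) mailbox operation:
 *
 *	if (lpfc_sli4_async_mbox_block(phba) == 0) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 *
 * This is the shape used by lpfc_sli_issue_mbox_s4() for the MBX_POLL
 * with-interrupts-enabled case further down.
 */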
8495
8496/**
8497 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8498 * @phba: Pointer to HBA context object.
8499 *
8500 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8501 * commands from the driver internal pending mailbox queue. It makes sure
8502 * that there is no outstanding mailbox command before resuming posting
8503 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8504 * mailbox command, it will try to wait it out before resuming asynchronous
8505 * mailbox command posting.
8506 **/
8507static void
8508lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8509{
8510 struct lpfc_sli *psli = &phba->sli;
8511
8512 spin_lock_irq(&phba->hbalock);
8513 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8514 /* Asynchronous mailbox posting is not blocked, do nothing */
8515 spin_unlock_irq(&phba->hbalock);
8516 return;
8517 }
8518
8519 /* The outstanding synchronous mailbox command is guaranteed to be
8520 * done, whether it succeeded or timed out; after a timeout the
8521 * outstanding command is always removed. So just unblock posting of
8522 * async mailbox commands and resume.
8523 */
8524 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8525 spin_unlock_irq(&phba->hbalock);
8526
8527 /* wake up worker thread to post asynchronous mailbox command */
8528 lpfc_worker_wake_up(phba);
8529}
8530
2d843edc
JS
8531/**
8532 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8533 * @phba: Pointer to HBA context object.
8534 * @mboxq: Pointer to mailbox object.
8535 *
8536 * The function waits for the bootstrap mailbox register ready bit from
8537 * the port for twice the regular mailbox command timeout value.
8538 *
8539 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8540 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8541 **/
8542static int
8543lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8544{
8545 uint32_t db_ready;
8546 unsigned long timeout;
8547 struct lpfc_register bmbx_reg;
8548
8549 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8550 * 1000) + jiffies;
8551
8552 do {
8553 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8554 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8555 if (!db_ready)
e2ffe4d5 8556 mdelay(2);
2d843edc
JS
8557
8558 if (time_after(jiffies, timeout))
8559 return MBXERR_ERROR;
8560 } while (!db_ready);
8561
8562 return 0;
8563}
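
/*
 * Worked example (editor's note): lpfc_mbox_tmo_val() yields a timeout
 * in seconds, so for a command with, say, a 30 second mailbox timeout
 * the loop above polls the BMBX ready bit in 2 ms steps for up to
 * msecs_to_jiffies(30 * 1000) jiffies before giving up with
 * MBXERR_ERROR.
 */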
8564
da0436e9
JS
8565/**
8566 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8567 * @phba: Pointer to HBA context object.
8568 * @mboxq: Pointer to mailbox object.
8569 *
8570 * The function posts a mailbox to the port. The mailbox is expected
8571 * to be completely filled in and ready for the port to operate on it.
8572 * This routine executes a synchronous completion operation on the
8573 * mailbox by polling for its completion.
8574 *
8575 * The caller must not be holding any locks when calling this routine.
8576 *
8577 * Returns:
8578 * MBX_SUCCESS - mailbox posted successfully
8579 * Any of the MBX error values.
8580 **/
8581static int
8582lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8583{
8584 int rc = MBX_SUCCESS;
8585 unsigned long iflag;
da0436e9
JS
8586 uint32_t mcqe_status;
8587 uint32_t mbx_cmnd;
da0436e9
JS
8588 struct lpfc_sli *psli = &phba->sli;
8589 struct lpfc_mqe *mb = &mboxq->u.mqe;
8590 struct lpfc_bmbx_create *mbox_rgn;
8591 struct dma_address *dma_address;
da0436e9
JS
8592
8593 /*
8594 * Only one mailbox can be active to the bootstrap mailbox region
8595 * at a time and there is no queueing provided.
8596 */
8597 spin_lock_irqsave(&phba->hbalock, iflag);
8598 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8599 spin_unlock_irqrestore(&phba->hbalock, iflag);
8600 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8601 "(%d):2532 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8602 "cannot issue Data: x%x x%x\n",
8603 mboxq->vport ? mboxq->vport->vpi : 0,
8604 mboxq->u.mb.mbxCommand,
a183a15f
JS
8605 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8606 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8607 psli->sli_flag, MBX_POLL);
8608 return MBXERR_ERROR;
8609 }
8610 /* The server grabs the token and owns it until release */
8611 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8612 phba->sli.mbox_active = mboxq;
8613 spin_unlock_irqrestore(&phba->hbalock, iflag);
8614
2d843edc
JS
8615 /* wait for bootstrap mbox register readiness */
8616 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8617 if (rc)
8618 goto exit;
da0436e9
JS
8619 /*
8620 * Initialize the bootstrap memory region to avoid stale data areas
8621 * in the mailbox post. Then copy the caller's mailbox contents to
8622 * the bmbx mailbox region.
8623 */
8624 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8625 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
48f8fdb4
JS
8626 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8627 sizeof(struct lpfc_mqe));
da0436e9
JS
8628
8629 /* Post the high mailbox dma address to the port and wait for ready. */
8630 dma_address = &phba->sli4_hba.bmbx.dma_address;
8631 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8632
2d843edc
JS
8633 /* wait for bootstrap mbox register ready after hi-address write */
8634 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8635 if (rc)
8636 goto exit;
da0436e9
JS
8637
8638 /* Post the low mailbox dma address to the port. */
8639 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 8640
2d843edc
JS
8641 /* wait for bootstrap mbox register ready after low-address write */
8642 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8643 if (rc)
8644 goto exit;
da0436e9
JS
8645
8646 /*
8647 * Read the CQ to ensure the mailbox has completed.
8648 * If so, update the mailbox status so that the upper layers
8649 * can complete the request normally.
8650 */
48f8fdb4
JS
8651 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8652 sizeof(struct lpfc_mqe));
da0436e9 8653 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
48f8fdb4
JS
8654 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8655 sizeof(struct lpfc_mcqe));
da0436e9 8656 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
0558056c
JS
8657 /*
8658 * When the CQE status indicates a failure and the mailbox status
8659 * indicates success then copy the CQE status into the mailbox status
8660 * (and prefix it with x4000).
8661 */
da0436e9 8662 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
0558056c
JS
8663 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8664 bf_set(lpfc_mqe_status, mb,
8665 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 8666 rc = MBXERR_ERROR;
d7c47992
JS
8667 } else
8668 lpfc_sli4_swap_str(phba, mboxq);
da0436e9
JS
8669
8670 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8671 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
da0436e9
JS
8672 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8673 " x%x x%x CQ: x%x x%x x%x x%x\n",
a183a15f
JS
8674 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8675 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8676 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8677 bf_get(lpfc_mqe_status, mb),
8678 mb->un.mb_words[0], mb->un.mb_words[1],
8679 mb->un.mb_words[2], mb->un.mb_words[3],
8680 mb->un.mb_words[4], mb->un.mb_words[5],
8681 mb->un.mb_words[6], mb->un.mb_words[7],
8682 mb->un.mb_words[8], mb->un.mb_words[9],
8683 mb->un.mb_words[10], mb->un.mb_words[11],
8684 mb->un.mb_words[12], mboxq->mcqe.word0,
8685 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8686 mboxq->mcqe.trailer);
8687exit:
8688 /* We are holding the token, no lock needed when releasing it */
8689 spin_lock_irqsave(&phba->hbalock, iflag);
8690 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8691 phba->sli.mbox_active = NULL;
8692 spin_unlock_irqrestore(&phba->hbalock, iflag);
8693 return rc;
8694}
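
/*
 * Protocol summary (editor's note) for the bootstrap mailbox exchange
 * implemented above:
 *
 *	1. Wait for the BMBX ready bit.
 *	2. Copy the MQE into the bootstrap region and write the high
 *	   half of its DMA address to BMBXregaddr; wait for ready again.
 *	3. Write the low half of the DMA address; wait for ready a
 *	   third time.
 *	4. Copy the MQE and MCQE back out and, when the MCQE reports a
 *	   failure that the MQE status does not, fold the MCQE status
 *	   into the MQE status prefixed with LPFC_MBX_ERROR_RANGE.
 */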
8695
8696/**
8697 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8698 * @phba: Pointer to HBA context object.
8699 * @mboxq: Pointer to mailbox object.
8700 * @flag: Flag indicating how the mailbox needs to be processed.
8701 *
8702 * This function is called by discovery code and HBA management code to submit
8703 * a mailbox command to firmware with SLI-4 interface spec.
8704 *
8705 * Return codes: the caller owns the mailbox command after the return of the
8706 * function.
8707 **/
8708static int
8709lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8710 uint32_t flag)
8711{
8712 struct lpfc_sli *psli = &phba->sli;
8713 unsigned long iflags;
8714 int rc;
8715
b76f2dc9
JS
8716 /* dump from issue mailbox command if setup */
8717 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8718
8fa38513
JS
8719 rc = lpfc_mbox_dev_check(phba);
8720 if (unlikely(rc)) {
8721 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8722 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8fa38513
JS
8723 "cannot issue Data: x%x x%x\n",
8724 mboxq->vport ? mboxq->vport->vpi : 0,
8725 mboxq->u.mb.mbxCommand,
a183a15f
JS
8726 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8727 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8fa38513
JS
8728 psli->sli_flag, flag);
8729 goto out_not_finished;
8730 }
8731
da0436e9
JS
8732 /* Detect polling mode and jump to a handler */
8733 if (!phba->sli4_hba.intr_enable) {
8734 if (flag == MBX_POLL)
8735 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8736 else
8737 rc = -EIO;
8738 if (rc != MBX_SUCCESS)
0558056c 8739 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 8740 "(%d):2541 Mailbox command x%x "
cc459f19
JS
8741 "(x%x/x%x) failure: "
8742 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8743 "Data: x%x x%x\n,",
da0436e9
JS
8744 mboxq->vport ? mboxq->vport->vpi : 0,
8745 mboxq->u.mb.mbxCommand,
a183a15f
JS
8746 lpfc_sli_config_mbox_subsys_get(phba,
8747 mboxq),
8748 lpfc_sli_config_mbox_opcode_get(phba,
8749 mboxq),
cc459f19
JS
8750 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8751 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8752 bf_get(lpfc_mcqe_ext_status,
8753 &mboxq->mcqe),
da0436e9
JS
8754 psli->sli_flag, flag);
8755 return rc;
8756 } else if (flag == MBX_POLL) {
f1126688
JS
8757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8758 "(%d):2542 Try to issue mailbox command "
7365f6fd 8759 "x%x (x%x/x%x) synchronously ahead of async "
f1126688 8760 "mailbox command queue: x%x x%x\n",
da0436e9
JS
8761 mboxq->vport ? mboxq->vport->vpi : 0,
8762 mboxq->u.mb.mbxCommand,
a183a15f
JS
8763 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8764 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 8765 psli->sli_flag, flag);
f1126688
JS
8766 /* Try to block the asynchronous mailbox posting */
8767 rc = lpfc_sli4_async_mbox_block(phba);
8768 if (!rc) {
8769 /* Successfully blocked, now issue sync mbox cmd */
8770 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8771 if (rc != MBX_SUCCESS)
cc459f19 8772 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 8773 LOG_MBOX | LOG_SLI,
cc459f19
JS
8774 "(%d):2597 Sync Mailbox command "
8775 "x%x (x%x/x%x) failure: "
8776 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8777 "Data: x%x x%x\n,",
8778 mboxq->vport ? mboxq->vport->vpi : 0,
a183a15f
JS
8779 mboxq->u.mb.mbxCommand,
8780 lpfc_sli_config_mbox_subsys_get(phba,
8781 mboxq),
8782 lpfc_sli_config_mbox_opcode_get(phba,
8783 mboxq),
cc459f19
JS
8784 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8785 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8786 bf_get(lpfc_mcqe_ext_status,
8787 &mboxq->mcqe),
a183a15f 8788 psli->sli_flag, flag);
f1126688
JS
8789 /* Unblock the async mailbox posting afterward */
8790 lpfc_sli4_async_mbox_unblock(phba);
8791 }
8792 return rc;
da0436e9
JS
8793 }
8794
8795 /* Now, interrupt mode asynchronous mailbox command */
8796 rc = lpfc_mbox_cmd_check(phba, mboxq);
8797 if (rc) {
8798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8799 "(%d):2543 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8800 "cannot issue Data: x%x x%x\n",
8801 mboxq->vport ? mboxq->vport->vpi : 0,
8802 mboxq->u.mb.mbxCommand,
a183a15f
JS
8803 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8804 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8805 psli->sli_flag, flag);
8806 goto out_not_finished;
8807 }
da0436e9
JS
8808
8809 /* Put the mailbox command to the driver internal FIFO */
8810 psli->slistat.mbox_busy++;
8811 spin_lock_irqsave(&phba->hbalock, iflags);
8812 lpfc_mbox_put(phba, mboxq);
8813 spin_unlock_irqrestore(&phba->hbalock, iflags);
8814 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8815 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 8816 "x%x (x%x/x%x) x%x x%x x%x\n",
da0436e9
JS
8817 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8818 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
a183a15f
JS
8819 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8820 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8821 phba->pport->port_state,
8822 psli->sli_flag, MBX_NOWAIT);
8823 /* Wake up worker thread to transport mailbox command from head */
8824 lpfc_worker_wake_up(phba);
8825
8826 return MBX_BUSY;
8827
8828out_not_finished:
8829 return MBX_NOT_FINISHED;
8830}
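
/*
 * Dispatch summary (editor's note) for lpfc_sli_issue_mbox_s4() above:
 *
 *	interrupts disabled, MBX_POLL   -> lpfc_sli4_post_sync_mbox()
 *	interrupts disabled, MBX_NOWAIT -> -EIO (cannot complete async)
 *	interrupts enabled,  MBX_POLL   -> block async posting, post
 *	                                   synchronously, then unblock
 *	interrupts enabled,  MBX_NOWAIT -> enqueue and wake the worker
 *	                                   thread, returning MBX_BUSY
 */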
8831
8832/**
8833 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8834 * @phba: Pointer to HBA context object.
8835 *
8836 * This function is called by the worker thread to send a mailbox command to
8837 * SLI4 HBA firmware.
8838 *
8839 **/
8840int
8841lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8842{
8843 struct lpfc_sli *psli = &phba->sli;
8844 LPFC_MBOXQ_t *mboxq;
8845 int rc = MBX_SUCCESS;
8846 unsigned long iflags;
8847 struct lpfc_mqe *mqe;
8848 uint32_t mbx_cmnd;
8849
8850 /* Check interrupt mode before posting async mailbox command */
8851 if (unlikely(!phba->sli4_hba.intr_enable))
8852 return MBX_NOT_FINISHED;
8853
8854 /* Check for mailbox command service token */
8855 spin_lock_irqsave(&phba->hbalock, iflags);
8856 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8857 spin_unlock_irqrestore(&phba->hbalock, iflags);
8858 return MBX_NOT_FINISHED;
8859 }
8860 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8861 spin_unlock_irqrestore(&phba->hbalock, iflags);
8862 return MBX_NOT_FINISHED;
8863 }
8864 if (unlikely(phba->sli.mbox_active)) {
8865 spin_unlock_irqrestore(&phba->hbalock, iflags);
8866 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8867 "0384 There is pending active mailbox cmd\n");
8868 return MBX_NOT_FINISHED;
8869 }
8870 /* Take the mailbox command service token */
8871 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8872
8873 /* Get the next mailbox command from head of queue */
8874 mboxq = lpfc_mbox_get(phba);
8875
8876 /* If no more mailbox commands are waiting to be posted, we're done */
8877 if (!mboxq) {
8878 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8879 spin_unlock_irqrestore(&phba->hbalock, iflags);
8880 return MBX_SUCCESS;
8881 }
8882 phba->sli.mbox_active = mboxq;
8883 spin_unlock_irqrestore(&phba->hbalock, iflags);
8884
8885 /* Check device readiness for posting mailbox command */
8886 rc = lpfc_mbox_dev_check(phba);
8887 if (unlikely(rc))
8888 /* Driver clean routine will clean up pending mailbox */
8889 goto out_not_finished;
8890
8891 /* Prepare the mbox command to be posted */
8892 mqe = &mboxq->u.mqe;
8893 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8894
8895 /* Start timer for the mbox_tmo and log some mailbox post messages */
8896 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 8897 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
da0436e9
JS
8898
8899 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8900 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
da0436e9
JS
8901 "x%x x%x\n",
8902 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
a183a15f
JS
8903 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8904 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8905 phba->pport->port_state, psli->sli_flag);
8906
8907 if (mbx_cmnd != MBX_HEARTBEAT) {
8908 if (mboxq->vport) {
8909 lpfc_debugfs_disc_trc(mboxq->vport,
8910 LPFC_DISC_TRC_MBOX_VPORT,
8911 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8912 mbx_cmnd, mqe->un.mb_words[0],
8913 mqe->un.mb_words[1]);
8914 } else {
8915 lpfc_debugfs_disc_trc(phba->pport,
8916 LPFC_DISC_TRC_MBOX,
8917 "MBOX Send: cmd:x%x mb:x%x x%x",
8918 mbx_cmnd, mqe->un.mb_words[0],
8919 mqe->un.mb_words[1]);
8920 }
8921 }
8922 psli->slistat.mbox_cmd++;
8923
8924 /* Post the mailbox command to the port */
8925 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8926 if (rc != MBX_SUCCESS) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8928 "(%d):2533 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8929 "cannot issue Data: x%x x%x\n",
8930 mboxq->vport ? mboxq->vport->vpi : 0,
8931 mboxq->u.mb.mbxCommand,
a183a15f
JS
8932 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8933 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8934 psli->sli_flag, MBX_NOWAIT);
8935 goto out_not_finished;
8936 }
8937
8938 return rc;
8939
8940out_not_finished:
8941 spin_lock_irqsave(&phba->hbalock, iflags);
d7069f09
JS
8942 if (phba->sli.mbox_active) {
8943 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8944 __lpfc_mbox_cmpl_put(phba, mboxq);
8945 /* Release the token */
8946 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8947 phba->sli.mbox_active = NULL;
8948 }
da0436e9
JS
8949 spin_unlock_irqrestore(&phba->hbalock, iflags);
8950
8951 return MBX_NOT_FINISHED;
8952}
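
/*
 * Editor's note on the token discipline above: LPFC_SLI_MBOX_ACTIVE in
 * psli->sli_flag acts as a single-owner service token. It is taken
 * under hbalock before a command is dequeued, and the out_not_finished
 * path releases it (and clears phba->sli.mbox_active) only when this
 * invocation actually owned the active command, so a failed post can
 * never release a token held by another path.
 */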
8953
8954/**
8955 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8956 * @phba: Pointer to HBA context object.
8957 * @pmbox: Pointer to mailbox object.
8958 * @flag: Flag indicating how the mailbox needs to be processed.
8959 *
8960 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8961 * the API jump table function pointer from the lpfc_hba struct.
8962 *
8963 * Return codes: the caller owns the mailbox command after the return of the
8964 * function.
8965 **/
8966int
8967lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8968{
8969 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8970}
8971
8972/**
25985edc 8973 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
da0436e9
JS
8974 * @phba: The hba struct for which this call is being executed.
8975 * @dev_grp: The HBA PCI-Device group number.
8976 *
8977 * This routine sets up the mbox interface API function jump table in @phba
8978 * struct.
8979 * Returns: 0 - success, -ENODEV - failure.
8980 **/
8981int
8982lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8983{
8984
8985 switch (dev_grp) {
8986 case LPFC_PCI_DEV_LP:
8987 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8988 phba->lpfc_sli_handle_slow_ring_event =
8989 lpfc_sli_handle_slow_ring_event_s3;
8990 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8991 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8992 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8993 break;
8994 case LPFC_PCI_DEV_OC:
8995 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8996 phba->lpfc_sli_handle_slow_ring_event =
8997 lpfc_sli_handle_slow_ring_event_s4;
8998 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8999 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9000 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9001 break;
9002 default:
9003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9004 "1420 Invalid HBA PCI-device group: 0x%x\n",
9005 dev_grp);
9006 return -ENODEV;
9007 break;
9008 }
9009 return 0;
9010}
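
/*
 * Usage sketch (editor's illustration): the jump table is typically
 * populated once at attach time, keyed by the PCI device group, e.g.:
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *
 * after which lpfc_sli_issue_mbox() above dispatches through
 * phba->lpfc_sli_issue_mbox to the _s3 or _s4 implementation.
 */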
9011
e59058c4 9012/**
3621a710 9013 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
9014 * @phba: Pointer to HBA context object.
9015 * @pring: Pointer to driver SLI ring object.
9016 * @piocb: Pointer to address of newly added command iocb.
9017 *
9018 * This function is called with hbalock held to add a command
9019 * iocb to the txq when SLI layer cannot submit the command iocb
9020 * to the ring.
9021 **/
2a9bf3d0 9022void
92d7f7b0 9023__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9024 struct lpfc_iocbq *piocb)
dea3101e 9025{
1c2ba475 9026 lockdep_assert_held(&phba->hbalock);
dea3101e 9027 /* Insert the caller's iocb in the txq tail for later processing. */
9028 list_add_tail(&piocb->list, &pring->txq);
dea3101e 9029}
9030
e59058c4 9031/**
3621a710 9032 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
9033 * @phba: Pointer to HBA context object.
9034 * @pring: Pointer to driver SLI ring object.
9035 * @piocb: Pointer to address of newly added command iocb.
9036 *
9037 * This function is called with hbalock held before a new
9038 * iocb is submitted to the firmware. This function checks
9039 * the txq and flushes the iocbs in it to the firmware before
9040 * submitting new iocbs to the firmware.
9041 * If there are iocbs in the txq which need to be submitted
9042 * to firmware, lpfc_sli_next_iocb returns the first element
9043 * of the txq after dequeuing it from txq.
9044 * If there is no iocb in the txq then the function will return
9045 * *piocb and *piocb is set to NULL. Caller needs to check
9046 * *piocb to find if there are more commands in the txq.
9047 **/
dea3101e 9048static struct lpfc_iocbq *
9049lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 9050 struct lpfc_iocbq **piocb)
dea3101e 9051{
9052 struct lpfc_iocbq * nextiocb;
9053
1c2ba475
JT
9054 lockdep_assert_held(&phba->hbalock);
9055
dea3101e 9056 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9057 if (!nextiocb) {
9058 nextiocb = *piocb;
9059 *piocb = NULL;
9060 }
9061
9062 return nextiocb;
9063}
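
/*
 * Usage sketch (editor's illustration): callers drain the txq ahead of
 * their own iocb, in the shape used by __lpfc_sli_issue_iocb_s3()
 * further down:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *
 * If the loop ends with piocb == NULL the caller's iocb was submitted;
 * otherwise it must be queued to the txq or returned as busy.
 */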
9064
e59058c4 9065/**
3772a991 9066 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 9067 * @phba: Pointer to HBA context object.
3772a991 9068 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
9069 * @piocb: Pointer to command iocb.
9070 * @flag: Flag indicating if this command can be put into txq.
9071 *
3772a991
JS
9072 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9073 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9074 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9075 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9076 * this function allows only iocbs for posting buffers. This function finds
9077 * next available slot in the command ring and posts the command to the
9078 * available slot and writes the port attention register to request HBA start
9079 * processing new iocb. If there is no slot available in the ring and
9080 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9081 * the function returns IOCB_BUSY.
e59058c4 9082 *
3772a991
JS
9083 * This function is called with hbalock held. The function will return success
9084 * after it successfully submits the iocb to firmware or after adding it to the
9085 * txq.
e59058c4 9086 **/
98c9ea5c 9087static int
3772a991 9088__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 9089 struct lpfc_iocbq *piocb, uint32_t flag)
9090{
9091 struct lpfc_iocbq *nextiocb;
9092 IOCB_t *iocb;
895427bd 9093 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea3101e 9094
1c2ba475
JT
9095 lockdep_assert_held(&phba->hbalock);
9096
92d7f7b0
JS
9097 if (piocb->iocb_cmpl && (!piocb->vport) &&
9098 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9099 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9100 lpfc_printf_log(phba, KERN_ERR,
9101 LOG_SLI | LOG_VPORT,
e8b62011 9102 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
9103 piocb->iocb.ulpCommand);
9104 dump_stack();
9105 return IOCB_ERROR;
9106 }
9107
9108
8d63f375
LV
9109 /* If the PCI channel is in offline state, do not post iocbs. */
9110 if (unlikely(pci_channel_offline(phba->pcidev)))
9111 return IOCB_ERROR;
9112
a257bf90
JS
9113 /* If HBA has a deferred error attention, fail the iocb. */
9114 if (unlikely(phba->hba_flag & DEFER_ERATT))
9115 return IOCB_ERROR;
9116
dea3101e 9117 /*
9118 * We should never get an IOCB if we are in a < LINK_DOWN state
9119 */
2e0fef85 9120 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 9121 return IOCB_ERROR;
9122
9123 /*
9124 * Check to see if we are blocking IOCB processing because of an
0b727fea 9125 * outstanding event.
dea3101e 9126 */
0b727fea 9127 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 9128 goto iocb_busy;
9129
2e0fef85 9130 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 9131 /*
2680eeaa 9132 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 9133 * can be issued if the link is not up.
9134 */
9135 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
9136 case CMD_GEN_REQUEST64_CR:
9137 case CMD_GEN_REQUEST64_CX:
9138 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9139 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 9140 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
9141 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9142 MENLO_TRANSPORT_TYPE))
9143
9144 goto iocb_busy;
9145 break;
dea3101e 9146 case CMD_QUE_RING_BUF_CN:
9147 case CMD_QUE_RING_BUF64_CN:
dea3101e 9148 /*
9149 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9150 * completion, iocb_cmpl MUST be 0.
9151 */
9152 if (piocb->iocb_cmpl)
9153 piocb->iocb_cmpl = NULL;
9154 /*FALLTHROUGH*/
9155 case CMD_CREATE_XRI_CR:
2680eeaa
JS
9156 case CMD_CLOSE_XRI_CN:
9157 case CMD_CLOSE_XRI_CX:
dea3101e 9158 break;
9159 default:
9160 goto iocb_busy;
9161 }
9162
9163 /*
9164 * For FCP commands, we must be in a state where we can process link
9165 * attention events.
9166 */
895427bd 9167 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
92d7f7b0 9168 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 9169 goto iocb_busy;
92d7f7b0 9170 }
dea3101e 9171
dea3101e 9172 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9173 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9174 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9175
9176 if (iocb)
9177 lpfc_sli_update_ring(phba, pring);
9178 else
9179 lpfc_sli_update_full_ring(phba, pring);
9180
9181 if (!piocb)
9182 return IOCB_SUCCESS;
9183
9184 goto out_busy;
9185
9186 iocb_busy:
9187 pring->stats.iocb_cmd_delay++;
9188
9189 out_busy:
9190
9191 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 9192 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 9193 return IOCB_SUCCESS;
9194 }
9195
9196 return IOCB_BUSY;
9197}
9198
3772a991 9199/**
4f774513
JS
9200 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9201 * @phba: Pointer to HBA context object.
9202 * @piocb: Pointer to command iocb.
9203 * @sglq: Pointer to the scatter gather queue object.
9204 *
9205 * This routine converts the bpl or bde that is in the IOCB
9206 * to a sgl list for the sli4 hardware. The physical address
9207 * of the bpl/bde is converted back to a virtual address.
9208 * If the IOCB contains a BPL then the list of BDE's is
9209 * converted to sli4_sge's. If the IOCB contains a single
9210 * BDE then it is converted to a single sli_sge.
9211 * The IOCB is still in cpu endianness so the contents of
9212 * the bpl can be used without byte swapping.
9213 *
9214 * Returns valid XRI = Success, NO_XRI = Failure.
9215**/
9216static uint16_t
9217lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9218 struct lpfc_sglq *sglq)
3772a991 9219{
4f774513
JS
9220 uint16_t xritag = NO_XRI;
9221 struct ulp_bde64 *bpl = NULL;
9222 struct ulp_bde64 bde;
9223 struct sli4_sge *sgl = NULL;
1b51197d 9224 struct lpfc_dmabuf *dmabuf;
4f774513
JS
9225 IOCB_t *icmd;
9226 int numBdes = 0;
9227 int i = 0;
63e801ce
JS
9228 uint32_t offset = 0; /* accumulated offset in the sg request list */
9229 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 9230
4f774513
JS
9231 if (!piocbq || !sglq)
9232 return xritag;
9233
9234 sgl = (struct sli4_sge *)sglq->sgl;
9235 icmd = &piocbq->iocb;
6b5151fd
JS
9236 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9237 return sglq->sli4_xritag;
4f774513
JS
9238 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9239 numBdes = icmd->un.genreq64.bdl.bdeSize /
9240 sizeof(struct ulp_bde64);
9241 /* The addrHigh and addrLow fields within the IOCB
9242 * have not been byteswapped yet so there is no
9243 * need to swap them back.
9244 */
1b51197d
JS
9245 if (piocbq->context3)
9246 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9247 else
9248 return xritag;
4f774513 9249
1b51197d 9250 bpl = (struct ulp_bde64 *)dmabuf->virt;
4f774513
JS
9251 if (!bpl)
9252 return xritag;
9253
9254 for (i = 0; i < numBdes; i++) {
9255 /* Should already be byte swapped. */
28baac74
JS
9256 sgl->addr_hi = bpl->addrHigh;
9257 sgl->addr_lo = bpl->addrLow;
9258
0558056c 9259 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9260 if ((i+1) == numBdes)
9261 bf_set(lpfc_sli4_sge_last, sgl, 1);
9262 else
9263 bf_set(lpfc_sli4_sge_last, sgl, 0);
28baac74
JS
9264 /* swap the size field back to the cpu so we
9265 * can assign it to the sgl.
9266 */
9267 bde.tus.w = le32_to_cpu(bpl->tus.w);
9268 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
63e801ce
JS
9269 /* The offsets in the sgl need to be accumulated
9270 * separately for the request and reply lists.
9271 * The request is always first, the reply follows.
9272 */
9273 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9274 /* add up the reply sg entries */
9275 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9276 inbound++;
9277 /* first inbound? reset the offset */
9278 if (inbound == 1)
9279 offset = 0;
9280 bf_set(lpfc_sli4_sge_offset, sgl, offset);
f9bb2da1
JS
9281 bf_set(lpfc_sli4_sge_type, sgl,
9282 LPFC_SGE_TYPE_DATA);
63e801ce
JS
9283 offset += bde.tus.f.bdeSize;
9284 }
546fc854 9285 sgl->word2 = cpu_to_le32(sgl->word2);
4f774513
JS
9286 bpl++;
9287 sgl++;
9288 }
9289 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9290 /* The addrHigh and addrLow fields of the BDE have not
9291 * been byteswapped yet so they need to be swapped
9292 * before putting them in the sgl.
9293 */
9294 sgl->addr_hi =
9295 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9296 sgl->addr_lo =
9297 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 9298 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9299 bf_set(lpfc_sli4_sge_last, sgl, 1);
9300 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
9301 sgl->sge_len =
9302 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
9303 }
9304 return sglq->sli4_xritag;
3772a991 9305}
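
/*
 * Worked example (editor's note): for a CMD_GEN_REQUEST64_CR whose BPL
 * holds one outbound BDE of 64 bytes followed by two inbound
 * BUFF_TYPE_BDE_64I BDEs of 128 bytes each, the loop above emits three
 * sgl entries: the outbound entry gets offset 0, the first inbound
 * entry resets the accumulated offset and also gets 0, the second
 * inbound entry gets offset 128, and only the final entry has its
 * "last" bit set.
 */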
92d7f7b0 9306
e59058c4 9307/**
4f774513 9308 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 9309 * @phba: Pointer to HBA context object.
4f774513
JS
9310 * @piocb: Pointer to command iocb.
9311 * @wqe: Pointer to the work queue entry.
e59058c4 9312 *
4f774513
JS
9313 * This routine converts the iocb command to its Work Queue Entry
9314 * equivalent. The wqe pointer should not have any fields set when
9315 * this routine is called because it will memcpy over them.
9316 * This routine does not set the CQ_ID or the WQEC bits in the
9317 * wqe.
e59058c4 9318 *
4f774513 9319 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 9320 **/
cf5bf97e 9321static int
4f774513 9322lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
205e8240 9323 union lpfc_wqe128 *wqe)
cf5bf97e 9324{
5ffc266e 9325 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
9326 uint8_t ct = 0;
9327 uint32_t fip;
9328 uint32_t abort_tag;
9329 uint8_t command_type = ELS_COMMAND_NON_FIP;
9330 uint8_t cmnd;
9331 uint16_t xritag;
dcf2a4e0
JS
9332 uint16_t abrt_iotag;
9333 struct lpfc_iocbq *abrtiocbq;
4f774513 9334 struct ulp_bde64 *bpl = NULL;
f0d9bccc 9335 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
9336 int numBdes, i;
9337 struct ulp_bde64 bde;
c31098ce 9338 struct lpfc_nodelist *ndlp;
ff78d8f9 9339 uint32_t *pcmd;
1b51197d 9340 uint32_t if_type;
4f774513 9341
45ed1190 9342 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 9343 /* The fcp commands will set command type */
0c287589 9344 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 9345 command_type = FCP_COMMAND;
c868595d 9346 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
9347 command_type = ELS_COMMAND_FIP;
9348 else
9349 command_type = ELS_COMMAND_NON_FIP;
9350
b5c53958
JS
9351 if (phba->fcp_embed_io)
9352 memset(wqe, 0, sizeof(union lpfc_wqe128));
4f774513
JS
9353 /* Some of the fields are in the right position already */
9354 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
ae9e28f3
JS
9355 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9356 /* The ct field has moved so reset */
9357 wqe->generic.wqe_com.word7 = 0;
9358 wqe->generic.wqe_com.word10 = 0;
9359 }
b5c53958
JS
9360
9361 abort_tag = (uint32_t) iocbq->iotag;
9362 xritag = iocbq->sli4_xritag;
4f774513
JS
9363 /* words0-2 bpl convert bde */
9364 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
9365 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9366 sizeof(struct ulp_bde64);
4f774513
JS
9367 bpl = (struct ulp_bde64 *)
9368 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9369 if (!bpl)
9370 return IOCB_ERROR;
cf5bf97e 9371
4f774513
JS
9372 /* Should already be byte swapped. */
9373 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9374 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9375 /* swap the size field back to the cpu so we
9376 * can assign it to the sgl.
9377 */
9378 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
9379 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9380 total_len = 0;
9381 for (i = 0; i < numBdes; i++) {
9382 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9383 total_len += bde.tus.f.bdeSize;
9384 }
4f774513 9385 } else
5ffc266e 9386 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 9387
4f774513
JS
9388 iocbq->iocb.ulpIoTag = iocbq->iotag;
9389 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 9390
4f774513
JS
9391 switch (iocbq->iocb.ulpCommand) {
9392 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
9393 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9394 ndlp = iocbq->context_un.ndlp;
9395 else
9396 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
9397 if (!iocbq->iocb.ulpLe) {
9398 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9399 "2007 Only Limited Edition cmd Format"
9400 " supported 0x%x\n",
9401 iocbq->iocb.ulpCommand);
9402 return IOCB_ERROR;
9403 }
ff78d8f9 9404
5ffc266e 9405 wqe->els_req.payload_len = xmit_len;
4f774513
JS
9406 /* Els_request64 has a TMO */
9407 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9408 iocbq->iocb.ulpTimeout);
9409 /* Need a VF for word 4 set the vf bit*/
9410 bf_set(els_req64_vf, &wqe->els_req, 0);
9411 /* And a VFID for word 12 */
9412 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 9413 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
9414 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9415 iocbq->iocb.ulpContext);
9416 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9417 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 9418 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 9419 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
9420 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9421 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
9422 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9423 iocbq->context2)->virt);
1b51197d
JS
9424 if_type = bf_get(lpfc_sli_intf_if_type,
9425 &phba->sli4_hba.sli_intf);
27d6ac0a 9426 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 9427 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 9428 *pcmd == ELS_CMD_SCR ||
f60cb93b 9429 *pcmd == ELS_CMD_RSCN_XMT ||
6b5151fd 9430 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 9431 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
9432 *pcmd == ELS_CMD_PLOGI)) {
9433 bf_set(els_req64_sp, &wqe->els_req, 1);
9434 bf_set(els_req64_sid, &wqe->els_req,
9435 iocbq->vport->fc_myDID);
939723a4
JS
9436 if ((*pcmd == ELS_CMD_FLOGI) &&
9437 !(phba->fc_topology ==
9438 LPFC_TOPOLOGY_LOOP))
9439 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
9440 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9441 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 9442 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 9443 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
9444 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9445 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9446 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9447 }
c868595d 9448 }
6d368e53
JS
9449 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9450 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
9451 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9452 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9453 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9454 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9455 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9456 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
af22741c 9457 wqe->els_req.max_response_payload_len = total_len - xmit_len;
7851fe2c 9458 break;
5ffc266e 9459 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
9460 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9461 iocbq->iocb.un.ulpWord[3]);
9462 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 9463 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
9464 /* The entire sequence is transmitted for this IOCB */
9465 xmit_len = total_len;
9466 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
9467 if (phba->link_flag & LS_LOOPBACK_MODE)
9468 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
5bd5f66c 9469 /* fall through */
4f774513 9470 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
9471 /* word3 iocb=io_tag32 wqe=reserved */
9472 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
9473 /* word4 relative_offset memcpy */
9474 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
9475 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9476 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9477 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9478 LPFC_WQE_IOD_WRITE);
9479 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9480 LPFC_WQE_LENLOC_WORD12);
9481 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
9482 wqe->xmit_sequence.xmit_len = xmit_len;
9483 command_type = OTHER_COMMAND;
7851fe2c 9484 break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
		if (phba->cfg_enable_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88;	/* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
		if (phba->cfg_enable_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88;	/* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88;	/* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
		    sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
			 iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
					iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
						&wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		xritag = 0;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(*wqe));
		/* OX_ID is invariable to who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);

		/* Use CT=VPI */
		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
			ndlp->nlp_DID);
		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
			iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
			phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_SEND_FRAME:
		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
		return 0;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
			      LPFC_IO_DIF_INSERT);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}

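/*
 * Editorial note with a small illustration (not additional driver code):
 * whichever case above translated the command, the common epilogue at the
 * end of the routine programs the generic WQE words identically for every
 * request, e.g.
 *
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
 *
 * ties the eventual completion back to the originating iocbq, while
 * wqe_cmnd and wqe_cmd_type carry the command and command type chosen in
 * the switch.
 */
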
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with the ring lock held. The function will return
 * success after it successfully submits the iocb to firmware or after adding
 * it to the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes.
	 */

	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IOs already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
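
/*
 * Illustrative sketch (editorial, not driver code): the @flag argument
 * selects the busy-handling policy above. With flag 0, an ELS command that
 * cannot get an sglq is parked on the txq and IOCB_SUCCESS is returned;
 * with SLI_IOCB_RET_IOCB, the iocb is handed back with IOCB_BUSY:
 *
 *	rc = __lpfc_sli_issue_iocb_s4(phba, LPFC_ELS_RING, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		(caller keeps ownership of piocb and may retry)
 */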

/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * This routine wraps the lockless version of the issue-iocb routine,
 * calling it through the function pointer set up in the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}
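
/*
 * Illustrative sketch (editorial, not driver code): once the jump table has
 * been populated for a PCI device group, callers reach the SLI-revision-
 * specific routine transparently through the wrapper:
 *
 *	lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	...
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *
 * On an LPFC_PCI_DEV_OC (SLI-4) HBA this dispatches to
 * __lpfc_sli_issue_iocb_s4(); on LPFC_PCI_DEV_LP (SLI-3) it dispatches
 * to __lpfc_sli_issue_iocb_s3().
 */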

/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx; thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_io_buf *lpfc_cmd;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (unlikely(!phba->sli4_hba.hdwq))
			return NULL;
		/*
		 * for abort iocb hba_wqidx should already
		 * be setup based on what work queue we used.
		 */
		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
			lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
		}
		return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}
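
/*
 * Illustrative sketch (editorial, not driver code): an abort for an FCP
 * command is marked LPFC_USE_FCPWQIDX with hba_wqidx copied from the
 * command being aborted, so both land on the same WQ and ring:
 *
 *	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
 *	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocb);
 */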

/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. This function takes the appropriate lock (the ring_lock on
 * SLI-4, the hbalock on SLI-2/SLI-3), calls __lpfc_sli_issue_iocb, and
 * returns the error returned by __lpfc_sli_issue_iocb. This wrapper is
 * used by functions which do not hold any lock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
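
/*
 * Illustrative sketch (editorial, not driver code): callers that hold no
 * lock use this wrapper and let it pick the right lock itself:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		(caller-specific cleanup of elsiocb)
 *
 * The recovery action on IOCB_ERROR is owned by the caller and varies by
 * command type.
 */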

/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionality.
 *
 * This function is called with no lock held. SLI3 only.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->sli3_ring[LPFC_FCP_RING];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->sli3_ring[LPFC_EXTRA_RING];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);
}

/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}
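
/*
 * Worked example (editorial): the recovery trigger above reduces to
 *
 *	ext_status = axri->parameter & IOERR_PARAM_MASK;
 *	status == IOSTAT_LOCAL_REJECT &&
 *	    (ext_status == IOERR_SEQUENCE_TIMEOUT || ext_status == 0)
 *
 * so a WCQE carrying LOCAL_REJECT with extension SEQUENCE_TIMEOUT (newer
 * OCe/LPe firmware) or 0 (older OCe firmware) is treated as a failed ABTS
 * protocol exchange and lpfc_sli_abts_recover_port() is invoked.
 */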

/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}

/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli4_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;

	pring = phba->sli4_hba.els_wq->pring;
	pring->num_mask = LPFC_MAX_RING_MASK;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[1].profile = 0;	/* Mask 1 */
	pring->prt[1].rctl = FC_RCTL_ELS_REP;
	pring->prt[1].type = FC_TYPE_ELS;
	pring->prt[1].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[2].profile = 0;	/* Mask 2 */
	/* NameServer Inquiry */
	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
	/* NameServer */
	pring->prt[2].type = FC_TYPE_CT;
	pring->prt[2].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	pring->prt[3].profile = 0;	/* Mask 3 */
	/* NameServer response */
	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
	/* NameServer */
	pring->prt[3].type = FC_TYPE_CT;
	pring->prt[3].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	return 0;
}
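
/*
 * Worked example (editorial): an unsolicited frame arriving on the ELS ring
 * is matched against the prt[] masks above by its R_CTL/TYPE pair. An
 * inbound ELS request (R_CTL = FC_RCTL_ELS_REQ, TYPE = FC_TYPE_ELS) matches
 * prt[0] and is routed to lpfc_els_unsol_event(); a solicited nameserver CT
 * frame (R_CTL = FC_RCTL_DD_SOL_CTL, TYPE = FC_TYPE_CT) matches prt[3] and
 * is routed to lpfc_ct_unsol_event().
 */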

/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0. SLI3 only.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	pring->txcmplq_cnt = 0;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (i = 0; i < phba->cfg_hdw_queue; i++) {
			pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
			pring->flag = 0;
			pring->ringno = LPFC_FCP_RING;
			pring->txcmplq_cnt = 0;
			INIT_LIST_HEAD(&pring->txq);
			INIT_LIST_HEAD(&pring->txcmplq);
			INIT_LIST_HEAD(&pring->iocb_continueq);
			spin_lock_init(&pring->ring_lock);
		}
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue; the outstanding
 * mailbox command; and the completed mailbox command queue. It is the
 * caller's responsibility to make sure that the driver is in the proper state
 * to flush the mailbox command sub-system. Namely, the posting of mailbox
 * commands into the pending mailbox command queue from the various clients
 * must be stopped; either the HBA is in a state that it will never work on
 * the outstanding mailbox command (such as in EEH or ERATT conditions) or the
 * outstanding mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);

	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
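
/*
 * Illustrative sketch (editorial, not driver code; example_mbox_cmpl is a
 * hypothetical completion handler): a handler can tell a flushed command
 * from one the port actually executed by checking the status set above:
 *
 *	static void
 *	example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			(command was flushed, never reached the port)
 *			return;
 *		}
 *		(normal completion processing)
 *	}
 */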

/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 * Also issue ABTS for everything on the txcmplq
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			spin_lock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}

/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers, and mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to cleanup driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (must be a multiple of the word size).
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
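
/*
 * Illustrative usage (editorial, not driver code; sli_region is a
 * hypothetical source buffer): copy a 16-byte little-endian SLI region
 * into a CPU-order buffer. Note that @cnt is a byte count, not a word
 * count:
 *
 *	uint32_t cpu_buf[4];
 *
 *	lpfc_sli_pcimem_bcopy(sli_region, cpu_buf, sizeof(cpu_buf));
 *
 * Each of the four 32-bit words is byte-swapped on big-endian hosts and
 * copied verbatim on little-endian hosts.
 */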

/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (must be a multiple of the word size).
 *
 * This function is used for copying data from a data structure
 * in big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
10939
e59058c4 10940/**
3621a710 10941 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
10942 * @phba: Pointer to HBA context object.
10943 *
10944 * When HBQ is enabled, buffers are searched based on tags. This function
10945 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10946 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10947 * does not conflict with tags of buffer posted for unsolicited events.
10948 * The function returns the allocated tag. The function is called with
10949 * no locks held.
10950 **/
76bb24ef
JS
10951uint32_t
10952lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10953{
10954 spin_lock_irq(&phba->hbalock);
10955 phba->buffer_tag_count++;
10956 /*
10957 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10958 	 * from a tag assigned by HBQ.
10959 */
10960 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10961 spin_unlock_irq(&phba->hbalock);
10962 return phba->buffer_tag_count;
10963}
10964
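/*
 * Editor's sketch (illustrative only, not part of the driver):
 * tagging a driver buffer before it is posted with a
 * CMD_QUE_XRI64_CX iocb. 'mp' is a hypothetical, already-allocated
 * buffer.
 */
#if 0
static void example_tag_buffer(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	/* QUE_BUFTAG_BIT is set inside, so the tag cannot collide with HBQ tags */
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
}
#endif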
e59058c4 10965/**
3621a710 10966 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
10967 * @phba: Pointer to HBA context object.
10968 * @pring: Pointer to driver SLI ring object.
10969 * @tag: Buffer tag.
10970 *
10971 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10972 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10973 * iocb is posted to the response ring with the tag of the buffer.
10974 * This function searches the pring->postbufq list using the tag
10975 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10976 * iocb. If the buffer is found then the lpfc_dmabuf object of the
10977 * buffer is returned to the caller, else NULL is returned.
10978 * This function is called with no lock held.
10979 **/
76bb24ef
JS
10980struct lpfc_dmabuf *
10981lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10982 uint32_t tag)
10983{
10984 struct lpfc_dmabuf *mp, *next_mp;
10985 struct list_head *slp = &pring->postbufq;
10986
25985edc 10987 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
10988 spin_lock_irq(&phba->hbalock);
10989 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10990 if (mp->buffer_tag == tag) {
10991 list_del_init(&mp->list);
10992 pring->postbufq_cnt--;
10993 spin_unlock_irq(&phba->hbalock);
10994 return mp;
10995 }
10996 }
10997
10998 spin_unlock_irq(&phba->hbalock);
10999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 11000 "0402 Cannot find virtual addr for buffer tag on "
32350664 11001 "ring %d Data x%lx x%px x%px x%x\n",
76bb24ef
JS
11002 pring->ringno, (unsigned long) tag,
11003 slp->next, slp->prev, pring->postbufq_cnt);
11004
11005 return NULL;
11006}
dea3101e 11007
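/*
 * Editor's sketch (illustrative only, not part of the driver):
 * reclaiming a posted buffer when a CMD_IOCB_RET_XRI64_CX iocb
 * completes. How the tag is extracted from the response iocb is
 * elided here.
 */
#if 0
static void example_reclaim_tagged_buf(struct lpfc_hba *phba,
				       struct lpfc_sli_ring *pring,
				       uint32_t tag)
{
	struct lpfc_dmabuf *mp;

	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}
#endif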
e59058c4 11008/**
3621a710 11009 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
11010 * @phba: Pointer to HBA context object.
11011 * @pring: Pointer to driver SLI ring object.
11012 * @phys: DMA address of the buffer.
11013 *
11014 * This function searches the buffer list using the dma_address
11015 * of unsolicited event to find the driver's lpfc_dmabuf object
11016 * corresponding to the dma_address. The function returns the
11017 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11018 * This function is called by the ct and els unsolicited event
11019 * handlers to get the buffer associated with the unsolicited
11020 * event.
11021 *
11022 * This function is called with no lock held.
11023 **/
dea3101e 11024struct lpfc_dmabuf *
11025lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11026 dma_addr_t phys)
11027{
11028 struct lpfc_dmabuf *mp, *next_mp;
11029 struct list_head *slp = &pring->postbufq;
11030
25985edc 11031 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 11032 spin_lock_irq(&phba->hbalock);
dea3101e 11033 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11034 if (mp->phys == phys) {
11035 list_del_init(&mp->list);
11036 pring->postbufq_cnt--;
2e0fef85 11037 spin_unlock_irq(&phba->hbalock);
dea3101e 11038 return mp;
11039 }
11040 }
11041
2e0fef85 11042 spin_unlock_irq(&phba->hbalock);
dea3101e 11043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 11044 "0410 Cannot find virtual addr for mapped buf on "
32350664 11045 "ring %d Data x%llx x%px x%px x%x\n",
e8b62011 11046 pring->ringno, (unsigned long long)phys,
dea3101e 11047 slp->next, slp->prev, pring->postbufq_cnt);
11048 return NULL;
11049}
11050
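/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * postbufq round trip. A buffer is parked with
 * lpfc_sli_ringpostbuf_put() and later recovered by DMA address when
 * the unsolicited CT/ELS event referencing it arrives.
 */
#if 0
static struct lpfc_dmabuf *
example_postbuf_round_trip(struct lpfc_hba *phba,
			   struct lpfc_sli_ring *pring,
			   struct lpfc_dmabuf *mp)
{
	lpfc_sli_ringpostbuf_put(phba, pring, mp);	/* always returns 0 */

	/* ... later, from the unsolicited event handler ... */
	return lpfc_sli_ringpostbuf_get(phba, pring, mp->phys);
}
#endif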
e59058c4 11051/**
3621a710 11052 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
11053 * @phba: Pointer to HBA context object.
11054 * @cmdiocb: Pointer to driver command iocb object.
11055 * @rspiocb: Pointer to driver response iocb object.
11056 *
11057 * This function is the completion handler for the abort iocbs for
11058 * ELS commands. This function is called from the ELS ring event
11059 * handler with no lock held. This function frees memory resources
11060 * associated with the abort iocb.
11061 **/
dea3101e 11062static void
2e0fef85
JS
11063lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11064 struct lpfc_iocbq *rspiocb)
dea3101e 11065{
2e0fef85 11066 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 11067 uint16_t abort_iotag, abort_context;
ff78d8f9 11068 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
11069
11070 if (irsp->ulpStatus) {
ff78d8f9
JS
11071
11072 /*
11073 * Assume that the port already completed and returned, or
11074 		 * will return the iocb. Just log the message.
11075 */
2680eeaa
JS
11076 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11077 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11078
2e0fef85 11079 spin_lock_irq(&phba->hbalock);
45ed1190 11080 if (phba->sli_rev < LPFC_SLI_REV4) {
faa832e9
JS
11081 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11082 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11083 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11084 spin_unlock_irq(&phba->hbalock);
11085 goto release_iocb;
11086 }
45ed1190
JS
11087 if (abort_iotag != 0 &&
11088 abort_iotag <= phba->sli.last_iotag)
11089 abort_iocb =
11090 phba->sli.iocbq_lookup[abort_iotag];
11091 } else
11092 /* For sli4 the abort_tag is the XRI,
11093 * so the abort routine puts the iotag of the iocb
11094 * being aborted in the context field of the abort
11095 * IOCB.
11096 */
11097 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 11098
2a9bf3d0 11099 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
32350664 11100 "0327 Cannot abort els iocb x%px "
2a9bf3d0
JS
11101 "with tag %x context %x, abort status %x, "
11102 "abort code %x\n",
11103 abort_iocb, abort_iotag, abort_context,
11104 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 11105
ff78d8f9 11106 spin_unlock_irq(&phba->hbalock);
29601228
JS
11107 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11108 irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
11109 lpfc_sli_release_iocbq(phba, abort_iocb);
2680eeaa 11110 }
faa832e9 11111release_iocb:
604a3e30 11112 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 11113 return;
11114}
11115
e59058c4 11116/**
3621a710 11117 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
11118 * @phba: Pointer to HBA context object.
11119 * @cmdiocb: Pointer to driver command iocb object.
11120 * @rspiocb: Pointer to driver response iocb object.
11121 *
11122 * The function is called from SLI ring event handler with no
11123 * lock held. This function is the completion handler for ELS commands
11124 * which are aborted. The function frees memory resources used for
11125 * the aborted ELS commands.
11126 **/
92d7f7b0
JS
11127static void
11128lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11129 struct lpfc_iocbq *rspiocb)
11130{
11131 IOCB_t *irsp = &rspiocb->iocb;
11132
11133 /* ELS cmd tag <ulpIoTag> completes */
11134 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 11135 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 11136 "x%x x%x x%x\n",
e8b62011 11137 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 11138 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
11139 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11140 lpfc_ct_free_iocb(phba, cmdiocb);
11141 else
11142 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
11143 return;
11144}
11145
e59058c4 11146/**
5af5eee7 11147 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
11148 * @phba: Pointer to HBA context object.
11149 * @pring: Pointer to driver SLI ring object.
11150 * @cmdiocb: Pointer to driver command iocb object.
11151 *
5af5eee7
JS
11152 * This function issues an abort iocb for the provided command iocb down to
11153 * the port. Unless the outstanding command iocb is itself an abort
11154 * request, this function issues the abort unconditionally. This function is
11155 * called with hbalock held. The function returns 0 when it fails due to
11156 * memory allocation failure or when the command iocb is an abort request.
e59058c4 11157 **/
5af5eee7
JS
11158static int
11159lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 11160 struct lpfc_iocbq *cmdiocb)
dea3101e 11161{
2e0fef85 11162 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 11163 struct lpfc_iocbq *abtsiocbp;
dea3101e 11164 IOCB_t *icmd = NULL;
11165 IOCB_t *iabt = NULL;
5af5eee7 11166 int retval;
7e56aa25 11167 unsigned long iflags;
faa832e9 11168 struct lpfc_nodelist *ndlp;
07951076 11169
1c2ba475
JT
11170 lockdep_assert_held(&phba->hbalock);
11171
92d7f7b0
JS
11172 /*
11173 * There are certain command types we don't want to abort. And we
11174 * don't want to abort commands that are already in the process of
11175 * being aborted.
07951076
JS
11176 */
11177 icmd = &cmdiocb->iocb;
2e0fef85 11178 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
11179 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11180 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
11181 return 0;
11182
dea3101e 11183 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 11184 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 11185 if (abtsiocbp == NULL)
11186 return 0;
dea3101e 11187
07951076 11188 /* This signals the response to set the correct status
341af102 11189 * before calling the completion handler
07951076
JS
11190 */
11191 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11192
dea3101e 11193 iabt = &abtsiocbp->iocb;
07951076
JS
11194 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11195 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 11196 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 11197 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190 11198 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
faa832e9 11199 } else {
da0436e9 11200 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
faa832e9
JS
11201 if (pring->ringno == LPFC_ELS_RING) {
11202 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11203 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11204 }
11205 }
07951076
JS
11206 iabt->ulpLe = 1;
11207 iabt->ulpClass = icmd->ulpClass;
dea3101e 11208
5ffc266e 11209 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11210 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
341af102
JS
11211 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11212 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11213 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11214 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11215
2e0fef85 11216 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
11217 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11218 else
11219 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 11220
07951076 11221 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
e6c6acc0 11222 abtsiocbp->vport = vport;
5b8bd0c9 11223
e8b62011
JS
11224 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11225 "0339 Abort xri x%x, original iotag x%x, "
11226 "abort cmd iotag x%x\n",
2a9bf3d0 11227 iabt->un.acxri.abortIoTag,
e8b62011 11228 iabt->un.acxri.abortContextTag,
2a9bf3d0 11229 abtsiocbp->iotag);
7e56aa25
JS
11230
11231 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
11232 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11233 if (unlikely(pring == NULL))
9bd2bff5 11234 return 0;
7e56aa25
JS
11235 /* Note: both hbalock and ring_lock need to be set here */
11236 spin_lock_irqsave(&pring->ring_lock, iflags);
11237 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11238 abtsiocbp, 0);
11239 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11240 } else {
11241 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11242 abtsiocbp, 0);
11243 }
dea3101e 11244
d7c255b2
JS
11245 if (retval)
11246 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
11247
11248 /*
11249 * Caller to this routine should check for IOCB_ERROR
11250 * and handle it properly. This routine no longer removes
11251 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11252 */
11253 return retval;
11254}
11255
11256/**
11257 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11258 * @phba: Pointer to HBA context object.
11259 * @pring: Pointer to driver SLI ring object.
11260 * @cmdiocb: Pointer to driver command iocb object.
11261 *
11262 * This function issues an abort iocb for the provided command iocb. In case
11263 * of unloading, the abort iocb will not be issued to commands on the ELS
11264 * ring. Instead, the callback function of those commands shall be changed
11265 * so that nothing happens when they finish. This function is called with
11266 * hbalock held. The function returns 0 when the command iocb is an abort
11267 * request.
11268 **/
11269int
11270lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11271 struct lpfc_iocbq *cmdiocb)
11272{
11273 struct lpfc_vport *vport = cmdiocb->vport;
11274 int retval = IOCB_ERROR;
11275 IOCB_t *icmd = NULL;
11276
1c2ba475
JT
11277 lockdep_assert_held(&phba->hbalock);
11278
5af5eee7
JS
11279 /*
11280 * There are certain command types we don't want to abort. And we
11281 * don't want to abort commands that are already in the process of
11282 * being aborted.
11283 */
11284 icmd = &cmdiocb->iocb;
11285 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11286 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11287 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11288 return 0;
11289
1234a6d5
DK
11290 if (!pring) {
11291 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11292 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11293 else
11294 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11295 goto abort_iotag_exit;
11296 }
11297
5af5eee7
JS
11298 /*
11299 * If we're unloading, don't abort iocb on the ELS ring, but change
11300 * the callback so that nothing happens when it finishes.
11301 */
11302 if ((vport->load_flag & FC_UNLOADING) &&
11303 (pring->ringno == LPFC_ELS_RING)) {
11304 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11305 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11306 else
11307 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11308 goto abort_iotag_exit;
11309 }
11310
11311 /* Now, we try to issue the abort to the cmdiocb out */
11312 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11313
07951076 11314abort_iotag_exit:
2e0fef85
JS
11315 /*
11316 * Caller to this routine should check for IOCB_ERROR
11317 * and handle it properly. This routine no longer removes
11318 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 11319 */
2e0fef85 11320 return retval;
dea3101e 11321}
11322
5af5eee7
JS
11323/**
11324 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11325 * @phba: pointer to lpfc HBA data structure.
11326 *
11327 * This routine will abort all pending and outstanding iocbs to an HBA.
11328 **/
11329void
11330lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11331{
11332 struct lpfc_sli *psli = &phba->sli;
11333 struct lpfc_sli_ring *pring;
895427bd 11334 struct lpfc_queue *qp = NULL;
5af5eee7
JS
11335 int i;
11336
895427bd
JS
11337 if (phba->sli_rev != LPFC_SLI_REV4) {
11338 for (i = 0; i < psli->num_rings; i++) {
11339 pring = &psli->sli3_ring[i];
11340 lpfc_sli_abort_iocb_ring(phba, pring);
11341 }
11342 return;
11343 }
11344 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11345 pring = qp->pring;
11346 if (!pring)
11347 continue;
db55fba8 11348 lpfc_sli_abort_iocb_ring(phba, pring);
5af5eee7
JS
11349 }
11350}
11351
e59058c4 11352/**
3621a710 11353 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
e59058c4
JS
11354 * @iocbq: Pointer to driver iocb object.
11355 * @vport: Pointer to driver virtual port object.
11356 * @tgt_id: SCSI ID of the target.
11357 * @lun_id: LUN ID of the scsi device.
11358 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11359 *
3621a710 11360 * This function acts as an iocb filter for functions which abort or count
e59058c4
JS
11361 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11362 * 0 if the filtering criteria are met for the given iocb and will return
11363 * 1 if the filtering criteria are not met.
11364 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11365 * given iocb is for the SCSI device specified by vport, tgt_id and
11366 * lun_id parameter.
11367 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11368 * given iocb is for the SCSI target specified by vport and tgt_id
11369 * parameters.
11370 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11371 * given iocb is for the SCSI host associated with the given vport.
11372 * This function is called with no locks held.
11373 **/
dea3101e 11374static int
51ef4c26
JS
11375lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11376 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 11377 lpfc_ctx_cmd ctx_cmd)
dea3101e 11378{
c490850a 11379 struct lpfc_io_buf *lpfc_cmd;
dea3101e 11380 int rc = 1;
11381
b0e83012 11382 if (iocbq->vport != vport)
0bd4ca25
JSEC
11383 return rc;
11384
b0e83012
JS
11385 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11386 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
51ef4c26
JS
11387 return rc;
11388
c490850a 11389 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
0bd4ca25 11390
495a714c 11391 if (lpfc_cmd->pCmd == NULL)
dea3101e 11392 return rc;
11393
11394 switch (ctx_cmd) {
11395 case LPFC_CTX_LUN:
b0e83012 11396 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c
JS
11397 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11398 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea3101e 11399 rc = 0;
11400 break;
11401 case LPFC_CTX_TGT:
b0e83012 11402 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c 11403 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea3101e 11404 rc = 0;
11405 break;
dea3101e 11406 case LPFC_CTX_HOST:
11407 rc = 0;
11408 break;
11409 default:
11410 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 11411 __func__, ctx_cmd);
dea3101e 11412 break;
11413 }
11414
11415 return rc;
11416}
11417
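/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * narrowest filter scope. A return of 0 from
 * lpfc_sli_validate_fcp_iocb() means the iocb matches; 1 means it
 * should be skipped.
 */
#if 0
static bool example_iocb_matches_lun(struct lpfc_iocbq *iocbq,
				     struct lpfc_vport *vport,
				     uint16_t tgt_id, uint64_t lun_id)
{
	/* LPFC_CTX_LUN: vport, target and lun must all match */
	return lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					  LPFC_CTX_LUN) == 0;
}
#endif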
e59058c4 11418/**
3621a710 11419 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
e59058c4
JS
11420 * @vport: Pointer to virtual port.
11421 * @tgt_id: SCSI ID of the target.
11422 * @lun_id: LUN ID of the scsi device.
11423 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11424 *
11425 * This function returns the number of FCP commands pending for the vport.
11426 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11427 * commands pending on the vport for the SCSI device specified
11428 * by the tgt_id and lun_id parameters.
11429 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11430 * commands pending on the vport for the SCSI target specified
11431 * by the tgt_id parameter.
11432 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11433 * commands pending on the vport.
11434 * This function returns the number of iocbs which satisfy the filter.
11435 * This function is called without any lock held.
11436 **/
dea3101e 11437int
51ef4c26
JS
11438lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11439 lpfc_ctx_cmd ctx_cmd)
dea3101e 11440{
51ef4c26 11441 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11442 struct lpfc_iocbq *iocbq;
11443 int sum, i;
dea3101e 11444
31979008 11445 spin_lock_irq(&phba->hbalock);
0bd4ca25
JSEC
11446 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11447 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11448
51ef4c26
JS
11449 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11450 ctx_cmd) == 0)
0bd4ca25 11451 sum++;
dea3101e 11452 }
31979008 11453 spin_unlock_irq(&phba->hbalock);
0bd4ca25 11454
dea3101e 11455 return sum;
11456}
11457
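/*
 * Editor's sketch (illustrative only, not part of the driver):
 * counting outstanding FCP commands at each scope, e.g. before
 * deciding whether a LUN, target or host reset is warranted. The
 * target/lun values are hypothetical.
 */
#if 0
static void example_count_pending(struct lpfc_vport *vport)
{
	int lun_cnt, tgt_cnt, host_cnt;

	lun_cnt  = lpfc_sli_sum_iocb(vport, 1, 0, LPFC_CTX_LUN);
	tgt_cnt  = lpfc_sli_sum_iocb(vport, 1, 0, LPFC_CTX_TGT);
	host_cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
}
#endif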
e59058c4 11458/**
3621a710 11459 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
e59058c4
JS
11460 * @phba: Pointer to HBA context object
11461 * @cmdiocb: Pointer to command iocb object.
11462 * @rspiocb: Pointer to response iocb object.
11463 *
11464 * This function is called when an aborted FCP iocb completes. This
11465 * function is called by the ring event handler with no lock held.
11466 * This function frees the iocb.
11467 **/
5eb95af0 11468void
2e0fef85
JS
11469lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11470 struct lpfc_iocbq *rspiocb)
5eb95af0 11471{
cb69f7de 11472 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8e668af5 11473 "3096 ABORT_XRI_CN completing on rpi x%x "
cb69f7de
JS
11474 "original iotag x%x, abort cmd iotag x%x "
11475 "status 0x%x, reason 0x%x\n",
11476 cmdiocb->iocb.un.acxri.abortContextTag,
11477 cmdiocb->iocb.un.acxri.abortIoTag,
11478 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11479 rspiocb->iocb.un.ulpWord[4]);
604a3e30 11480 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
11481 return;
11482}
11483
e59058c4 11484/**
3621a710 11485 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
e59058c4
JS
11486 * @vport: Pointer to virtual port.
11487 * @pring: Pointer to driver SLI ring object.
11488 * @tgt_id: SCSI ID of the target.
11489 * @lun_id: LUN ID of the scsi device.
11490 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11491 *
11492 * This function sends an abort command for every SCSI command
11493 * associated with the given virtual port pending on the ring
11494 * filtered by lpfc_sli_validate_fcp_iocb function.
11495 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11496 * FCP iocbs associated with lun specified by tgt_id and lun_id
11497 * parameters
11498 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11499 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11500 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11501 * FCP iocbs associated with virtual port.
11502 * This function returns the number of iocbs it failed to abort.
11503 * This function is called with no locks held.
11504 **/
dea3101e 11505int
51ef4c26
JS
11506lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11507 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 11508{
51ef4c26 11509 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11510 struct lpfc_iocbq *iocbq;
11511 struct lpfc_iocbq *abtsiocb;
ecbb227e 11512 struct lpfc_sli_ring *pring_s4;
dea3101e 11513 IOCB_t *cmd = NULL;
dea3101e 11514 int errcnt = 0, ret_val = 0;
0bd4ca25 11515 int i;
dea3101e 11516
b0e83012
JS
11517 /* all I/Os are in process of being flushed */
11518 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11519 return errcnt;
11520
0bd4ca25
JSEC
11521 for (i = 1; i <= phba->sli.last_iotag; i++) {
11522 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11523
51ef4c26 11524 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 11525 abort_cmd) != 0)
dea3101e 11526 continue;
11527
afbd8d88
JS
11528 /*
11529 * If the iocbq is already being aborted, don't take a second
11530 * action, but do count it.
11531 */
11532 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11533 continue;
11534
dea3101e 11535 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 11536 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 11537 if (abtsiocb == NULL) {
11538 errcnt++;
11539 continue;
11540 }
dea3101e 11541
afbd8d88
JS
11542 /* indicate the IO is being aborted by the driver. */
11543 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11544
0bd4ca25 11545 cmd = &iocbq->iocb;
dea3101e 11546 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11547 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
11548 if (phba->sli_rev == LPFC_SLI_REV4)
11549 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11550 else
11551 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 11552 abtsiocb->iocb.ulpLe = 1;
11553 abtsiocb->iocb.ulpClass = cmd->ulpClass;
afbd8d88 11554 abtsiocb->vport = vport;
dea3101e 11555
5ffc266e 11556 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11557 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
341af102
JS
11558 if (iocbq->iocb_flag & LPFC_IO_FCP)
11559 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11560 if (iocbq->iocb_flag & LPFC_IO_FOF)
11561 abtsiocb->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11562
2e0fef85 11563 if (lpfc_is_link_up(phba))
dea3101e 11564 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11565 else
11566 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11567
5eb95af0
JSEC
11568 /* Setup callback routine and issue the command. */
11569 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
ecbb227e
JS
11570 if (phba->sli_rev == LPFC_SLI_REV4) {
11571 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11572 if (!pring_s4)
11573 continue;
11574 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11575 abtsiocb, 0);
11576 } else
11577 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11578 abtsiocb, 0);
dea3101e 11579 if (ret_val == IOCB_ERROR) {
604a3e30 11580 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 11581 errcnt++;
11582 continue;
11583 }
11584 }
11585
11586 return errcnt;
11587}
11588
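/*
 * Editor's sketch (illustrative only, not part of the driver): a
 * LUN-scoped sweep. The return value counts the iocbs the routine
 * failed to abort, so non-zero is treated as a partial failure here.
 */
#if 0
static int example_abort_lun(struct lpfc_vport *vport,
			     struct lpfc_sli_ring *pring,
			     uint16_t tgt_id, uint64_t lun_id)
{
	int failed;

	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id,
				     LPFC_CTX_LUN);
	return failed ? -EIO : 0;
}
#endif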
98912dda
JS
11589/**
11590 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11591 * @vport: Pointer to virtual port.
11592 * @pring: Pointer to driver SLI ring object.
11593 * @tgt_id: SCSI ID of the target.
11594 * @lun_id: LUN ID of the scsi device.
11595 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11596 *
11597 * This function sends an abort command for every SCSI command
11598 * associated with the given virtual port pending on the ring
11599 * filtered by lpfc_sli_validate_fcp_iocb function.
11600 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11601 * FCP iocbs associated with lun specified by tgt_id and lun_id
11602 * parameters
11603 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11604 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11605 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11606 * FCP iocbs associated with virtual port.
11607 * This function returns the number of iocbs it aborted.
11608 * This function is called with no locks held right after a taskmgmt
11609 * command is sent.
11610 **/
11611int
11612lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11613 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11614{
11615 struct lpfc_hba *phba = vport->phba;
c490850a 11616 struct lpfc_io_buf *lpfc_cmd;
98912dda 11617 struct lpfc_iocbq *abtsiocbq;
8c50d25c 11618 struct lpfc_nodelist *ndlp;
98912dda
JS
11619 struct lpfc_iocbq *iocbq;
11620 IOCB_t *icmd;
11621 int sum, i, ret_val;
11622 unsigned long iflags;
c2017260 11623 struct lpfc_sli_ring *pring_s4 = NULL;
98912dda 11624
59c68eaa 11625 spin_lock_irqsave(&phba->hbalock, iflags);
98912dda
JS
11626
11627 /* all I/Os are in process of being flushed */
11628 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
59c68eaa 11629 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11630 return 0;
11631 }
11632 sum = 0;
11633
11634 for (i = 1; i <= phba->sli.last_iotag; i++) {
11635 iocbq = phba->sli.iocbq_lookup[i];
11636
11637 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11638 cmd) != 0)
11639 continue;
11640
c2017260
JS
11641 /* Guard against IO completion being called at same time */
11642 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11643 spin_lock(&lpfc_cmd->buf_lock);
11644
11645 if (!lpfc_cmd->pCmd) {
11646 spin_unlock(&lpfc_cmd->buf_lock);
11647 continue;
11648 }
11649
11650 if (phba->sli_rev == LPFC_SLI_REV4) {
11651 pring_s4 =
11652 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
11653 if (!pring_s4) {
11654 spin_unlock(&lpfc_cmd->buf_lock);
11655 continue;
11656 }
11657 /* Note: both hbalock and ring_lock must be set here */
11658 spin_lock(&pring_s4->ring_lock);
11659 }
11660
98912dda
JS
11661 /*
11662 * If the iocbq is already being aborted, don't take a second
11663 * action, but do count it.
11664 */
c2017260
JS
11665 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11666 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11667 if (phba->sli_rev == LPFC_SLI_REV4)
11668 spin_unlock(&pring_s4->ring_lock);
11669 spin_unlock(&lpfc_cmd->buf_lock);
98912dda 11670 continue;
c2017260 11671 }
98912dda
JS
11672
11673 /* issue ABTS for this IOCB based on iotag */
11674 abtsiocbq = __lpfc_sli_get_iocbq(phba);
c2017260
JS
11675 if (!abtsiocbq) {
11676 if (phba->sli_rev == LPFC_SLI_REV4)
11677 spin_unlock(&pring_s4->ring_lock);
11678 spin_unlock(&lpfc_cmd->buf_lock);
98912dda 11679 continue;
c2017260 11680 }
98912dda
JS
11681
11682 icmd = &iocbq->iocb;
11683 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11684 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11685 if (phba->sli_rev == LPFC_SLI_REV4)
11686 abtsiocbq->iocb.un.acxri.abortIoTag =
11687 iocbq->sli4_xritag;
11688 else
11689 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11690 abtsiocbq->iocb.ulpLe = 1;
11691 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11692 abtsiocbq->vport = vport;
11693
11694 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11695 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
98912dda
JS
11696 if (iocbq->iocb_flag & LPFC_IO_FCP)
11697 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11698 if (iocbq->iocb_flag & LPFC_IO_FOF)
11699 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
98912dda 11700
8c50d25c
JS
11701 ndlp = lpfc_cmd->rdata->pnode;
11702
11703 if (lpfc_is_link_up(phba) &&
11704 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
98912dda
JS
11705 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11706 else
11707 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11708
11709 /* Setup callback routine and issue the command. */
11710 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11711
11712 /*
11713 * Indicate the IO is being aborted by the driver and set
11714 * the caller's flag into the aborted IO.
11715 */
11716 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11717
11718 if (phba->sli_rev == LPFC_SLI_REV4) {
98912dda
JS
11719 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11720 abtsiocbq, 0);
59c68eaa 11721 spin_unlock(&pring_s4->ring_lock);
98912dda
JS
11722 } else {
11723 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11724 abtsiocbq, 0);
11725 }
11726
c2017260 11727 spin_unlock(&lpfc_cmd->buf_lock);
98912dda
JS
11728
11729 if (ret_val == IOCB_ERROR)
11730 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11731 else
11732 sum++;
11733 }
59c68eaa 11734 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11735 return sum;
11736}
11737
e59058c4 11738/**
3621a710 11739 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
e59058c4
JS
11740 * @phba: Pointer to HBA context object.
11741 * @cmdiocbq: Pointer to command iocb.
11742 * @rspiocbq: Pointer to response iocb.
11743 *
11744 * This function is the completion handler for iocbs issued using
11745 * lpfc_sli_issue_iocb_wait function. This function is called by the
11746 * ring event handler function without any lock held. This function
11747 * can be called from both worker thread context and interrupt
11748 * context. This function can also be called from another thread which
11749 * cleans up the SLI layer objects.
11750 * This function copies the contents of the response iocb to the
11751 * response iocb memory object provided by the caller of
11752 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11753 * sleeps for the iocb completion.
11754 **/
68876920
JSEC
11755static void
11756lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11757 struct lpfc_iocbq *cmdiocbq,
11758 struct lpfc_iocbq *rspiocbq)
dea3101e 11759{
68876920
JSEC
11760 wait_queue_head_t *pdone_q;
11761 unsigned long iflags;
c490850a 11762 struct lpfc_io_buf *lpfc_cmd;
dea3101e 11763
2e0fef85 11764 spin_lock_irqsave(&phba->hbalock, iflags);
5a0916b4
JS
11765 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11766
11767 /*
11768 * A time out has occurred for the iocb. If a time out
11769 * completion handler has been supplied, call it. Otherwise,
11770 * just free the iocbq.
11771 */
11772
11773 spin_unlock_irqrestore(&phba->hbalock, iflags);
11774 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11775 cmdiocbq->wait_iocb_cmpl = NULL;
11776 if (cmdiocbq->iocb_cmpl)
11777 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11778 else
11779 lpfc_sli_release_iocbq(phba, cmdiocbq);
11780 return;
11781 }
11782
68876920
JSEC
11783 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11784 if (cmdiocbq->context2 && rspiocbq)
11785 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11786 &rspiocbq->iocb, sizeof(IOCB_t));
11787
0f65ff68
JS
11788 /* Set the exchange busy flag for task management commands */
11789 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11790 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
c490850a 11791 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
0f65ff68
JS
11792 cur_iocbq);
11793 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11794 }
11795
68876920 11796 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
11797 if (pdone_q)
11798 wake_up(pdone_q);
858c9f6c 11799 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 11800 return;
11801}
11802
d11e31dd
JS
11803/**
11804 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11805 * @phba: Pointer to HBA context object..
11806 * @piocbq: Pointer to command iocb.
11807 * @flag: Flag to test.
11808 *
11809 * This routine grabs the hbalock and then tests the iocb_flag to
11810 * see if the passed in flag is set.
11811 * Returns:
11812 * 1 if flag is set.
11813 * 0 if flag is not set.
11814 **/
11815static int
11816lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11817 struct lpfc_iocbq *piocbq, uint32_t flag)
11818{
11819 unsigned long iflags;
11820 int ret;
11821
11822 spin_lock_irqsave(&phba->hbalock, iflags);
11823 ret = piocbq->iocb_flag & flag;
11824 spin_unlock_irqrestore(&phba->hbalock, iflags);
11825 return ret;
11826
11827}
11828
e59058c4 11829/**
3621a710 11830 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
e59058c4
JS
11831 * @phba: Pointer to HBA context object..
11832 * @pring: Pointer to sli ring.
11833 * @piocb: Pointer to command iocb.
11834 * @prspiocbq: Pointer to response iocb.
11835 * @timeout: Timeout in number of seconds.
11836 *
11837 * This function issues the iocb to firmware and waits for the
5a0916b4
JS
11838 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11839 * to handle iocbs which time out. If the field is NULL, the
11840 * function shall free the iocbq structure. If more clean up is
11841 * needed, the caller is expected to provide a completion function
11842 * that will provide the needed clean up. If the iocb command is
11843 * not completed within timeout seconds, the function will either
11844 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11845 * completion function set in the iocb_cmpl field and then return
11846 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11847 * resources if this function returns IOCB_TIMEDOUT.
e59058c4
JS
11848 * The function waits for the iocb completion using a
11849 * non-interruptible wait.
11850 * This function will sleep while waiting for iocb completion.
11851 * So, this function should not be called from any context which
11852 * does not allow sleeping. Due to the same reason, this function
11853 * cannot be called with interrupt disabled.
11854 * This function assumes that the iocb completions occur while
11855 * this function sleeps. So, this function cannot be called from
11856 * the thread which processes iocb completion for this ring.
11857 * This function clears the iocb_flag of the iocb object before
11858 * issuing the iocb and the iocb completion handler sets this
11859 * flag and wakes this thread when the iocb completes.
11860 * The contents of the response iocb will be copied to prspiocbq
11861 * by the completion handler when the command completes.
11862 * This function returns IOCB_SUCCESS when success.
11863 * This function is called with no lock held.
11864 **/
dea3101e 11865int
2e0fef85 11866lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 11867 uint32_t ring_number,
2e0fef85
JS
11868 struct lpfc_iocbq *piocb,
11869 struct lpfc_iocbq *prspiocbq,
68876920 11870 uint32_t timeout)
dea3101e 11871{
7259f0d0 11872 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
11873 long timeleft, timeout_req = 0;
11874 int retval = IOCB_SUCCESS;
875fbdfe 11875 uint32_t creg_val;
0e9bb8d7
JS
11876 struct lpfc_iocbq *iocb;
11877 int txq_cnt = 0;
11878 int txcmplq_cnt = 0;
895427bd 11879 struct lpfc_sli_ring *pring;
5a0916b4
JS
11880 unsigned long iflags;
11881 bool iocb_completed = true;
11882
895427bd
JS
11883 if (phba->sli_rev >= LPFC_SLI_REV4)
11884 pring = lpfc_sli4_calc_ring(phba, piocb);
11885 else
11886 pring = &phba->sli.sli3_ring[ring_number];
dea3101e 11887 /*
68876920
JSEC
11888 * If the caller has provided a response iocbq buffer, then context2
11889 	 * must be NULL, or it is an error.
dea3101e 11890 */
68876920
JSEC
11891 if (prspiocbq) {
11892 if (piocb->context2)
11893 return IOCB_ERROR;
11894 piocb->context2 = prspiocbq;
dea3101e 11895 }
11896
5a0916b4 11897 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
68876920
JSEC
11898 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11899 piocb->context_un.wait_queue = &done_q;
5a0916b4 11900 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea3101e 11901
875fbdfe 11902 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
11903 if (lpfc_readl(phba->HCregaddr, &creg_val))
11904 return IOCB_ERROR;
875fbdfe
JSEC
11905 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11906 writel(creg_val, phba->HCregaddr);
11907 readl(phba->HCregaddr); /* flush */
11908 }
11909
2a9bf3d0
JS
11910 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11911 SLI_IOCB_RET_IOCB);
68876920 11912 if (retval == IOCB_SUCCESS) {
256ec0d0 11913 timeout_req = msecs_to_jiffies(timeout * 1000);
68876920 11914 timeleft = wait_event_timeout(done_q,
d11e31dd 11915 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 11916 timeout_req);
5a0916b4
JS
11917 spin_lock_irqsave(&phba->hbalock, iflags);
11918 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11919
11920 /*
11921 * IOCB timed out. Inform the wake iocb wait
11922 * completion function and set local status
11923 */
dea3101e 11924
5a0916b4
JS
11925 iocb_completed = false;
11926 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11927 }
11928 spin_unlock_irqrestore(&phba->hbalock, iflags);
11929 if (iocb_completed) {
7054a606 11930 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 11931 "0331 IOCB wake signaled\n");
53151bbb
JS
11932 /* Note: we are not indicating if the IOCB has a success
11933 * status or not - that's for the caller to check.
11934 * IOCB_SUCCESS means just that the command was sent and
11935 * completed. Not that it completed successfully.
11936 * */
7054a606 11937 } else if (timeleft == 0) {
68876920 11938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
11939 "0338 IOCB wait timeout error - no "
11940 "wake response Data x%x\n", timeout);
68876920 11941 retval = IOCB_TIMEDOUT;
7054a606 11942 } else {
68876920 11943 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
11944 "0330 IOCB wake NOT set, "
11945 "Data x%x x%lx\n",
68876920
JSEC
11946 timeout, (timeleft / jiffies));
11947 retval = IOCB_TIMEDOUT;
dea3101e 11948 }
2a9bf3d0 11949 } else if (retval == IOCB_BUSY) {
0e9bb8d7
JS
11950 if (phba->cfg_log_verbose & LOG_SLI) {
11951 list_for_each_entry(iocb, &pring->txq, list) {
11952 txq_cnt++;
11953 }
11954 list_for_each_entry(iocb, &pring->txcmplq, list) {
11955 txcmplq_cnt++;
11956 }
11957 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11958 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11959 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11960 }
2a9bf3d0 11961 return retval;
68876920
JSEC
11962 } else {
11963 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 11964 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 11965 retval);
68876920 11966 retval = IOCB_ERROR;
dea3101e 11967 }
11968
875fbdfe 11969 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
11970 if (lpfc_readl(phba->HCregaddr, &creg_val))
11971 return IOCB_ERROR;
875fbdfe
JSEC
11972 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11973 writel(creg_val, phba->HCregaddr);
11974 readl(phba->HCregaddr); /* flush */
11975 }
11976
68876920
JSEC
11977 if (prspiocbq)
11978 piocb->context2 = NULL;
11979
11980 piocb->context_un.wait_queue = NULL;
11981 piocb->iocb_cmpl = NULL;
dea3101e 11982 return retval;
11983}
68876920 11984
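/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * synchronous issue pattern. On IOCB_TIMEDOUT the caller must not
 * free the iocb; ownership has passed to the completion path. The
 * 30-second timeout is an arbitrary example value.
 */
#if 0
static int example_sync_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
			     struct lpfc_iocbq *rsp)
{
	int rc;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmd, rsp, 30);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* cmd now owned by completion path */
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
#endif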
e59058c4 11985/**
3621a710 11986 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
e59058c4
JS
11987 * @phba: Pointer to HBA context object.
11988 * @pmboxq: Pointer to driver mailbox object.
11989 * @timeout: Timeout in number of seconds.
11990 *
11991 * This function issues the mailbox to firmware and waits for the
11992 * mailbox command to complete. If the mailbox command is not
11993 * completed within timeout seconds, it returns MBX_TIMEOUT.
11994 * The function waits for the mailbox completion using an
11995 * interruptible wait. If the thread is woken up due to a
11996 * signal, MBX_TIMEOUT error is returned to the caller. Caller
11997 * should not free the mailbox resources, if this function returns
11998 * MBX_TIMEOUT.
11999 * This function will sleep while waiting for mailbox completion.
12000 * So, this function should not be called from any context which
12001 * does not allow sleeping. Due to the same reason, this function
12002 * cannot be called with interrupt disabled.
12003 * This function assumes that the mailbox completion occurs while
12004 * this function sleep. So, this function cannot be called from
12005 * the worker thread which processes mailbox completion.
12006 * This function is called in the context of HBA management
12007 * applications.
12008 * This function returns MBX_SUCCESS when successful.
12009 * This function is called with no lock held.
12010 **/
dea3101e 12011int
2e0fef85 12012lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e 12013 uint32_t timeout)
12014{
e29d74f8 12015 struct completion mbox_done;
dea3101e 12016 int retval;
858c9f6c 12017 unsigned long flag;
dea3101e 12018
495a714c 12019 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea3101e 12020 /* setup wake call as IOCB callback */
12021 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
dea3101e 12022
e29d74f8
JS
12023 	/* setup context3 field to pass completion pointer to wake function */
12024 init_completion(&mbox_done);
12025 pmboxq->context3 = &mbox_done;
dea3101e 12026 /* now issue the command */
12027 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 12028 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
e29d74f8
JS
12029 wait_for_completion_timeout(&mbox_done,
12030 msecs_to_jiffies(timeout * 1000));
7054a606 12031
858c9f6c 12032 spin_lock_irqsave(&phba->hbalock, flag);
e29d74f8 12033 pmboxq->context3 = NULL;
7054a606
JS
12034 /*
12035 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12036 * else do not free the resources.
12037 */
d7c47992 12038 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 12039 retval = MBX_SUCCESS;
d7c47992 12040 } else {
7054a606 12041 retval = MBX_TIMEOUT;
858c9f6c
JS
12042 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12043 }
12044 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 12045 }
dea3101e 12046 return retval;
12047}
12048
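/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * matching rule for the mailbox wait. On MBX_TIMEOUT the mailbox
 * must not be freed; lpfc_sli_def_mbox_cmpl will reclaim it later.
 */
#if 0
static int example_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	int rc;

	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* pmb reclaimed by deferred cmpl */

	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif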
e59058c4 12049/**
3772a991 12050 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
e59058c4
JS
12051 * @phba: Pointer to HBA context.
12052 *
3772a991
JS
12053 * This function is called to shutdown the driver's mailbox sub-system.
12054 * It first marks the mailbox sub-system as being in a blocked state to
12055 * prevent the asynchronous mailbox command from being issued off the pending
12056 * command queue. If the mailbox command sub-system shutdown is due to
12057 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12058 * the mailbox sub-system flush routine to forcefully bring down the
12059 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12060 * as with offline or HBA function reset), this routine will wait for the
12061 * outstanding mailbox command to complete before invoking the mailbox
12062 * sub-system flush routine to gracefully bring down mailbox sub-system.
e59058c4 12063 **/
3772a991 12064void
618a5230 12065lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
b4c02652 12066{
3772a991 12067 struct lpfc_sli *psli = &phba->sli;
3772a991 12068 unsigned long timeout;
b4c02652 12069
618a5230
JS
12070 if (mbx_action == LPFC_MBX_NO_WAIT) {
12071 /* delay 100ms for port state */
12072 msleep(100);
12073 lpfc_sli_mbox_sys_flush(phba);
12074 return;
12075 }
a183a15f 12076 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 12077
523128e5
JS
12078 /* Disable softirqs, including timers from obtaining phba->hbalock */
12079 local_bh_disable();
12080
3772a991
JS
12081 spin_lock_irq(&phba->hbalock);
12082 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 12083
3772a991 12084 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3772a991
JS
12085 /* Determine how long we might wait for the active mailbox
12086 * command to be gracefully completed by firmware.
12087 */
a183a15f
JS
12088 if (phba->sli.mbox_active)
12089 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12090 phba->sli.mbox_active) *
12091 1000) + jiffies;
12092 spin_unlock_irq(&phba->hbalock);
12093
523128e5
JS
12094 /* Enable softirqs again, done with phba->hbalock */
12095 local_bh_enable();
12096
3772a991
JS
12097 while (phba->sli.mbox_active) {
12098 /* Check active mailbox complete status every 2ms */
12099 msleep(2);
12100 if (time_after(jiffies, timeout))
12101 /* Timeout, let the mailbox flush routine to
12102 * forcefully release active mailbox command
12103 */
12104 break;
12105 }
523128e5 12106 } else {
d7069f09
JS
12107 spin_unlock_irq(&phba->hbalock);
12108
523128e5
JS
12109 /* Enable softirqs again, done with phba->hbalock */
12110 local_bh_enable();
12111 }
12112
3772a991
JS
12113 lpfc_sli_mbox_sys_flush(phba);
12114}
ed957684 12115
3772a991
JS
12116/**
12117 * lpfc_sli_eratt_read - read sli-3 error attention events
12118 * @phba: Pointer to HBA context.
12119 *
12120 * This function is called to read the SLI3 device error attention registers
12121 * for possible error attention events. The caller must hold the hostlock
12122 * with spin_lock_irq().
12123 *
25985edc 12124 * This function returns 1 when there is Error Attention in the Host Attention
3772a991
JS
12125 * Register and returns 0 otherwise.
12126 **/
12127static int
12128lpfc_sli_eratt_read(struct lpfc_hba *phba)
12129{
12130 uint32_t ha_copy;
b4c02652 12131
3772a991 12132 /* Read chip Host Attention (HA) register */
9940b97b
JS
12133 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12134 goto unplug_err;
12135
3772a991
JS
12136 if (ha_copy & HA_ERATT) {
12137 /* Read host status register to retrieve error event */
9940b97b
JS
12138 if (lpfc_sli_read_hs(phba))
12139 goto unplug_err;
b4c02652 12140
3772a991
JS
12141 		/* Check if a deferred error condition is active */
12142 if ((HS_FFER1 & phba->work_hs) &&
12143 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 12144 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 12145 phba->hba_flag |= DEFER_ERATT;
3772a991
JS
12146 /* Clear all interrupt enable conditions */
12147 writel(0, phba->HCregaddr);
12148 readl(phba->HCregaddr);
12149 }
12150
12151 /* Set the driver HA work bitmap */
3772a991
JS
12152 phba->work_ha |= HA_ERATT;
12153 /* Indicate polling handles this ERATT */
12154 phba->hba_flag |= HBA_ERATT_HANDLED;
3772a991
JS
12155 return 1;
12156 }
12157 return 0;
9940b97b
JS
12158
12159unplug_err:
12160 /* Set the driver HS work bitmap */
12161 phba->work_hs |= UNPLUG_ERR;
12162 /* Set the driver HA work bitmap */
12163 phba->work_ha |= HA_ERATT;
12164 /* Indicate polling handles this ERATT */
12165 phba->hba_flag |= HBA_ERATT_HANDLED;
12166 return 1;
b4c02652
JS
12167}
12168
da0436e9
JS
12169/**
12170 * lpfc_sli4_eratt_read - read sli-4 error attention events
12171 * @phba: Pointer to HBA context.
12172 *
12173 * This function is called to read the SLI4 device error attention registers
12174 * for possible error attention events. The caller must hold the hostlock
12175 * with spin_lock_irq().
12176 *
25985edc 12177 * This function returns 1 when there is Error Attention in the Host Attention
da0436e9
JS
12178 * Register and returns 0 otherwise.
12179 **/
12180static int
12181lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12182{
12183 uint32_t uerr_sta_hi, uerr_sta_lo;
2fcee4bf
JS
12184 uint32_t if_type, portsmphr;
12185 struct lpfc_register portstat_reg;
da0436e9 12186
2fcee4bf
JS
12187 /*
12188 * For now, use the SLI4 device internal unrecoverable error
da0436e9
JS
12189 * registers for error attention. This can be changed later.
12190 */
2fcee4bf
JS
12191 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12192 switch (if_type) {
12193 case LPFC_SLI_INTF_IF_TYPE_0:
9940b97b
JS
12194 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12195 &uerr_sta_lo) ||
12196 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12197 &uerr_sta_hi)) {
12198 phba->work_hs |= UNPLUG_ERR;
12199 phba->work_ha |= HA_ERATT;
12200 phba->hba_flag |= HBA_ERATT_HANDLED;
12201 return 1;
12202 }
2fcee4bf
JS
12203 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12204 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12205 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12206 "1423 HBA Unrecoverable error: "
12207 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12208 "ue_mask_lo_reg=0x%x, "
12209 "ue_mask_hi_reg=0x%x\n",
12210 uerr_sta_lo, uerr_sta_hi,
12211 phba->sli4_hba.ue_mask_lo,
12212 phba->sli4_hba.ue_mask_hi);
12213 phba->work_status[0] = uerr_sta_lo;
12214 phba->work_status[1] = uerr_sta_hi;
12215 phba->work_ha |= HA_ERATT;
12216 phba->hba_flag |= HBA_ERATT_HANDLED;
12217 return 1;
12218 }
12219 break;
12220 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 12221 case LPFC_SLI_INTF_IF_TYPE_6:
9940b97b
JS
12222 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12223 &portstat_reg.word0) ||
12224 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12225 &portsmphr)){
12226 phba->work_hs |= UNPLUG_ERR;
12227 phba->work_ha |= HA_ERATT;
12228 phba->hba_flag |= HBA_ERATT_HANDLED;
12229 return 1;
12230 }
2fcee4bf
JS
12231 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12232 phba->work_status[0] =
12233 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12234 phba->work_status[1] =
12235 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12236 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 12237 "2885 Port Status Event: "
2fcee4bf
JS
12238 "port status reg 0x%x, "
12239 "port smphr reg 0x%x, "
12240 "error 1=0x%x, error 2=0x%x\n",
12241 portstat_reg.word0,
12242 portsmphr,
12243 phba->work_status[0],
12244 phba->work_status[1]);
12245 phba->work_ha |= HA_ERATT;
12246 phba->hba_flag |= HBA_ERATT_HANDLED;
12247 return 1;
12248 }
12249 break;
12250 case LPFC_SLI_INTF_IF_TYPE_1:
12251 default:
a747c9ce 12252 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
12253 "2886 HBA Error Attention on unsupported "
12254 "if type %d.", if_type);
a747c9ce 12255 return 1;
da0436e9 12256 }
2fcee4bf 12257
da0436e9
JS
12258 return 0;
12259}
12260
e59058c4 12261/**
3621a710 12262 * lpfc_sli_check_eratt - check error attention events
9399627f
JS
12263 * @phba: Pointer to HBA context.
12264 *
3772a991 12265 * This function is called from timer soft interrupt context to check HBA's
9399627f
JS
12266 * error attention register bit for error attention events.
12267 *
25985edc 12268 * This function returns 1 when there is Error Attention in the Host Attention
9399627f
JS
12269 * Register and returns 0 otherwise.
12270 **/
12271int
12272lpfc_sli_check_eratt(struct lpfc_hba *phba)
12273{
12274 uint32_t ha_copy;
12275
12276 /* If somebody is waiting to handle an eratt, don't process it
12277 * here. The brdkill function will do this.
12278 */
12279 if (phba->link_flag & LS_IGNORE_ERATT)
12280 return 0;
12281
12282 /* Check if interrupt handler handles this ERATT */
12283 spin_lock_irq(&phba->hbalock);
12284 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12285 /* Interrupt handler has handled ERATT */
12286 spin_unlock_irq(&phba->hbalock);
12287 return 0;
12288 }
12289
a257bf90
JS
12290 /*
12291 * If there is deferred error attention, do not check for error
12292 * attention
12293 */
12294 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12295 spin_unlock_irq(&phba->hbalock);
12296 return 0;
12297 }
12298
3772a991
JS
12299 /* If PCI channel is offline, don't process it */
12300 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 12301 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12302 return 0;
12303 }
12304
12305 switch (phba->sli_rev) {
12306 case LPFC_SLI_REV2:
12307 case LPFC_SLI_REV3:
12308 /* Read chip Host Attention (HA) register */
12309 ha_copy = lpfc_sli_eratt_read(phba);
12310 break;
da0436e9 12311 case LPFC_SLI_REV4:
2fcee4bf 12312 		/* Read device Unrecoverable Error (UERR) registers */
da0436e9
JS
12313 ha_copy = lpfc_sli4_eratt_read(phba);
12314 break;
3772a991
JS
12315 default:
12316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12317 "0299 Invalid SLI revision (%d)\n",
12318 phba->sli_rev);
12319 ha_copy = 0;
12320 break;
9399627f
JS
12321 }
12322 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12323
12324 return ha_copy;
12325}
12326
12327/**
12328 * lpfc_intr_state_check - Check device state for interrupt handling
12329 * @phba: Pointer to HBA context.
12330 *
12331 * This inline routine checks whether a device or its PCI slot is in a state
12332 * in which the interrupt should be handled.
12333 *
12334 * This function returns 0 if the device or the PCI slot is in a state that
12335 * interrupt should be handled, otherwise -EIO.
12336 */
12337static inline int
12338lpfc_intr_state_check(struct lpfc_hba *phba)
12339{
12340 /* If the pci channel is offline, ignore all the interrupts */
12341 if (unlikely(pci_channel_offline(phba->pcidev)))
12342 return -EIO;
12343
12344 /* Update device level interrupt statistics */
12345 phba->sli.slistat.sli_intr++;
12346
12347 /* Ignore all interrupts during initialization. */
12348 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12349 return -EIO;
12350
9399627f
JS
12351 return 0;
12352}
12353
12354/**
3772a991 12355 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
e59058c4
JS
12356 * @irq: Interrupt number.
12357 * @dev_id: The device context pointer.
12358 *
9399627f 12359 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
12360 * service routine when device with SLI-3 interface spec is enabled with
12361 * MSI-X multi-message interrupt mode and there are slow-path events in
12362 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12363 * interrupt mode, this function is called as part of the device-level
12364 * interrupt handler. When the PCI slot is in error recovery or the HBA
12365 * is undergoing initialization, the interrupt handler will not process
12366 * the interrupt. The link attention and ELS ring attention events are
12367 * handled by the worker thread. The interrupt handler signals the worker
12368 * thread and returns for these events. This function is called without
12369 * any lock held. It gets the hbalock to access and update SLI data
9399627f
JS
12370 * structures.
12371 *
12372 * This function returns IRQ_HANDLED when interrupt is handled else it
12373 * returns IRQ_NONE.
e59058c4 12374 **/
dea3101e 12375irqreturn_t
3772a991 12376lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea3101e 12377{
2e0fef85 12378 struct lpfc_hba *phba;
a747c9ce 12379 uint32_t ha_copy, hc_copy;
dea3101e 12380 uint32_t work_ha_copy;
12381 unsigned long status;
5b75da2f 12382 unsigned long iflag;
dea3101e 12383 uint32_t control;
12384
92d7f7b0 12385 MAILBOX_t *mbox, *pmbox;
858c9f6c
JS
12386 struct lpfc_vport *vport;
12387 struct lpfc_nodelist *ndlp;
12388 struct lpfc_dmabuf *mp;
92d7f7b0
JS
12389 LPFC_MBOXQ_t *pmb;
12390 int rc;
12391
dea3101e 12392 /*
12393 * Get the driver's phba structure from the dev_id and
12394 * assume the HBA is not interrupting.
12395 */
9399627f 12396 phba = (struct lpfc_hba *)dev_id;
dea3101e 12397
12398 if (unlikely(!phba))
12399 return IRQ_NONE;
12400
dea3101e 12401 /*
9399627f
JS
12402 * Stuff needs to be attended to when this function is invoked as an
12403 * individual interrupt handler in MSI-X multi-message interrupt mode
dea3101e 12404 */
9399627f 12405 if (phba->intr_type == MSIX) {
3772a991
JS
12406 /* Check device state for handling interrupt */
12407 if (lpfc_intr_state_check(phba))
9399627f
JS
12408 return IRQ_NONE;
12409 /* Need to read HA REG for slow-path events */
5b75da2f 12410 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12411 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12412 goto unplug_error;
9399627f
JS
12413 /* If somebody is waiting to handle an eratt, don't process it
12414 * here. The brdkill function will do this.
12415 */
12416 if (phba->link_flag & LS_IGNORE_ERATT)
12417 ha_copy &= ~HA_ERATT;
12418 /* Check the need for handling ERATT in interrupt handler */
12419 if (ha_copy & HA_ERATT) {
12420 if (phba->hba_flag & HBA_ERATT_HANDLED)
12421 /* ERATT polling has handled ERATT */
12422 ha_copy &= ~HA_ERATT;
12423 else
12424 /* Indicate interrupt handler handles ERATT */
12425 phba->hba_flag |= HBA_ERATT_HANDLED;
12426 }
a257bf90
JS
12427
12428 /*
12429 * If there is deferred error attention, do not check for any
12430 * interrupt.
12431 */
12432 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12433 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12434 return IRQ_NONE;
12435 }
12436
9399627f 12437 /* Clear up only attention source related to slow-path */
9940b97b
JS
12438 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12439 goto unplug_error;
12440
a747c9ce
JS
12441 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12442 HC_LAINT_ENA | HC_ERINT_ENA),
12443 phba->HCregaddr);
9399627f
JS
12444 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12445 phba->HAregaddr);
a747c9ce 12446 writel(hc_copy, phba->HCregaddr);
9399627f 12447 readl(phba->HAregaddr); /* flush */
5b75da2f 12448 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12449 } else
12450 ha_copy = phba->ha_copy;
dea3101e 12451
dea3101e 12452 work_ha_copy = ha_copy & phba->work_ha_mask;
12453
9399627f 12454 if (work_ha_copy) {
dea3101e 12455 if (work_ha_copy & HA_LATT) {
12456 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12457 /*
12458 * Turn off Link Attention interrupts
12459 * until CLEAR_LA done
12460 */
5b75da2f 12461 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12462 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
9940b97b
JS
12463 if (lpfc_readl(phba->HCregaddr, &control))
12464 goto unplug_error;
dea3101e 12465 control &= ~HC_LAINT_ENA;
12466 writel(control, phba->HCregaddr);
12467 readl(phba->HCregaddr); /* flush */
5b75da2f 12468 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 12469 }
12470 else
12471 work_ha_copy &= ~HA_LATT;
12472 }
12473
9399627f 12474 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
858c9f6c
JS
12475 /*
12476 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12477 * the only slow ring.
12478 */
12479 status = (work_ha_copy &
12480 (HA_RXMASK << (4*LPFC_ELS_RING)));
12481 status >>= (4*LPFC_ELS_RING);
12482 if (status & HA_RXMASK) {
5b75da2f 12483 spin_lock_irqsave(&phba->hbalock, iflag);
9940b97b
JS
12484 if (lpfc_readl(phba->HCregaddr, &control))
12485 goto unplug_error;
a58cbd52
JS
12486
12487 lpfc_debugfs_slow_ring_trc(phba,
12488 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12489 control, status,
12490 (uint32_t)phba->sli.slistat.sli_intr);
12491
858c9f6c 12492 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
a58cbd52
JS
12493 lpfc_debugfs_slow_ring_trc(phba,
12494 "ISR Disable ring:"
12495 "pwork:x%x hawork:x%x wait:x%x",
12496 phba->work_ha, work_ha_copy,
12497 (uint32_t)((unsigned long)
5e9d9b82 12498 &phba->work_waitq));
a58cbd52 12499
858c9f6c
JS
12500 control &=
12501 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea3101e 12502 writel(control, phba->HCregaddr);
12503 readl(phba->HCregaddr); /* flush */
dea3101e 12504 }
a58cbd52
JS
12505 else {
12506 lpfc_debugfs_slow_ring_trc(phba,
12507 "ISR slow ring: pwork:"
12508 "x%x hawork:x%x wait:x%x",
12509 phba->work_ha, work_ha_copy,
12510 (uint32_t)((unsigned long)
5e9d9b82 12511 &phba->work_waitq));
a58cbd52 12512 }
5b75da2f 12513 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 12514 }
12515 }
5b75da2f 12516 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90 12517 if (work_ha_copy & HA_ERATT) {
9940b97b
JS
12518 if (lpfc_sli_read_hs(phba))
12519 goto unplug_error;
a257bf90
JS
12520 /*
12521 * Check if a deferred error condition
12522 * is active
12523 */
12524 if ((HS_FFER1 & phba->work_hs) &&
12525 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0
JS
12526 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12527 phba->work_hs)) {
a257bf90
JS
12528 phba->hba_flag |= DEFER_ERATT;
12529 /* Clear all interrupt enable conditions */
12530 writel(0, phba->HCregaddr);
12531 readl(phba->HCregaddr);
12532 }
12533 }
12534
9399627f 12535 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
92d7f7b0 12536 pmb = phba->sli.mbox_active;
04c68496 12537 pmbox = &pmb->u.mb;
34b02dcd 12538 mbox = phba->mbox;
858c9f6c 12539 vport = pmb->vport;
92d7f7b0
JS
12540
12541 /* First check out the status word */
12542 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12543 if (pmbox->mbxOwner != OWN_HOST) {
5b75da2f 12544 spin_unlock_irqrestore(&phba->hbalock, iflag);
92d7f7b0
JS
12545 /*
12546 * Stray Mailbox Interrupt, mbxCommand <cmd>
12547 * mbxStatus <status>
12548 */
09372820 12549 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
92d7f7b0 12550 LOG_SLI,
e8b62011 12551 "(%d):0304 Stray Mailbox "
92d7f7b0
JS
12552 "Interrupt mbxCommand x%x "
12553 "mbxStatus x%x\n",
e8b62011 12554 (vport ? vport->vpi : 0),
92d7f7b0
JS
12555 pmbox->mbxCommand,
12556 pmbox->mbxStatus);
09372820
JS
12557 /* clear mailbox attention bit */
12558 work_ha_copy &= ~HA_MBATT;
12559 } else {
97eab634 12560 phba->sli.mbox_active = NULL;
5b75da2f 12561 spin_unlock_irqrestore(&phba->hbalock, iflag);
09372820
JS
12562 phba->last_completion_time = jiffies;
12563 del_timer(&phba->sli.mbox_tmo);
09372820
JS
12564 if (pmb->mbox_cmpl) {
12565 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12566 MAILBOX_CMD_SIZE);
7a470277 12567 if (pmb->out_ext_byte_len &&
3e1f0718 12568 pmb->ctx_buf)
7a470277
JS
12569 lpfc_sli_pcimem_bcopy(
12570 phba->mbox_ext,
3e1f0718 12571 pmb->ctx_buf,
7a470277 12572 pmb->out_ext_byte_len);
09372820
JS
12573 }
12574 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12575 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12576
12577 lpfc_debugfs_disc_trc(vport,
12578 LPFC_DISC_TRC_MBOX_VPORT,
12579 "MBOX dflt rpi: : "
12580 "status:x%x rpi:x%x",
12581 (uint32_t)pmbox->mbxStatus,
12582 pmbox->un.varWords[0], 0);
12583
12584 if (!pmbox->mbxStatus) {
12585 mp = (struct lpfc_dmabuf *)
3e1f0718 12586 (pmb->ctx_buf);
09372820 12587 ndlp = (struct lpfc_nodelist *)
3e1f0718 12588 pmb->ctx_ndlp;
09372820
JS
12589
12590 /* Reg_LOGIN of dflt RPI was
12591 * successful. Now let's get
12592 * rid of the RPI using the
12593 * same mbox buffer.
12594 */
12595 lpfc_unreg_login(phba,
12596 vport->vpi,
12597 pmbox->un.varWords[0],
12598 pmb);
12599 pmb->mbox_cmpl =
12600 lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
12601 pmb->ctx_buf = mp;
12602 pmb->ctx_ndlp = ndlp;
09372820 12603 pmb->vport = vport;
58da1ffb
JS
12604 rc = lpfc_sli_issue_mbox(phba,
12605 pmb,
12606 MBX_NOWAIT);
12607 if (rc != MBX_BUSY)
12608 lpfc_printf_log(phba,
12609 KERN_ERR,
12610 LOG_MBOX | LOG_SLI,
d7c255b2 12611 "0350 rc should have "
6a9c52cf 12612 "been MBX_BUSY\n");
3772a991
JS
12613 if (rc != MBX_NOT_FINISHED)
12614 goto send_current_mbox;
09372820 12615 }
858c9f6c 12616 }
5b75da2f
JS
12617 spin_lock_irqsave(
12618 &phba->pport->work_port_lock,
12619 iflag);
09372820
JS
12620 phba->pport->work_port_events &=
12621 ~WORKER_MBOX_TMO;
5b75da2f
JS
12622 spin_unlock_irqrestore(
12623 &phba->pport->work_port_lock,
12624 iflag);
09372820 12625 lpfc_mbox_cmpl_put(phba, pmb);
858c9f6c 12626 }
97eab634 12627 } else
5b75da2f 12628 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f 12629
92d7f7b0
JS
12630 if ((work_ha_copy & HA_MBATT) &&
12631 (phba->sli.mbox_active == NULL)) {
858c9f6c 12632send_current_mbox:
92d7f7b0 12633 /* Process next mailbox command if there is one */
58da1ffb
JS
12634 do {
12635 rc = lpfc_sli_issue_mbox(phba, NULL,
12636 MBX_NOWAIT);
12637 } while (rc == MBX_NOT_FINISHED);
12638 if (rc != MBX_SUCCESS)
12639 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12640 LOG_SLI, "0349 rc should be "
6a9c52cf 12641 "MBX_SUCCESS\n");
92d7f7b0
JS
12642 }
12643
5b75da2f 12644 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 12645 phba->work_ha |= work_ha_copy;
5b75da2f 12646 spin_unlock_irqrestore(&phba->hbalock, iflag);
5e9d9b82 12647 lpfc_worker_wake_up(phba);
dea3101e 12648 }
9399627f 12649 return IRQ_HANDLED;
9940b97b
JS
12650unplug_error:
12651 spin_unlock_irqrestore(&phba->hbalock, iflag);
12652 return IRQ_HANDLED;
dea3101e 12653
3772a991 12654} /* lpfc_sli_sp_intr_handler */
9399627f
JS
12655
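/*
 * Illustrative sketch only: how the handler above isolates a ring's 4-bit
 * attention nibble from the Host Attention (HA) register. Ring N owns
 * bits [4*N .. 4*N+3]; HA_RXMASK then selects the receive-attention bits.
 * The helper name is hypothetical.
 */
static inline uint32_t lpfc_example_ring_attn(uint32_t ha_copy, int ring)
{
	return (ha_copy >> (4 * ring)) & HA_RXMASK;
}
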
12656/**
3772a991 12657 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
9399627f
JS
12658 * @irq: Interrupt number.
12659 * @dev_id: The device context pointer.
12660 *
12661 * This function is directly called from the PCI layer as an interrupt
3772a991
JS
12662 * service routine when a device with the SLI-3 interface spec is enabled with
12663 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12664 * ring event in the HBA. However, when the device is enabled with either
12665 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12666 * device-level interrupt handler. When the PCI slot is in error recovery
12667 * or the HBA is undergoing initialization, the interrupt handler will not
12668 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12669 * the interrupt context. This function is called without any lock held.
12670 * It gets the hbalock to access and update SLI data structures.
9399627f
JS
12671 *
12672 * This function returns IRQ_HANDLED when interrupt is handled else it
12673 * returns IRQ_NONE.
12674 **/
12675irqreturn_t
3772a991 12676lpfc_sli_fp_intr_handler(int irq, void *dev_id)
9399627f
JS
12677{
12678 struct lpfc_hba *phba;
12679 uint32_t ha_copy;
12680 unsigned long status;
5b75da2f 12681 unsigned long iflag;
895427bd 12682 struct lpfc_sli_ring *pring;
9399627f
JS
12683
12684 /* Get the driver's phba structure from the dev_id and
12685 * assume the HBA is not interrupting.
12686 */
12687 phba = (struct lpfc_hba *) dev_id;
12688
12689 if (unlikely(!phba))
12690 return IRQ_NONE;
12691
12692 /*
12693 * Stuff needs to be attended to when this function is invoked as an
12694 * individual interrupt handler in MSI-X multi-message interrupt mode
12695 */
12696 if (phba->intr_type == MSIX) {
3772a991
JS
12697 /* Check device state for handling interrupt */
12698 if (lpfc_intr_state_check(phba))
9399627f
JS
12699 return IRQ_NONE;
12700 /* Need to read HA REG for FCP ring and other ring events */
9940b97b
JS
12701 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12702 return IRQ_HANDLED;
9399627f 12703 /* Clear up only attention source related to fast-path */
5b75da2f 12704 spin_lock_irqsave(&phba->hbalock, iflag);
a257bf90
JS
12705 /*
12706 * If there is deferred error attention, do not check for
12707 * any interrupt.
12708 */
12709 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3772a991 12710 spin_unlock_irqrestore(&phba->hbalock, iflag);
a257bf90
JS
12711 return IRQ_NONE;
12712 }
9399627f
JS
12713 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12714 phba->HAregaddr);
12715 readl(phba->HAregaddr); /* flush */
5b75da2f 12716 spin_unlock_irqrestore(&phba->hbalock, iflag);
9399627f
JS
12717 } else
12718 ha_copy = phba->ha_copy;
dea3101e 12719
12720 /*
9399627f 12721 * Process all events on FCP ring. Take the optimized path for FCP IO.
dea3101e 12722 */
9399627f
JS
12723 ha_copy &= ~(phba->work_ha_mask);
12724
12725 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea3101e 12726 status >>= (4*LPFC_FCP_RING);
895427bd 12727 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
858c9f6c 12728 if (status & HA_RXMASK)
895427bd 12729 lpfc_sli_handle_fast_ring_event(phba, pring, status);
a4bc3379
JS
12730
12731 if (phba->cfg_multi_ring_support == 2) {
12732 /*
9399627f
JS
12733 * Process all events on extra ring. Take the optimized path
12734 * for extra ring IO.
a4bc3379 12735 */
9399627f 12736 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
a4bc3379 12737 status >>= (4*LPFC_EXTRA_RING);
858c9f6c 12738 if (status & HA_RXMASK) {
a4bc3379 12739 lpfc_sli_handle_fast_ring_event(phba,
895427bd 12740 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
a4bc3379
JS
12741 status);
12742 }
12743 }
dea3101e 12744 return IRQ_HANDLED;
3772a991 12745} /* lpfc_sli_fp_intr_handler */
9399627f
JS
12746
12747/**
3772a991 12748 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
9399627f
JS
12749 * @irq: Interrupt number.
12750 * @dev_id: The device context pointer.
12751 *
3772a991
JS
12752 * This function is the HBA device-level interrupt handler for a device with
12753 * SLI-3 interface spec, called from the PCI layer when either MSI or
12754 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12755 * requires driver attention. This function invokes the slow-path interrupt
12756 * attention handling function and fast-path interrupt attention handling
12757 * function in turn to process the relevant HBA attention events. This
12758 * function is called without any lock held. It gets the hbalock to access
12759 * and update SLI data structures.
9399627f
JS
12760 *
12761 * This function returns IRQ_HANDLED when interrupt is handled, else it
12762 * returns IRQ_NONE.
12763 **/
12764irqreturn_t
3772a991 12765lpfc_sli_intr_handler(int irq, void *dev_id)
9399627f
JS
12766{
12767 struct lpfc_hba *phba;
12768 irqreturn_t sp_irq_rc, fp_irq_rc;
12769 unsigned long status1, status2;
a747c9ce 12770 uint32_t hc_copy;
9399627f
JS
12771
12772 /*
12773 * Get the driver's phba structure from the dev_id and
12774 * assume the HBA is not interrupting.
12775 */
12776 phba = (struct lpfc_hba *) dev_id;
12777
12778 if (unlikely(!phba))
12779 return IRQ_NONE;
12780
3772a991
JS
12781 /* Check device state for handling interrupt */
12782 if (lpfc_intr_state_check(phba))
9399627f
JS
12783 return IRQ_NONE;
12784
12785 spin_lock(&phba->hbalock);
9940b97b
JS
12786 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12787 spin_unlock(&phba->hbalock);
12788 return IRQ_HANDLED;
12789 }
12790
9399627f
JS
12791 if (unlikely(!phba->ha_copy)) {
12792 spin_unlock(&phba->hbalock);
12793 return IRQ_NONE;
12794 } else if (phba->ha_copy & HA_ERATT) {
12795 if (phba->hba_flag & HBA_ERATT_HANDLED)
12796 /* ERATT polling has handled ERATT */
12797 phba->ha_copy &= ~HA_ERATT;
12798 else
12799 /* Indicate interrupt handler handles ERATT */
12800 phba->hba_flag |= HBA_ERATT_HANDLED;
12801 }
12802
a257bf90
JS
12803 /*
12804 * If there is deferred error attention, do not check for any interrupt.
12805 */
12806 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
ec21b3b0 12807 spin_unlock(&phba->hbalock);
a257bf90
JS
12808 return IRQ_NONE;
12809 }
12810
9399627f 12811 /* Clear attention sources except link and error attentions */
9940b97b
JS
12812 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12813 spin_unlock(&phba->hbalock);
12814 return IRQ_HANDLED;
12815 }
a747c9ce
JS
12816 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12817 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12818 phba->HCregaddr);
9399627f 12819 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
a747c9ce 12820 writel(hc_copy, phba->HCregaddr);
9399627f
JS
12821 readl(phba->HAregaddr); /* flush */
12822 spin_unlock(&phba->hbalock);
12823
12824 /*
12825 * Invokes slow-path host attention interrupt handling as appropriate.
12826 */
12827
12828 /* status of events with mailbox and link attention */
12829 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12830
12831 /* status of events with ELS ring */
12832 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12833 status2 >>= (4*LPFC_ELS_RING);
12834
12835 if (status1 || (status2 & HA_RXMASK))
3772a991 12836 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
9399627f
JS
12837 else
12838 sp_irq_rc = IRQ_NONE;
12839
12840 /*
12841 * Invoke fast-path host attention interrupt handling as appropriate.
12842 */
12843
12844 /* status of events with FCP ring */
12845 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12846 status1 >>= (4*LPFC_FCP_RING);
12847
12848 /* status of events with extra ring */
12849 if (phba->cfg_multi_ring_support == 2) {
12850 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12851 status2 >>= (4*LPFC_EXTRA_RING);
12852 } else
12853 status2 = 0;
12854
12855 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
3772a991 12856 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
9399627f
JS
12857 else
12858 fp_irq_rc = IRQ_NONE;
dea3101e 12859
9399627f
JS
12860 /* Return device-level interrupt handling status */
12861 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
3772a991 12862} /* lpfc_sli_intr_handler */
4f774513
JS
12863
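/*
 * Illustrative sketch only: the dispatch model described above. With MSI-X
 * the slow-path and fast-path handlers sit on separate vectors; with MSI or
 * Pin-IRQ a single vector lands in lpfc_sli_intr_handler(), which calls
 * both in turn. The registration helper and vector names are hypothetical;
 * the driver's real interrupt setup code lives elsewhere.
 */
static int lpfc_example_register_msix(struct lpfc_hba *phba)
{
	int rc;

	/* vector 0: mailbox, link attention and ELS ring (slow path) */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);
	if (rc)
		return rc;

	/* vector 1: FCP ring events (fast path) */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 lpfc_sli_fp_intr_handler, 0, "lpfc:fp", phba);
	if (rc)
		free_irq(pci_irq_vector(phba->pcidev, 0), phba);
	return rc;
}
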
12864/**
4f774513 12865 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
4f774513
JS
12866 * @phba: pointer to lpfc hba data structure.
12867 *
12868 * This routine is invoked by the worker thread to process all the pending
4f774513 12869 * SLI4 els abort xri events.
4f774513 12870 **/
4f774513 12871void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
4f774513
JS
12872{
12873 struct lpfc_cq_event *cq_event;
12874
4f774513 12875 /* First, declare the els xri abort event has been handled */
4f774513 12876 spin_lock_irq(&phba->hbalock);
4f774513 12877 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
4f774513 12878 spin_unlock_irq(&phba->hbalock);
4f774513
JS
12879 /* Now, handle all the els xri abort events */
12880 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12881 /* Get the first event from the head of the event queue */
12882 spin_lock_irq(&phba->hbalock);
12883 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12884 cq_event, struct lpfc_cq_event, list);
12885 spin_unlock_irq(&phba->hbalock);
12886 /* Notify aborted XRI for ELS work queue */
12887 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12888 /* Free the event processed back to the free pool */
12889 lpfc_sli4_cq_event_release(phba, cq_event);
12890 }
12891}
12892
341af102
JS
12893/**
12894 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12895 * @phba: pointer to lpfc hba data structure
12896 * @pIocbIn: pointer to the rspiocbq
12897 * @pIocbOut: pointer to the cmdiocbq
12898 * @wcqe: pointer to the complete wcqe
12899 *
12900 * This routine transfers the fields of a command iocbq to a response iocbq
12901 * by copying all the IOCB fields from the command iocbq and transferring the
12902 * completion status information from the completed wcqe.
12903 **/
4f774513 12904static void
341af102
JS
12905lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12906 struct lpfc_iocbq *pIocbIn,
4f774513
JS
12907 struct lpfc_iocbq *pIocbOut,
12908 struct lpfc_wcqe_complete *wcqe)
12909{
af22741c 12910 int numBdes, i;
341af102 12911 unsigned long iflags;
af22741c
JS
12912 uint32_t status, max_response;
12913 struct lpfc_dmabuf *dmabuf;
12914 struct ulp_bde64 *bpl, bde;
4f774513
JS
12915 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12916
12917 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12918 sizeof(struct lpfc_iocbq) - offset);
4f774513 12919 /* Map WCQE parameters into irspiocb parameters */
acd6859b
JS
12920 status = bf_get(lpfc_wcqe_c_status, wcqe);
12921 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
4f774513
JS
12922 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12923 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12924 pIocbIn->iocb.un.fcpi.fcpi_parm =
12925 pIocbOut->iocb.un.fcpi.fcpi_parm -
12926 wcqe->total_data_placed;
12927 else
12928 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
695a814e 12929 else {
4f774513 12930 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
af22741c
JS
12931 switch (pIocbOut->iocb.ulpCommand) {
12932 case CMD_ELS_REQUEST64_CR:
12933 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12934 bpl = (struct ulp_bde64 *)dmabuf->virt;
12935 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12936 max_response = bde.tus.f.bdeSize;
12937 break;
12938 case CMD_GEN_REQUEST64_CR:
12939 max_response = 0;
12940 if (!pIocbOut->context3)
12941 break;
12942 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12943 sizeof(struct ulp_bde64);
12944 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12945 bpl = (struct ulp_bde64 *)dmabuf->virt;
12946 for (i = 0; i < numBdes; i++) {
12947 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12948 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12949 max_response += bde.tus.f.bdeSize;
12950 }
12951 break;
12952 default:
12953 max_response = wcqe->total_data_placed;
12954 break;
12955 }
12956 if (max_response < wcqe->total_data_placed)
12957 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12958 else
12959 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12960 wcqe->total_data_placed;
695a814e 12961 }
341af102 12962
acd6859b
JS
12963 /* Convert BG errors for completion status */
12964 if (status == CQE_STATUS_DI_ERROR) {
12965 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12966
12967 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12968 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12969 else
12970 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12971
12972 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12973 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12974 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12975 BGS_GUARD_ERR_MASK;
12976 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12977 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12978 BGS_APPTAG_ERR_MASK;
12979 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12980 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12981 BGS_REFTAG_ERR_MASK;
12982
12983 /* Check to see if there was any good data before the error */
12984 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12985 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12986 BGS_HI_WATER_MARK_PRESENT_MASK;
12987 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12988 wcqe->total_data_placed;
12989 }
12990
12991 /*
12992 * Set ALL the error bits to indicate we don't know what
12993 * type of error it is.
12994 */
12995 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12996 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12997 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12998 BGS_GUARD_ERR_MASK);
12999 }
13000
341af102
JS
13001 /* Pick up HBA exchange busy condition */
13002 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13003 spin_lock_irqsave(&phba->hbalock, iflags);
13004 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13005 spin_unlock_irqrestore(&phba->hbalock, iflags);
13006 }
4f774513
JS
13007}
13008
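/*
 * Illustrative sketch only: the BDE walk used in the CMD_GEN_REQUEST64_CR
 * branch above to bound the response length. BDE words are little-endian
 * in host memory, so each is swapped before the size field is read. The
 * helper name is hypothetical.
 */
static uint32_t lpfc_example_bpl_total_len(struct ulp_bde64 *bpl, int numBdes)
{
	struct ulp_bde64 bde;
	uint32_t total = 0;
	int i;

	for (i = 0; i < numBdes; i++) {
		bde.tus.w = le32_to_cpu(bpl[i].tus.w);
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			total += bde.tus.f.bdeSize;
	}
	return total;
}
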
45ed1190
JS
13009/**
13010 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13011 * @phba: Pointer to HBA context object.
13012 * @wcqe: Pointer to work-queue completion queue entry.
13013 *
13014 * This routine handles an ELS work-queue completion event and constructs
13015 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13016 * discovery engine to handle.
13017 *
13018 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13019 **/
13020static struct lpfc_iocbq *
13021lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13022 struct lpfc_iocbq *irspiocbq)
13023{
895427bd 13024 struct lpfc_sli_ring *pring;
45ed1190
JS
13025 struct lpfc_iocbq *cmdiocbq;
13026 struct lpfc_wcqe_complete *wcqe;
13027 unsigned long iflags;
13028
895427bd 13029 pring = lpfc_phba_elsring(phba);
1234a6d5
DK
13030 if (unlikely(!pring))
13031 return NULL;
895427bd 13032
45ed1190 13033 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
45ed1190
JS
13034 pring->stats.iocb_event++;
13035 /* Look up the ELS command IOCB and create pseudo response IOCB */
13036 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13037 bf_get(lpfc_wcqe_c_request_tag, wcqe));
45ed1190
JS
13038 if (unlikely(!cmdiocbq)) {
13039 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13040 "0386 ELS complete with no corresponding "
401bb416
DK
13041 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13042 wcqe->word0, wcqe->total_data_placed,
13043 wcqe->parameter, wcqe->word3);
45ed1190
JS
13044 lpfc_sli_release_iocbq(phba, irspiocbq);
13045 return NULL;
13046 }
13047
e2a8be56 13048 spin_lock_irqsave(&pring->ring_lock, iflags);
401bb416
DK
13049 /* Put the iocb back on the txcmplq */
13050 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13051 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13052
45ed1190 13053 /* Fake the irspiocbq and copy necessary response information */
341af102 13054 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
45ed1190
JS
13055
13056 return irspiocbq;
13057}
13058
8a5ca109
JS
13059inline struct lpfc_cq_event *
13060lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13061{
13062 struct lpfc_cq_event *cq_event;
13063
13064 /* Allocate a new internal CQ_EVENT entry */
13065 cq_event = lpfc_sli4_cq_event_alloc(phba);
13066 if (!cq_event) {
13067 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13068 "0602 Failed to alloc CQ_EVENT entry\n");
13069 return NULL;
13070 }
13071
13072 /* Move the CQE into the event */
13073 memcpy(&cq_event->cqe, entry, size);
13074 return cq_event;
13075}
13076
04c68496
JS
13077/**
13078 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13079 * @phba: Pointer to HBA context object.
13080 * @cqe: Pointer to mailbox completion queue entry.
13081 *
13082 * This routine processes a mailbox completion queue entry with an asynchronous
13083 * event.
13084 *
13085 * Return: true if work posted to worker thread, otherwise false.
13086 **/
13087static bool
13088lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13089{
13090 struct lpfc_cq_event *cq_event;
13091 unsigned long iflags;
13092
13093 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13094 "0392 Async Event: word0:x%x, word1:x%x, "
13095 "word2:x%x, word3:x%x\n", mcqe->word0,
13096 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13097
8a5ca109
JS
13098 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13099 if (!cq_event)
04c68496 13100 return false;
04c68496
JS
13101 spin_lock_irqsave(&phba->hbalock, iflags);
13102 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13103 /* Set the async event flag */
13104 phba->hba_flag |= ASYNC_EVENT;
13105 spin_unlock_irqrestore(&phba->hbalock, iflags);
13106
13107 return true;
13108}
13109
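/*
 * Illustrative sketch only: the consuming side of the sp_asynce_work_queue
 * list populated above, modeled on lpfc_sli4_els_xri_abort_event_proc().
 * The driver's real worker routine lives elsewhere; this shows only the
 * produce/consume handshake around ASYNC_EVENT and the hbalock.
 */
static void lpfc_example_drain_async_events(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* ... dispatch on the MCQE trailer's async event code ... */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
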
13110/**
13111 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13112 * @phba: Pointer to HBA context object.
13113 * @cqe: Pointer to mailbox completion queue entry.
13114 *
13115 * This routine processes a mailbox completion queue entry with a mailbox
13116 * completion event.
13117 *
13118 * Return: true if work posted to worker thread, otherwise false.
13119 **/
13120static bool
13121lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13122{
13123 uint32_t mcqe_status;
13124 MAILBOX_t *mbox, *pmbox;
13125 struct lpfc_mqe *mqe;
13126 struct lpfc_vport *vport;
13127 struct lpfc_nodelist *ndlp;
13128 struct lpfc_dmabuf *mp;
13129 unsigned long iflags;
13130 LPFC_MBOXQ_t *pmb;
13131 bool workposted = false;
13132 int rc;
13133
13134 /* If not a mailbox completion MCQE, exit via the mailbox consumed check */
13135 if (!bf_get(lpfc_trailer_completed, mcqe))
13136 goto out_no_mqe_complete;
13137
13138 /* Get the reference to the active mbox command */
13139 spin_lock_irqsave(&phba->hbalock, iflags);
13140 pmb = phba->sli.mbox_active;
13141 if (unlikely(!pmb)) {
13142 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13143 "1832 No pending MBOX command to handle\n");
13144 spin_unlock_irqrestore(&phba->hbalock, iflags);
13145 goto out_no_mqe_complete;
13146 }
13147 spin_unlock_irqrestore(&phba->hbalock, iflags);
13148 mqe = &pmb->u.mqe;
13149 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13150 mbox = phba->mbox;
13151 vport = pmb->vport;
13152
13153 /* Reset heartbeat timer */
13154 phba->last_completion_time = jiffies;
13155 del_timer(&phba->sli.mbox_tmo);
13156
13157 /* Move mbox data to caller's mailbox region, do endian swapping */
13158 if (pmb->mbox_cmpl && mbox)
48f8fdb4 13159 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
04c68496 13160
73d91e50
JS
13161 /*
13162 * For mcqe errors, conditionally move a modified error code to
13163 * the mbox so that the error will not be missed.
13164 */
13165 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13166 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13167 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13168 bf_set(lpfc_mqe_status, mqe,
13169 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13170 }
04c68496
JS
13171 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13172 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13173 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13174 "MBOX dflt rpi: status:x%x rpi:x%x",
13175 mcqe_status,
13176 pmbox->un.varWords[0], 0);
13177 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
3e1f0718
JS
13178 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13179 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
04c68496
JS
13180 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13181 * rid of the RPI using the same mbox buffer.
13182 */
13183 lpfc_unreg_login(phba, vport->vpi,
13184 pmbox->un.varWords[0], pmb);
13185 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3e1f0718
JS
13186 pmb->ctx_buf = mp;
13187 pmb->ctx_ndlp = ndlp;
04c68496
JS
13188 pmb->vport = vport;
13189 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13190 if (rc != MBX_BUSY)
13191 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13192 LOG_SLI, "0385 rc should "
13193 "have been MBX_BUSY\n");
13194 if (rc != MBX_NOT_FINISHED)
13195 goto send_current_mbox;
13196 }
13197 }
13198 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13199 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13200 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13201
13202 /* There is mailbox completion work to do */
13203 spin_lock_irqsave(&phba->hbalock, iflags);
13204 __lpfc_mbox_cmpl_put(phba, pmb);
13205 phba->work_ha |= HA_MBATT;
13206 spin_unlock_irqrestore(&phba->hbalock, iflags);
13207 workposted = true;
13208
13209send_current_mbox:
13210 spin_lock_irqsave(&phba->hbalock, iflags);
13211 /* Release the mailbox command posting token */
13212 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13213 /* Setting active mailbox pointer need to be in sync to flag clear */
13214 phba->sli.mbox_active = NULL;
13215 spin_unlock_irqrestore(&phba->hbalock, iflags);
13216 /* Wake up worker thread to post the next pending mailbox command */
13217 lpfc_worker_wake_up(phba);
13218out_no_mqe_complete:
13219 if (bf_get(lpfc_trailer_consumed, mcqe))
13220 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13221 return workposted;
13222}
13223
13224/**
13225 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13226 * @phba: Pointer to HBA context object.
13227 * @cqe: Pointer to mailbox completion queue entry.
13228 *
13229 * This routine processes a mailbox completion queue entry; it invokes the
13230 * proper mailbox completion handling or asynchronous event handling routine
13231 * according to the MCQE's async bit.
13232 *
13233 * Return: true if work posted to worker thread, otherwise false.
13234 **/
13235static bool
32517fc0
JS
13236lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13237 struct lpfc_cqe *cqe)
04c68496
JS
13238{
13239 struct lpfc_mcqe mcqe;
13240 bool workposted;
13241
32517fc0
JS
13242 cq->CQ_mbox++;
13243
04c68496 13244 /* Copy the mailbox MCQE and convert endian order as needed */
48f8fdb4 13245 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
04c68496
JS
13246
13247 /* Invoke the proper event handling routine */
13248 if (!bf_get(lpfc_trailer_async, &mcqe))
13249 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13250 else
13251 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13252 return workposted;
13253}
13254
4f774513
JS
13255/**
13256 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13257 * @phba: Pointer to HBA context object.
2a76a283 13258 * @cq: Pointer to associated CQ
4f774513
JS
13259 * @wcqe: Pointer to work-queue completion queue entry.
13260 *
13261 * This routine handles an ELS work-queue completion event.
13262 *
13263 * Return: true if work posted to worker thread, otherwise false.
13264 **/
13265static bool
2a76a283 13266lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13267 struct lpfc_wcqe_complete *wcqe)
13268{
4f774513
JS
13269 struct lpfc_iocbq *irspiocbq;
13270 unsigned long iflags;
2a76a283 13271 struct lpfc_sli_ring *pring = cq->pring;
0e9bb8d7
JS
13272 int txq_cnt = 0;
13273 int txcmplq_cnt = 0;
13274 int fcp_txcmplq_cnt = 0;
4f774513 13275
11f0e34f
JS
13276 /* Check for response status */
13277 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13278 /* Log the error status */
13279 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13280 "0357 ELS CQE error: status=x%x: "
13281 "CQE: %08x %08x %08x %08x\n",
13282 bf_get(lpfc_wcqe_c_status, wcqe),
13283 wcqe->word0, wcqe->total_data_placed,
13284 wcqe->parameter, wcqe->word3);
13285 }
13286
45ed1190 13287 /* Get an irspiocbq for later ELS response processing use */
4f774513
JS
13288 irspiocbq = lpfc_sli_get_iocbq(phba);
13289 if (!irspiocbq) {
0e9bb8d7
JS
13290 if (!list_empty(&pring->txq))
13291 txq_cnt++;
13292 if (!list_empty(&pring->txcmplq))
13293 txcmplq_cnt++;
4f774513 13294 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2a9bf3d0
JS
13295 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13296 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
0e9bb8d7
JS
13297 txq_cnt, phba->iocb_cnt,
13298 fcp_txcmplq_cnt,
13299 txcmplq_cnt);
45ed1190 13300 return false;
4f774513 13301 }
4f774513 13302
45ed1190
JS
13303 /* Save off the slow-path queue event for work thread to process */
13304 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
4f774513 13305 spin_lock_irqsave(&phba->hbalock, iflags);
4d9ab994 13306 list_add_tail(&irspiocbq->cq_event.list,
45ed1190
JS
13307 &phba->sli4_hba.sp_queue_event);
13308 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513 13309 spin_unlock_irqrestore(&phba->hbalock, iflags);
4f774513 13310
45ed1190 13311 return true;
4f774513
JS
13312}
13313
13314/**
13315 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13316 * @phba: Pointer to HBA context object.
13317 * @wcqe: Pointer to work-queue completion queue entry.
13318 *
3f8b6fb7 13319 * This routine handles a slow-path WQ entry consumed event by invoking the
4f774513
JS
13320 * proper WQ release routine to the slow-path WQ.
13321 **/
13322static void
13323lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13324 struct lpfc_wcqe_release *wcqe)
13325{
2e90f4b5
JS
13326 /* sanity check on queue memory */
13327 if (unlikely(!phba->sli4_hba.els_wq))
13328 return;
4f774513
JS
13329 /* Check for the slow-path ELS work queue */
13330 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13331 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13332 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13333 else
13334 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13335 "2579 Slow-path wqe consume event carries "
13336 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13337 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13338 phba->sli4_hba.els_wq->queue_id);
13339}
13340
13341/**
13342 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13343 * @phba: Pointer to HBA context object.
13344 * @cq: Pointer to a WQ completion queue.
13345 * @wcqe: Pointer to work-queue completion queue entry.
13346 *
13347 * This routine handles an XRI abort event.
13348 *
13349 * Return: true if work posted to worker thread, otherwise false.
13350 **/
13351static bool
13352lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13353 struct lpfc_queue *cq,
13354 struct sli4_wcqe_xri_aborted *wcqe)
13355{
13356 bool workposted = false;
13357 struct lpfc_cq_event *cq_event;
13358 unsigned long iflags;
13359
4f774513
JS
13360 switch (cq->subtype) {
13361 case LPFC_FCP:
5e5b511d
JS
13362 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
13363 workposted = false;
4f774513 13364 break;
422c4cb7 13365 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
4f774513 13366 case LPFC_ELS:
8a5ca109
JS
13367 cq_event = lpfc_cq_event_setup(
13368 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13369 if (!cq_event)
13370 return false;
5e5b511d 13371 cq_event->hdwq = cq->hdwq;
4f774513
JS
13372 spin_lock_irqsave(&phba->hbalock, iflags);
13373 list_add_tail(&cq_event->list,
13374 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13375 /* Set the els xri abort event flag */
13376 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13377 spin_unlock_irqrestore(&phba->hbalock, iflags);
13378 workposted = true;
13379 break;
318083ad 13380 case LPFC_NVME:
8a5ca109
JS
13381 /* Notify aborted XRI for NVME work queue */
13382 if (phba->nvmet_support)
13383 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13384 else
5e5b511d 13385 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
8a5ca109
JS
13386
13387 workposted = false;
318083ad 13388 break;
4f774513
JS
13389 default:
13390 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
318083ad
JS
13391 "0603 Invalid CQ subtype %d: "
13392 "%08x %08x %08x %08x\n",
13393 cq->subtype, wcqe->word0, wcqe->parameter,
13394 wcqe->word2, wcqe->word3);
4f774513
JS
13395 workposted = false;
13396 break;
13397 }
13398 return workposted;
13399}
13400
e817e5d7
JS
13401#define FC_RCTL_MDS_DIAGS 0xF4
13402
4f774513
JS
13403/**
13404 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13405 * @phba: Pointer to HBA context object.
13406 * @rcqe: Pointer to receive-queue completion queue entry.
13407 *
13408 * This routine processes a receive-queue completion queue entry.
13409 *
13410 * Return: true if work posted to worker thread, otherwise false.
13411 **/
13412static bool
4d9ab994 13413lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
4f774513 13414{
4f774513 13415 bool workposted = false;
e817e5d7 13416 struct fc_frame_header *fc_hdr;
4f774513
JS
13417 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13418 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
547077a4 13419 struct lpfc_nvmet_tgtport *tgtp;
4f774513 13420 struct hbq_dmabuf *dma_buf;
7851fe2c 13421 uint32_t status, rq_id;
4f774513
JS
13422 unsigned long iflags;
13423
2e90f4b5
JS
13424 /* sanity check on queue memory */
13425 if (unlikely(!hrq) || unlikely(!drq))
13426 return workposted;
13427
7851fe2c
JS
13428 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13429 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13430 else
13431 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13432 if (rq_id != hrq->queue_id)
4f774513
JS
13433 goto out;
13434
4d9ab994 13435 status = bf_get(lpfc_rcqe_status, rcqe);
4f774513
JS
13436 switch (status) {
13437 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13438 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13439 "2537 Receive Frame Truncated!!\n");
5bd5f66c 13440 /* fall through */
4f774513
JS
13441 case FC_STATUS_RQ_SUCCESS:
13442 spin_lock_irqsave(&phba->hbalock, iflags);
cbc5de1b 13443 lpfc_sli4_rq_release(hrq, drq);
4f774513
JS
13444 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13445 if (!dma_buf) {
b84daac9 13446 hrq->RQ_no_buf_found++;
4f774513
JS
13447 spin_unlock_irqrestore(&phba->hbalock, iflags);
13448 goto out;
13449 }
b84daac9 13450 hrq->RQ_rcv_buf++;
547077a4 13451 hrq->RQ_buf_posted--;
4d9ab994 13452 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
895427bd 13453
e817e5d7
JS
13454 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13455
13456 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13457 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13458 spin_unlock_irqrestore(&phba->hbalock, iflags);
13459 /* Handle MDS Loopback frames */
13460 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13461 break;
13462 }
13463
13464 /* save off the frame for the work thread to process */
4d9ab994 13465 list_add_tail(&dma_buf->cq_event.list,
45ed1190 13466 &phba->sli4_hba.sp_queue_event);
4f774513 13467 /* Frame received */
45ed1190 13468 phba->hba_flag |= HBA_SP_QUEUE_EVT;
4f774513
JS
13469 spin_unlock_irqrestore(&phba->hbalock, iflags);
13470 workposted = true;
13471 break;
4f774513 13472 case FC_STATUS_INSUFF_BUF_FRM_DISC:
547077a4
JS
13473 if (phba->nvmet_support) {
13474 tgtp = phba->targetport->private;
13475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13476 "6402 RQE Error x%x, posted %d err_cnt "
13477 "%d: %x %x %x\n",
13478 status, hrq->RQ_buf_posted,
13479 hrq->RQ_no_posted_buf,
13480 atomic_read(&tgtp->rcv_fcp_cmd_in),
13481 atomic_read(&tgtp->rcv_fcp_cmd_out),
13482 atomic_read(&tgtp->xmt_fcp_release));
13483 }
13484 /* fallthrough */
13485
13486 case FC_STATUS_INSUFF_BUF_NEED_BUF:
b84daac9 13487 hrq->RQ_no_posted_buf++;
4f774513
JS
13488 /* Post more buffers if possible */
13489 spin_lock_irqsave(&phba->hbalock, iflags);
13490 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13491 spin_unlock_irqrestore(&phba->hbalock, iflags);
13492 workposted = true;
13493 break;
13494 }
13495out:
13496 return workposted;
4f774513
JS
13497}
13498
4d9ab994
JS
13499/**
13500 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13501 * @phba: Pointer to HBA context object.
13502 * @cq: Pointer to the completion queue.
32517fc0 13503 * @cqe: Pointer to a completion queue entry.
4d9ab994 13504 *
25985edc 13505 * This routine processes a slow-path work-queue or receive-queue completion queue
4d9ab994
JS
13506 * entry.
13507 *
13508 * Return: true if work posted to worker thread, otherwise false.
13509 **/
13510static bool
13511lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13512 struct lpfc_cqe *cqe)
13513{
45ed1190 13514 struct lpfc_cqe cqevt;
4d9ab994
JS
13515 bool workposted = false;
13516
13517 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 13518 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
4d9ab994
JS
13519
13520 /* Check and process for different type of WCQE and dispatch */
45ed1190 13521 switch (bf_get(lpfc_cqe_code, &cqevt)) {
4d9ab994 13522 case CQE_CODE_COMPL_WQE:
45ed1190 13523 /* Process the WQ/RQ complete event */
bc73905a 13524 phba->last_completion_time = jiffies;
2a76a283 13525 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
45ed1190 13526 (struct lpfc_wcqe_complete *)&cqevt);
4d9ab994
JS
13527 break;
13528 case CQE_CODE_RELEASE_WQE:
13529 /* Process the WQ release event */
13530 lpfc_sli4_sp_handle_rel_wcqe(phba,
45ed1190 13531 (struct lpfc_wcqe_release *)&cqevt);
4d9ab994
JS
13532 break;
13533 case CQE_CODE_XRI_ABORTED:
13534 /* Process the WQ XRI abort event */
bc73905a 13535 phba->last_completion_time = jiffies;
4d9ab994 13536 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
45ed1190 13537 (struct sli4_wcqe_xri_aborted *)&cqevt);
4d9ab994
JS
13538 break;
13539 case CQE_CODE_RECEIVE:
7851fe2c 13540 case CQE_CODE_RECEIVE_V1:
4d9ab994 13541 /* Process the RQ event */
bc73905a 13542 phba->last_completion_time = jiffies;
4d9ab994 13543 workposted = lpfc_sli4_sp_handle_rcqe(phba,
45ed1190 13544 (struct lpfc_rcqe *)&cqevt);
4d9ab994
JS
13545 break;
13546 default:
13547 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13548 "0388 Not a valid WCQE code: x%x\n",
45ed1190 13549 bf_get(lpfc_cqe_code, &cqevt));
4d9ab994
JS
13550 break;
13551 }
13552 return workposted;
13553}
13554
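/*
 * Illustrative sketch only: why the CQE is snapshotted before dispatch, as
 * done above. The DMA'd entry may need byte swapping and can be reused by
 * the hardware once credited back, so a local CPU-order copy is taken
 * first (lpfc_sli4_pcimem_bcopy performs any endian conversion). The
 * helper name is hypothetical.
 */
static uint32_t lpfc_example_cqe_code(struct lpfc_cqe *cqe)
{
	struct lpfc_cqe snapshot;

	lpfc_sli4_pcimem_bcopy(cqe, &snapshot, sizeof(snapshot));
	return bf_get(lpfc_cqe_code, &snapshot);
}
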
4f774513
JS
13555/**
13556 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13557 * @phba: Pointer to HBA context object.
13558 * @eqe: Pointer to fast-path event queue entry.
13559 *
13560 * This routine processes an event queue entry from the slow-path event queue.
13561 * It will check the MajorCode and MinorCode to determine whether this is for a
13562 * completion event on a completion queue; if not, an error is logged and the
13563 * routine just returns. Otherwise, it will get to the corresponding completion
13564 * queue, process all the entries on that completion queue, rearm the
13565 * completion queue, and then return.
13566 *
13567 **/
f485c18d 13568static void
67d12733
JS
13569lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13570 struct lpfc_queue *speq)
4f774513 13571{
67d12733 13572 struct lpfc_queue *cq = NULL, *childq;
4f774513
JS
13573 uint16_t cqid;
13574
4f774513 13575 /* Get the reference to the corresponding CQ */
cb5172ea 13576 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
4f774513 13577
4f774513
JS
13578 list_for_each_entry(childq, &speq->child_list, list) {
13579 if (childq->queue_id == cqid) {
13580 cq = childq;
13581 break;
13582 }
13583 }
13584 if (unlikely(!cq)) {
75baf696
JS
13585 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13586 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13587 "0365 Slow-path CQ identifier "
13588 "(%d) does not exist\n", cqid);
f485c18d 13589 return;
4f774513
JS
13590 }
13591
895427bd
JS
13592 /* Save EQ associated with this CQ */
13593 cq->assoc_qp = speq;
13594
6a828b0f 13595 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
f485c18d
DK
13596 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13597 "0390 Cannot schedule soft IRQ "
13598 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
d6d189ce 13599 cqid, cq->queue_id, raw_smp_processor_id());
f485c18d
DK
13600}
13601
13602/**
32517fc0 13603 * __lpfc_sli4_process_cq - Process elements of a CQ
f485c18d 13604 * @phba: Pointer to HBA context object.
32517fc0
JS
13605 * @cq: Pointer to CQ to be processed
13606 * @handler: Routine to process each cqe
13607 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
f485c18d 13608 *
32517fc0
JS
13609 * This routine processes completion queue entries in a CQ. While a valid
13610 * queue element is found, the handler is called. During processing checks
13611 * are made for periodic doorbell writes to let the hardware know of
13612 * element consumption.
13613 *
13614 * If the max limit on cqes to process is hit, or there are no more valid
13615 * entries, the loop stops. If we processed a sufficient number of elements,
13616 * meaning there is sufficient load, rather than rearming and generating
13617 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13618 * indicates no rescheduling.
f485c18d 13619 *
32517fc0 13620 * Returns True if work scheduled, False otherwise.
f485c18d 13621 **/
32517fc0
JS
13622static bool
13623__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13624 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13625 struct lpfc_cqe *), unsigned long *delay)
f485c18d 13626{
f485c18d
DK
13627 struct lpfc_cqe *cqe;
13628 bool workposted = false;
32517fc0
JS
13629 int count = 0, consumed = 0;
13630 bool arm = true;
13631
13632 /* default - no reschedule */
13633 *delay = 0;
13634
13635 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13636 goto rearm_and_exit;
f485c18d 13637
4f774513 13638 /* Process all the entries to the CQ */
d74a89aa 13639 cq->q_flag = 0;
32517fc0
JS
13640 cqe = lpfc_sli4_cq_get(cq);
13641 while (cqe) {
32517fc0
JS
13642 workposted |= handler(phba, cq, cqe);
13643 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13644
13645 consumed++;
13646 if (!(++count % cq->max_proc_limit))
13647 break;
13648
13649 if (!(count % cq->notify_interval)) {
13650 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13651 LPFC_QUEUE_NOARM);
13652 consumed = 0;
13653 }
13654
d74a89aa
JS
13655 if (count == LPFC_NVMET_CQ_NOTIFY)
13656 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13657
32517fc0
JS
13658 cqe = lpfc_sli4_cq_get(cq);
13659 }
13660 if (count >= phba->cfg_cq_poll_threshold) {
13661 *delay = 1;
13662 arm = false;
13663 }
13664
13665 /* Track the max number of CQEs processed in 1 EQ */
13666 if (count > cq->CQ_max_cqe)
13667 cq->CQ_max_cqe = count;
13668
13669 cq->assoc_qp->EQ_cqe_cnt += count;
13670
13671 /* Catch the no cq entry condition */
13672 if (unlikely(count == 0))
13673 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13674 "0369 No entry from completion queue "
13675 "qid=%d\n", cq->queue_id);
13676
13677 cq->queue_claimed = 0;
13678
13679rearm_and_exit:
13680 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13681 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13682
13683 return workposted;
13684}
13685
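/*
 * Illustrative sketch only: the cmpxchg() claim at the top of
 * __lpfc_sli4_process_cq() that lets at most one context (hard IRQ,
 * workqueue, or delayed work) walk a CQ at a time. The helper name is
 * hypothetical.
 */
static bool lpfc_example_claim_cq(struct lpfc_queue *cq)
{
	/* queue_claimed: 0 = free, 1 = owned; true means we won the race */
	return cmpxchg(&cq->queue_claimed, 0, 1) == 0;
}
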
13686/**
13687 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13688 * @cq: pointer to CQ to process
13689 *
13690 * This routine calls the cq processing routine with a handler specific
13691 * to the type of queue bound to it.
13692 *
13693 * The CQ routine returns two values: the first is the calling status,
13694 * which indicates whether work was queued to the background discovery
13695 * thread. If true, the routine should wakeup the discovery thread;
13696 * the second is the delay parameter. If non-zero, rather than rearming
13697 * the CQ and yet another interrupt, the CQ handler should be queued so
13698 * that it is processed in a subsequent polling action. The value of
13699 * the delay indicates when to reschedule it.
13700 **/
13701static void
13702__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13703{
13704 struct lpfc_hba *phba = cq->phba;
13705 unsigned long delay;
13706 bool workposted = false;
13707
13708 /* Process and rearm the CQ */
4f774513
JS
13709 switch (cq->type) {
13710 case LPFC_MCQ:
32517fc0
JS
13711 workposted |= __lpfc_sli4_process_cq(phba, cq,
13712 lpfc_sli4_sp_handle_mcqe,
13713 &delay);
4f774513
JS
13714 break;
13715 case LPFC_WCQ:
32517fc0
JS
13716 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
13717 workposted |= __lpfc_sli4_process_cq(phba, cq,
13718 lpfc_sli4_fp_handle_cqe,
13719 &delay);
13720 else
13721 workposted |= __lpfc_sli4_process_cq(phba, cq,
13722 lpfc_sli4_sp_handle_cqe,
13723 &delay);
4f774513
JS
13724 break;
13725 default:
13726 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13727 "0370 Invalid completion queue type (%d)\n",
13728 cq->type);
f485c18d 13729 return;
4f774513
JS
13730 }
13731
32517fc0
JS
13732 if (delay) {
13733 if (!queue_delayed_work_on(cq->chann, phba->wq,
13734 &cq->sched_spwork, delay))
13735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13736 "0394 Cannot schedule soft IRQ "
13737 "for cqid=%d on CPU %d\n",
13738 cq->queue_id, cq->chann);
13739 }
4f774513
JS
13740
13741 /* wake up worker thread if there are works to be done */
13742 if (workposted)
13743 lpfc_worker_wake_up(phba);
13744}
13745
32517fc0
JS
13746/**
13747 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13748 * interrupt
13749 * @work: pointer to work element
13750 *
13751 * translates from the work handler and calls the slow-path handler.
13752 **/
13753static void
13754lpfc_sli4_sp_process_cq(struct work_struct *work)
13755{
13756 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13757
13758 __lpfc_sli4_sp_process_cq(cq);
13759}
13760
13761/**
13762 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13763 * @work: pointer to work element
13764 *
13765 * translates from the work handler and calls the slow-path handler.
13766 **/
13767static void
13768lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13769{
13770 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13771 struct lpfc_queue, sched_spwork);
13772
13773 __lpfc_sli4_sp_process_cq(cq);
13774}
13775
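/*
 * Illustrative sketch only: how the two slow-path work items above would
 * be bound to a CQ at init time using the standard kernel work APIs. The
 * driver's actual init site is elsewhere in the code.
 */
static void lpfc_example_init_cq_work(struct lpfc_queue *cq)
{
	INIT_WORK(&cq->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&cq->sched_spwork, lpfc_sli4_dly_sp_process_cq);
}
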
4f774513
JS
13776/**
13777 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
2a76a283
JS
13778 * @phba: Pointer to HBA context object.
13779 * @cq: Pointer to associated CQ
13780 * @wcqe: Pointer to work-queue completion queue entry.
4f774513
JS
13781 *
13782 * This routine processes a fast-path work queue completion entry from the fast-path
13783 * event queue for FCP command response completion.
13784 **/
13785static void
2a76a283 13786lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13787 struct lpfc_wcqe_complete *wcqe)
13788{
2a76a283 13789 struct lpfc_sli_ring *pring = cq->pring;
4f774513
JS
13790 struct lpfc_iocbq *cmdiocbq;
13791 struct lpfc_iocbq irspiocbq;
13792 unsigned long iflags;
13793
4f774513
JS
13794 /* Check for response status */
13795 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13796 /* If resource errors reported from HBA, reduce queue
13797 * depth of the SCSI device.
13798 */
e3d2b802
JS
13799 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13800 IOSTAT_LOCAL_REJECT)) &&
13801 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13802 IOERR_NO_RESOURCES))
4f774513 13803 phba->lpfc_rampdown_queue_depth(phba);
e3d2b802 13804
4f774513 13805 /* Log the error status */
11f0e34f
JS
13806 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13807 "0373 FCP CQE error: status=x%x: "
13808 "CQE: %08x %08x %08x %08x\n",
4f774513 13809 bf_get(lpfc_wcqe_c_status, wcqe),
11f0e34f
JS
13810 wcqe->word0, wcqe->total_data_placed,
13811 wcqe->parameter, wcqe->word3);
4f774513
JS
13812 }
13813
13814 /* Look up the FCP command IOCB and create pseudo response IOCB */
7e56aa25
JS
13815 spin_lock_irqsave(&pring->ring_lock, iflags);
13816 pring->stats.iocb_event++;
e2a8be56 13817 spin_unlock_irqrestore(&pring->ring_lock, iflags);
4f774513
JS
13818 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13819 bf_get(lpfc_wcqe_c_request_tag, wcqe));
4f774513
JS
13820 if (unlikely(!cmdiocbq)) {
13821 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13822 "0374 FCP complete with no corresponding "
13823 "cmdiocb: iotag (%d)\n",
13824 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13825 return;
13826 }
c8a4ce0b
DK
13827#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13828 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13829#endif
895427bd
JS
13830 if (cmdiocbq->iocb_cmpl == NULL) {
13831 if (cmdiocbq->wqe_cmpl) {
13832 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13833 spin_lock_irqsave(&phba->hbalock, iflags);
13834 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13835 spin_unlock_irqrestore(&phba->hbalock, iflags);
13836 }
13837
13838 /* Pass the cmd_iocb and the wcqe to the upper layer */
13839 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13840 return;
13841 }
4f774513
JS
13842 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13843 "0375 FCP cmdiocb not callback function "
13844 "iotag: (%d)\n",
13845 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13846 return;
13847 }
13848
13849 /* Fake the irspiocb and copy necessary response information */
341af102 13850 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
4f774513 13851
0f65ff68
JS
13852 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13853 spin_lock_irqsave(&phba->hbalock, iflags);
13854 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13855 spin_unlock_irqrestore(&phba->hbalock, iflags);
13856 }
13857
4f774513
JS
13858 /* Pass the cmd_iocb and the rsp state to the upper layer */
13859 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13860}
13861
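/*
 * Illustrative sketch only: the two completion callback styles serviced
 * above. SLI-4 native paths set wqe_cmpl and receive the raw WCQE; legacy
 * paths set iocb_cmpl and receive a faked response IOCB built by
 * lpfc_sli4_iocb_param_transfer(). The helper name is hypothetical.
 */
static void lpfc_example_complete_io(struct lpfc_hba *phba,
				     struct lpfc_iocbq *cmdiocbq,
				     struct lpfc_wcqe_complete *wcqe,
				     struct lpfc_iocbq *rspiocbq)
{
	if (cmdiocbq->wqe_cmpl)
		(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
	else if (cmdiocbq->iocb_cmpl)
		(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, rspiocbq);
}
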
13862/**
13863 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13864 * @phba: Pointer to HBA context object.
13865 * @cq: Pointer to completion queue.
13866 * @wcqe: Pointer to work-queue completion queue entry.
13867 *
3f8b6fb7 13868 * This routine handles a fast-path WQ entry consumed event by invoking the
4f774513
JS
13869 * proper WQ release routine to the slow-path WQ.
13870 **/
13871static void
13872lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13873 struct lpfc_wcqe_release *wcqe)
13874{
13875 struct lpfc_queue *childwq;
13876 bool wqid_matched = false;
895427bd 13877 uint16_t hba_wqid;
4f774513
JS
13878
13879 /* Check for fast-path FCP work queue release */
895427bd 13880 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
4f774513 13881 list_for_each_entry(childwq, &cq->child_list, list) {
895427bd 13882 if (childwq->queue_id == hba_wqid) {
4f774513
JS
13883 lpfc_sli4_wq_release(childwq,
13884 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
6e8e1c14
JS
13885 if (childwq->q_flag & HBA_NVMET_WQFULL)
13886 lpfc_nvmet_wqfull_process(phba, childwq);
4f774513
JS
13887 wqid_matched = true;
13888 break;
13889 }
13890 }
13891 /* Report warning log message if no match found */
13892 if (wqid_matched != true)
13893 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13894 "2580 Fast-path wqe consume event carries "
895427bd 13895 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
4f774513
JS
13896}
13897
13898/**
2d7dbc4c
JS
13899 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13900 * @phba: Pointer to HBA context object.
13901 * @rcqe: Pointer to receive-queue completion queue entry.
4f774513 13902 *
2d7dbc4c
JS
13903 * This routine processes a receive-queue completion queue entry.
13904 *
13905 * Return: true if work posted to worker thread, otherwise false.
13906 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fallthrough */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}

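/*
 * Illustrative sketch (not part of the driver): the FCP frame sanity check
 * above rebuilds the 24-bit F_CTL field from its three header bytes and
 * requires FIRST_SEQ, END_SEQ, and SEQ_INIT to all be set. Assuming the
 * fc_fs.h bit positions (FIRST_SEQ bit 21, END_SEQ bit 19, SEQ_INIT
 * bit 16), that mask is 0x290000, so a hypothetical well-formed
 * single-sequence command frame passes like this:
 *
 *	uint32_t fctl = 0x29 << 16;	// hypothetical fh_f_ctl bytes
 *	uint32_t mask = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 *
 *	if ((fctl & mask) == mask && fh_seq_cnt == 0)
 *		;	// accepted; any other frame is dropped above
 */
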
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		if (cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue that owns this entry.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine this is for a
 * completion event on a completion queue, if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and process all the entries on the completion queue, rearm the
 * completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}

/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
 * @cq: Pointer to CQ to be processed
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and taking yet another interrupt, the CQ handler should be
 * queued so that it is processed in a subsequent polling action. The
 * value of the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_irqwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * interrupt
 * @work: pointer to work element
 *
 * Translates from the work element to the owning CQ and calls the
 * fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the delayed work element to the owning CQ and calls the
 * fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					    struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index
 * is equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;
	uint32_t icnt;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = phba->sli4_hba.eq_info;
	icnt = this_cpu_inc_return(eqi->icnt);
	fpeq->last_cpu = raw_smp_processor_id();

	if (icnt > LPFC_EQD_ISR_TRIGGER &&
	    phba->cfg_irq_chann == 1 &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
	return;
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

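/*
 * Illustrative usage (a sketch under assumptions, not driver code): a
 * caller pairs lpfc_sli4_queue_alloc() with a later *_create mailbox call,
 * and with lpfc_sli4_queue_free() on failure. The EQ sizing fields and
 * LPFC_DEFAULT_PAGE_SIZE below are assumed to match how this driver
 * generation sizes its EQs elsewhere:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount, cpu);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, imax))	// create on the HBA
 *		lpfc_sli4_queue_free(eq);	// frees host memory only
 */
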
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: requested amount of delay, in microseconds
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to HBA. The @startq
 * is used to get the starting EQ index to change. The @numq value is
 * used to specify how many consecutive EQ indexes, starting at EQ index,
 * are to be changed. This function is asynchronous and will wait for any
 * mailbox commands to finish before returning.
 *
 * This function does not return a status. If the mailbox buffer cannot be
 * allocated, or if the MODIFY_EQ_DELAY mailbox command fails, an error is
 * logged; in the failure case some EQs may already have had their delay
 * multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return;
}

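/*
 * Illustrative arithmetic (assumption: LPFC_DMULT_CONST == 651042,
 * LPFC_SEC_TO_USEC == 1000000, and LPFC_DMULT_MAX == 1023, per the
 * lpfc_sli4.h of this driver generation). A request of usdelay = 100
 * maps to the mailbox delay multiplier as:
 *
 *	dmult = (100 * 651042) / 1000000;	// = 65
 *	if (dmult)
 *		dmult--;			// = 64
 *	if (dmult > LPFC_DMULT_MAX)
 *		dmult = LPFC_DMULT_MAX;		// capped at 1023
 *
 * so a zero usdelay leaves dmult at 0 (no coalescing) and oversized
 * requests saturate instead of overflowing the mailbox field.
 */
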
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

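/*
 * Illustrative sketch (hypothetical helper, not in the driver): the queue
 * create routines in this file all repeat the same "extract the cfg_shdr
 * status from an embedded SLI4 config mailbox" idiom, which factored out
 * would look roughly like:
 *
 *	static int lpfc_check_cfg_shdr(union lpfc_sli4_cfg_shdr *shdr, int rc)
 *	{
 *		uint32_t status = bf_get(lpfc_mbox_hdr_status,
 *					 &shdr->response);
 *		uint32_t add = bf_get(lpfc_mbox_hdr_add_status,
 *				      &shdr->response);
 *
 *		return (status || add || rc) ? -ENXIO : 0;
 *	}
 *
 * The driver keeps the checks open-coded so each caller can emit its own
 * numbered log message (2500, 2501, 2502, ...).
 */
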
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. This function is asynchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* fall through */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

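/*
 * Illustrative mapping (drawn from the switch above, for reference): the
 * CQE count is not passed to CQ_CREATE as a raw number on the base
 * interface; it is encoded:
 *
 *	256        -> LPFC_CQ_CNT_256
 *	512        -> LPFC_CQ_CNT_512
 *	1024       -> LPFC_CQ_CNT_1024
 *	2048/4096  -> LPFC_CQ_CNT_WORD7 plus a raw count in the context
 *	              (CQ_CREATE version 2 only)
 *
 * Unsupported sizes of 256 or more log the "0361" error and then fall
 * back to the smallest encoding; sizes below 256 fail with -EINVAL.
 */
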
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for each queue. The
 * @hdwq array is used to indicate which event queue to bind each completion
 * queue to. This function will send the CREATE_CQ_SET mailbox command to the
 * HBA to setup the completion queues. This function is asynchronous and will
 * wait for the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* fall through */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* fall through - otherwise default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

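/*
 * Illustrative note on the 16-way switch above: CREATE_CQ_SET carries one
 * eq_id field per CQ at a fixed word position in the mailbox layout, so
 * the per-index cases cannot collapse into array indexing. A hypothetical
 * layout that allowed it would reduce the switch to a single line:
 *
 *	cq_set->u.request.eq_id[idx] = eq->queue_id;	// NOT the real layout
 *
 * Since the MRQ CQ set is bounded (at most 16 entries in this driver
 * generation), the explicit cases mirror the command format exactly.
 */
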
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
04c68496
JS
15233lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15234 struct lpfc_queue *cq, uint32_t subtype)
15235{
15236 struct lpfc_mbx_mq_create *mq_create;
b19a061a 15237 struct lpfc_mbx_mq_create_ext *mq_create_ext;
04c68496
JS
15238 struct lpfc_dmabuf *dmabuf;
15239 LPFC_MBOXQ_t *mbox;
15240 int rc, length, status = 0;
15241 uint32_t shdr_status, shdr_add_status;
15242 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15243 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 15244
2e90f4b5
JS
15245 /* sanity check on queue memory */
15246 if (!mq || !cq)
15247 return -ENODEV;
49198b37
JS
15248 if (!phba->sli4_hba.pc_sli4_params.supported)
15249 hw_page_size = SLI4_PAGE_SIZE;
b19a061a 15250
04c68496
JS
15251 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15252 if (!mbox)
15253 return -ENOMEM;
b19a061a 15254 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
04c68496
JS
15255 sizeof(struct lpfc_sli4_cfg_mhdr));
15256 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
b19a061a 15257 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
04c68496 15258 length, LPFC_SLI4_MBX_EMBED);
b19a061a
JS
15259
15260 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
5a6f133e 15261 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
70f3c073
JS
15262 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15263 &mq_create_ext->u.request, mq->page_count);
15264 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15265 &mq_create_ext->u.request, 1);
15266 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
b19a061a
JS
15267 &mq_create_ext->u.request, 1);
15268 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15269 &mq_create_ext->u.request, 1);
70f3c073
JS
15270 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15271 &mq_create_ext->u.request, 1);
15272 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15273 &mq_create_ext->u.request, 1);
b19a061a 15274 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
5a6f133e
JS
15275 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15276 phba->sli4_hba.pc_sli4_params.mqv);
15277 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15278 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15279 cq->queue_id);
15280 else
15281 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15282 cq->queue_id);
04c68496
JS
15283 switch (mq->entry_count) {
15284 default:
15285 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15286 "0362 Unsupported MQ count. (%d)\n",
15287 mq->entry_count);
4f4c1863
JS
15288 if (mq->entry_count < 16) {
15289 status = -EINVAL;
15290 goto out;
15291 }
5bd5f66c 15292 /* fall through - otherwise default to smallest count */
04c68496 15293 case 16:
5a6f133e
JS
15294 bf_set(lpfc_mq_context_ring_size,
15295 &mq_create_ext->u.request.context,
15296 LPFC_MQ_RING_SIZE_16);
04c68496
JS
15297 break;
15298 case 32:
5a6f133e
JS
15299 bf_set(lpfc_mq_context_ring_size,
15300 &mq_create_ext->u.request.context,
15301 LPFC_MQ_RING_SIZE_32);
04c68496
JS
15302 break;
15303 case 64:
5a6f133e
JS
15304 bf_set(lpfc_mq_context_ring_size,
15305 &mq_create_ext->u.request.context,
15306 LPFC_MQ_RING_SIZE_64);
04c68496
JS
15307 break;
15308 case 128:
5a6f133e
JS
15309 bf_set(lpfc_mq_context_ring_size,
15310 &mq_create_ext->u.request.context,
15311 LPFC_MQ_RING_SIZE_128);
04c68496
JS
15312 break;
15313 }
15314 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 15315 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 15316 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 15317 putPaddrLow(dmabuf->phys);
b19a061a 15318 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
04c68496
JS
15319 putPaddrHigh(dmabuf->phys);
15320 }
15321 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
b19a061a
JS
15322 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15323 &mq_create_ext->u.response);
15324 if (rc != MBX_SUCCESS) {
15325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15326 "2795 MQ_CREATE_EXT failed with "
15327 "status x%x. Failback to MQ_CREATE.\n",
15328 rc);
15329 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15330 mq_create = &mbox->u.mqe.un.mq_create;
15331 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15332 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15333 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15334 &mq_create->u.response);
15335 }

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
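
/*
 * Caller-side sketch (illustrative only, not a verbatim call site from
 * this driver): the queue memory is allocated first and the create is
 * then issued against an existing CQ. The allocation helper's size and
 * count arguments below are assumptions, not confirmed constants.
 *
 *	struct lpfc_queue *mq;
 *
 *	mq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   LPFC_MQE_SIZE, LPFC_MQE_DEF_COUNT);
 *	if (!mq || lpfc_mq_create(phba, mq, cq, LPFC_MBOX))
 *		goto out_error;		// -ENOMEM or -ENXIO
 *
 * On success the MQ is linked onto cq->child_list and is torn down
 * later with lpfc_mq_destroy().
 */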

/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
a2fc4aef 15387int
4f774513
JS
15388lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15389 struct lpfc_queue *cq, uint32_t subtype)
15390{
15391 struct lpfc_mbx_wq_create *wq_create;
15392 struct lpfc_dmabuf *dmabuf;
15393 LPFC_MBOXQ_t *mbox;
15394 int rc, length, status = 0;
15395 uint32_t shdr_status, shdr_add_status;
15396 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15397 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
5a6f133e 15398 struct dma_address *page;
962bc51b
JS
15399 void __iomem *bar_memmap_p;
15400 uint32_t db_offset;
15401 uint16_t pci_barset;
1351e69f
JS
15402 uint8_t dpp_barset;
15403 uint32_t dpp_offset;
15404 unsigned long pg_addr;
81b96eda 15405 uint8_t wq_create_version;
49198b37 15406
2e90f4b5
JS
15407 /* sanity check on queue memory */
15408 if (!wq || !cq)
15409 return -ENODEV;
49198b37 15410 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 15411 hw_page_size = wq->page_size;
4f774513
JS
15412
15413 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15414 if (!mbox)
15415 return -ENOMEM;
15416 length = (sizeof(struct lpfc_mbx_wq_create) -
15417 sizeof(struct lpfc_sli4_cfg_mhdr));
15418 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15419 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15420 length, LPFC_SLI4_MBX_EMBED);
15421 wq_create = &mbox->u.mqe.un.wq_create;
5a6f133e 15422 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
4f774513
JS
15423 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15424 wq->page_count);
15425 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15426 cq->queue_id);
0c651878
JS
15427
15428 /* wqv is the earliest version supported, NOT the latest */
5a6f133e
JS
15429 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15430 phba->sli4_hba.pc_sli4_params.wqv);
962bc51b 15431
	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		/* Request DPP by default */
		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		page = wq_create->u.request.page;
		break;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}

	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
				      &wq_create->u.response);
	else
		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
				      &wq_create->u.response_1);

	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	wq->db_format = LPFC_DB_LIST_FORMAT;
	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
					       &wq_create->u.response);
			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3265 WQ[%d] doorbell format "
						"not supported: x%x\n",
						wq->queue_id, wq->db_format);
				status = -EINVAL;
				goto out;
			}
			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
					    &wq_create->u.response);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   pci_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3263 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, pci_barset);
				status = -ENOMEM;
				goto out;
			}
			db_offset = wq_create->u.response.doorbell_offset;
			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3252 WQ[%d] doorbell offset "
						"not supported: x%x\n",
						wq->queue_id, db_offset);
				status = -EINVAL;
				goto out;
			}
			wq->db_regaddr = bar_memmap_p + db_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3264 WQ[%d]: barset:x%x, offset:x%x, "
					"format:x%x\n", wq->queue_id,
					pci_barset, db_offset, wq->db_format);
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	} else {
		/* Check if DPP was honored by the firmware */
		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
					&wq_create->u.response_1);
		if (wq->dpp_enable) {
			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
					    &wq_create->u.response_1);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   pci_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3267 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, pci_barset);
				status = -ENOMEM;
				goto out;
			}
			db_offset = wq_create->u.response_1.doorbell_offset;
			wq->db_regaddr = bar_memmap_p + db_offset;
			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
					    &wq_create->u.response_1);
			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
					    &wq_create->u.response_1);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   dpp_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3268 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, dpp_barset);
				status = -ENOMEM;
				goto out;
			}
			dpp_offset = wq_create->u.response_1.dpp_offset;
			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3271 WQ[%d]: barset:x%x, offset:x%x, "
					"dpp_id:x%x dpp_barset:x%x "
					"dpp_offset:x%x\n",
					wq->queue_id, pci_barset, db_offset,
					wq->dpp_id, dpp_barset, dpp_offset);

			/* Enable combined writes for DPP aperture */
			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
#ifdef CONFIG_X86
			rc = set_memory_wc(pg_addr, 1);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3272 Cannot setup Combined "
						"Write on WQ[%d] - disable DPP\n",
						wq->queue_id);
				phba->cfg_enable_dpp = 0;
			}
#else
			phba->cfg_enable_dpp = 0;
#endif
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (wq->pring == NULL) {
		status = -ENOMEM;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
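
/*
 * Hedged sketch of how the doorbell address set up above is used by the
 * WQE submission path: after copying a WQE into the ring, the driver
 * posts it with a write to wq->db_regaddr. The bitfield names follow
 * the lpfc_hw4.h conventions; treat this as an illustration, not a
 * verbatim copy of lpfc_sli4_wq_put().
 *
 *	struct lpfc_register doorbell;
 *
 *	doorbell.word0 = 0;
 *	bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
 *	bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
 *	bf_set(lpfc_wq_db_list_fm_id, &doorbell, wq->queue_id);
 *	writel(doorbell.word0, wq->db_regaddr);
 */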

/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the queue indicating its functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair. This function is synchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
a2fc4aef 15654int
4f774513
JS
15655lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15656 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15657{
15658 struct lpfc_mbx_rq_create *rq_create;
15659 struct lpfc_dmabuf *dmabuf;
15660 LPFC_MBOXQ_t *mbox;
15661 int rc, length, status = 0;
15662 uint32_t shdr_status, shdr_add_status;
15663 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15664 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
962bc51b
JS
15665 void __iomem *bar_memmap_p;
15666 uint32_t db_offset;
15667 uint16_t pci_barset;
49198b37 15668
2e90f4b5
JS
15669 /* sanity check on queue memory */
15670 if (!hrq || !drq || !cq)
15671 return -ENODEV;
49198b37
JS
15672 if (!phba->sli4_hba.pc_sli4_params.supported)
15673 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
15674
15675 if (hrq->entry_count != drq->entry_count)
15676 return -EINVAL;
15677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15678 if (!mbox)
15679 return -ENOMEM;
15680 length = (sizeof(struct lpfc_mbx_rq_create) -
15681 sizeof(struct lpfc_sli4_cfg_mhdr));
15682 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15683 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15684 length, LPFC_SLI4_MBX_EMBED);
15685 rq_create = &mbox->u.mqe.un.rq_create;
5a6f133e
JS
15686 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15687 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15688 phba->sli4_hba.pc_sli4_params.rqv);
15689 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15690 bf_set(lpfc_rq_context_rqe_count_1,
15691 &rq_create->u.request.context,
15692 hrq->entry_count);
15693 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
c31098ce
JS
15694 bf_set(lpfc_rq_context_rqe_size,
15695 &rq_create->u.request.context,
15696 LPFC_RQE_SIZE_8);
15697 bf_set(lpfc_rq_context_page_size,
15698 &rq_create->u.request.context,
8ea73db4 15699 LPFC_RQ_PAGE_SIZE_4096);
5a6f133e
JS
15700 } else {
15701 switch (hrq->entry_count) {
15702 default:
15703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15704 "2535 Unsupported RQ count. (%d)\n",
15705 hrq->entry_count);
4f4c1863
JS
15706 if (hrq->entry_count < 512) {
15707 status = -EINVAL;
15708 goto out;
15709 }
5bd5f66c 15710 /* fall through - otherwise default to smallest count */
5a6f133e
JS
15711 case 512:
15712 bf_set(lpfc_rq_context_rqe_count,
15713 &rq_create->u.request.context,
15714 LPFC_RQ_RING_SIZE_512);
15715 break;
15716 case 1024:
15717 bf_set(lpfc_rq_context_rqe_count,
15718 &rq_create->u.request.context,
15719 LPFC_RQ_RING_SIZE_1024);
15720 break;
15721 case 2048:
15722 bf_set(lpfc_rq_context_rqe_count,
15723 &rq_create->u.request.context,
15724 LPFC_RQ_RING_SIZE_2048);
15725 break;
15726 case 4096:
15727 bf_set(lpfc_rq_context_rqe_count,
15728 &rq_create->u.request.context,
15729 LPFC_RQ_RING_SIZE_4096);
15730 break;
15731 }
15732 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15733 LPFC_HDR_BUF_SIZE);
4f774513
JS
15734 }
15735 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15736 cq->queue_id);
15737 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15738 hrq->page_count);
4f774513 15739 list_for_each_entry(dmabuf, &hrq->page_list, list) {
49198b37 15740 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
15741 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15742 putPaddrLow(dmabuf->phys);
15743 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15744 putPaddrHigh(dmabuf->phys);
15745 }
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
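
/*
 * Caller-side sketch (illustrative; LPFC_USOL is the unsolicited-frame
 * subtype used elsewhere in this driver, and the queue names are
 * placeholders): header and data RQs are always created as a pair bound
 * to one CQ.
 *
 *	if (lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL))
 *		goto out_destroy;	// -ENODEV/-EINVAL/-ENOMEM/-ENXIO
 *
 * Received frames then land their header in hdr_rq and payload in
 * dat_rq, with completions arriving on els_cq.
 */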

/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The subtype of the queues indicating their functionality.
 *
 * This function creates a set of receive buffer queue pairs, as detailed in
 * @hrqp and @drqp, on a port, described by @phba, by sending a single
 * RQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drqp and @hrqp
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cqp array indicates which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pairs. This function is synchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
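
/*
 * Worked example of the queue-id assignment above (numbers are
 * illustrative): if the firmware returns base id 100 for numrq == 2,
 * the loop yields
 *
 *	hrqp[0]->queue_id = 100;	drqp[0]->queue_id = 101;
 *	hrqp[1]->queue_id = 102;	drqp[1]->queue_id = 103;
 *
 * i.e. the header/data queues of each pair occupy consecutive ids.
 */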

/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
a2fc4aef 16125int
4f774513
JS
16126lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16127{
16128 LPFC_MBOXQ_t *mbox;
16129 int rc, length, status = 0;
16130 uint32_t shdr_status, shdr_add_status;
16131 union lpfc_sli4_cfg_shdr *shdr;
16132
2e90f4b5 16133 /* sanity check on queue memory */
4f774513
JS
16134 if (!eq)
16135 return -ENODEV;
32517fc0 16136
4f774513
JS
16137 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16138 if (!mbox)
16139 return -ENOMEM;
16140 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16141 sizeof(struct lpfc_sli4_cfg_mhdr));
16142 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16143 LPFC_MBOX_OPCODE_EQ_DESTROY,
16144 length, LPFC_SLI4_MBX_EMBED);
16145 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16146 eq->queue_id);
16147 mbox->vport = eq->phba->pport;
16148 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16149
16150 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16151 /* The IOCTL status is embedded in the mailbox subheader. */
16152 shdr = (union lpfc_sli4_cfg_shdr *)
16153 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16154 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16155 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16156 if (shdr_status || shdr_add_status || rc) {
16157 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16158 "2505 EQ_DESTROY mailbox failed with "
16159 "status x%x add_status x%x, mbx status x%x\n",
16160 shdr_status, shdr_add_status, rc);
16161 status = -ENXIO;
16162 }
16163
16164 /* Remove eq from any list */
16165 list_del_init(&eq->list);
8fa38513 16166 mempool_free(mbox, eq->phba->mbox_mem_pool);
4f774513
JS
16167 return status;
16168}
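
/*
 * Teardown-order sketch (a hedged illustration that follows from the
 * child_list linkage set up by the create routines above): queues are
 * destroyed leaf-first, so a caller typically unwinds as
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_rq_destroy(phba, hdr_rq, dat_rq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);
 *
 * destroying each CQ only after its child WQs/RQs are gone.
 */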

/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
a2fc4aef 16182int
4f774513
JS
16183lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16184{
16185 LPFC_MBOXQ_t *mbox;
16186 int rc, length, status = 0;
16187 uint32_t shdr_status, shdr_add_status;
16188 union lpfc_sli4_cfg_shdr *shdr;
16189
2e90f4b5 16190 /* sanity check on queue memory */
4f774513
JS
16191 if (!cq)
16192 return -ENODEV;
16193 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16194 if (!mbox)
16195 return -ENOMEM;
16196 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16197 sizeof(struct lpfc_sli4_cfg_mhdr));
16198 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16199 LPFC_MBOX_OPCODE_CQ_DESTROY,
16200 length, LPFC_SLI4_MBX_EMBED);
16201 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16202 cq->queue_id);
16203 mbox->vport = cq->phba->pport;
16204 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16205 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16206 /* The IOCTL status is embedded in the mailbox subheader. */
16207 shdr = (union lpfc_sli4_cfg_shdr *)
16208 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16209 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16210 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16211 if (shdr_status || shdr_add_status || rc) {
16212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16213 "2506 CQ_DESTROY mailbox failed with "
16214 "status x%x add_status x%x, mbx status x%x\n",
16215 shdr_status, shdr_add_status, rc);
16216 status = -ENXIO;
16217 }
16218 /* Remove cq from any list */
16219 list_del_init(&cq->list);
8fa38513 16220 mempool_free(mbox, cq->phba->mbox_mem_pool);
4f774513
JS
16221 return status;
16222}

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
a2fc4aef 16236int
04c68496
JS
16237lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16238{
16239 LPFC_MBOXQ_t *mbox;
16240 int rc, length, status = 0;
16241 uint32_t shdr_status, shdr_add_status;
16242 union lpfc_sli4_cfg_shdr *shdr;
16243
2e90f4b5 16244 /* sanity check on queue memory */
04c68496
JS
16245 if (!mq)
16246 return -ENODEV;
16247 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16248 if (!mbox)
16249 return -ENOMEM;
16250 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16251 sizeof(struct lpfc_sli4_cfg_mhdr));
16252 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16253 LPFC_MBOX_OPCODE_MQ_DESTROY,
16254 length, LPFC_SLI4_MBX_EMBED);
16255 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16256 mq->queue_id);
16257 mbox->vport = mq->phba->pport;
16258 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16259 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16260 /* The IOCTL status is embedded in the mailbox subheader. */
16261 shdr = (union lpfc_sli4_cfg_shdr *)
16262 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16263 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16264 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16265 if (shdr_status || shdr_add_status || rc) {
16266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16267 "2507 MQ_DESTROY mailbox failed with "
16268 "status x%x add_status x%x, mbx status x%x\n",
16269 shdr_status, shdr_add_status, rc);
16270 status = -ENXIO;
16271 }
16272 /* Remove mq from any list */
16273 list_del_init(&mq->list);
8fa38513 16274 mempool_free(mbox, mq->phba->mbox_mem_pool);
04c68496
JS
16275 return status;
16276}

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
a2fc4aef 16290int
4f774513
JS
16291lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16292{
16293 LPFC_MBOXQ_t *mbox;
16294 int rc, length, status = 0;
16295 uint32_t shdr_status, shdr_add_status;
16296 union lpfc_sli4_cfg_shdr *shdr;
16297
2e90f4b5 16298 /* sanity check on queue memory */
4f774513
JS
16299 if (!wq)
16300 return -ENODEV;
16301 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16302 if (!mbox)
16303 return -ENOMEM;
16304 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16305 sizeof(struct lpfc_sli4_cfg_mhdr));
16306 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16307 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16308 length, LPFC_SLI4_MBX_EMBED);
16309 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16310 wq->queue_id);
16311 mbox->vport = wq->phba->pport;
16312 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16313 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16314 shdr = (union lpfc_sli4_cfg_shdr *)
16315 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16316 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16317 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16318 if (shdr_status || shdr_add_status || rc) {
16319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16320 "2508 WQ_DESTROY mailbox failed with "
16321 "status x%x add_status x%x, mbx status x%x\n",
16322 shdr_status, shdr_add_status, rc);
16323 status = -ENXIO;
16324 }
16325 /* Remove wq from any list */
16326 list_del_init(&wq->list);
d1f525aa
JS
16327 kfree(wq->pring);
16328 wq->pring = NULL;
8fa38513 16329 mempool_free(mbox, wq->phba->mbox_mem_pool);
4f774513
JS
16330 return status;
16331}

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending a mailbox command, specific to the type of queue, to the
 * HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
a2fc4aef 16345int
4f774513
JS
16346lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16347 struct lpfc_queue *drq)
16348{
16349 LPFC_MBOXQ_t *mbox;
16350 int rc, length, status = 0;
16351 uint32_t shdr_status, shdr_add_status;
16352 union lpfc_sli4_cfg_shdr *shdr;
16353
2e90f4b5 16354 /* sanity check on queue memory */
4f774513
JS
16355 if (!hrq || !drq)
16356 return -ENODEV;
16357 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16358 if (!mbox)
16359 return -ENOMEM;
16360 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 16361 sizeof(struct lpfc_sli4_cfg_mhdr));
4f774513
JS
16362 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16363 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16364 length, LPFC_SLI4_MBX_EMBED);
16365 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16366 hrq->queue_id);
16367 mbox->vport = hrq->phba->pport;
16368 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16369 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16370 /* The IOCTL status is embedded in the mailbox subheader. */
16371 shdr = (union lpfc_sli4_cfg_shdr *)
16372 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16373 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16374 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16375 if (shdr_status || shdr_add_status || rc) {
16376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16377 "2509 RQ_DESTROY mailbox failed with "
16378 "status x%x add_status x%x, mbx status x%x\n",
16379 shdr_status, shdr_add_status, rc);
16380 if (rc != MBX_TIMEOUT)
16381 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16382 return -ENXIO;
16383 }
16384 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16385 drq->queue_id);
16386 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16387 shdr = (union lpfc_sli4_cfg_shdr *)
16388 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16389 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16390 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16391 if (shdr_status || shdr_add_status || rc) {
16392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16393 "2510 RQ_DESTROY mailbox failed with "
16394 "status x%x add_status x%x, mbx status x%x\n",
16395 shdr_status, shdr_add_status, rc);
16396 status = -ENXIO;
16397 }
16398 list_del_init(&hrq->list);
16399 list_del_init(&drq->list);
8fa38513 16400 mempool_free(mbox, hrq->phba->mbox_mem_pool);
4f774513
JS
16401 return status;
16402}

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If you are going to map 2 SGLs then the first one must have 256 entries
 * and the second one can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* report -ENXIO so failures match the documented codes */
		return -ENXIO;
	}
	return 0;
}
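
/*
 * Hedged usage sketch: posting the SGL of a single pre-allocated ELS
 * sglq whose buffer fits in one page, so the second page address is 0.
 * sglq->phys and sglq->sli4_xritag are struct lpfc_sglq fields; the
 * surrounding error handling is illustrative.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)
 *		goto out_free_sglq;	// -EINVAL, -ENOMEM or -ENXIO
 */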

/**
 * lpfc_sli4_alloc_xri - Allocate an available xri from the driver's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to fetch the next available logical xri from
 * the driver's xri_bmask and mark it as in use. Because the index is
 * logical, the search starts at 0 on every allocation.
 *
 * Returns
 *	A logical xri index if one is available.
 *	NO_XRI if the pool is exhausted.
 **/
5d8b8167 16502static uint16_t
6d368e53
JS
16503lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16504{
16505 unsigned long xri;
16506
16507 /*
16508 * Fetch the next logical xri. Because this index is logical,
16509 * the driver starts at 0 each time.
16510 */
16511 spin_lock_irq(&phba->hbalock);
16512 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16513 phba->sli4_hba.max_cfg_param.max_xri, 0);
16514 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16515 spin_unlock_irq(&phba->hbalock);
16516 return NO_XRI;
16517 } else {
16518 set_bit(xri, phba->sli4_hba.xri_bmask);
16519 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 16520 }
6d368e53
JS
16521 spin_unlock_irq(&phba->hbalock);
16522 return xri;
16523}
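
/*
 * Pairing sketch (illustrative): every successful allocation must be
 * balanced by a matching free once the xri is retired, e.g.
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return -ENOMEM;		// bitmask exhausted
 *	...
 *	lpfc_sli4_free_xri(phba, xri);	// clears the bit under hbalock
 */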

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is expected to
 * hold the hbalock; lpfc_sli4_free_xri() below is the locked wrapper.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff), which is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
8a9d2e80 16592static int
895427bd 16593lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
8a9d2e80
JS
16594 struct list_head *post_sgl_list,
16595 int post_cnt)
4f774513 16596{
8a9d2e80 16597 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4f774513
JS
16598 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16599 struct sgl_page_pairs *sgl_pg_pairs;
16600 void *viraddr;
16601 LPFC_MBOXQ_t *mbox;
16602 uint32_t reqlen, alloclen, pg_pairs;
16603 uint32_t mbox_tmo;
8a9d2e80
JS
16604 uint16_t xritag_start = 0;
16605 int rc = 0;
4f774513
JS
16606 uint32_t shdr_status, shdr_add_status;
16607 union lpfc_sli4_cfg_shdr *shdr;
16608
895427bd 16609 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
4f774513 16610 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16611 if (reqlen > SLI4_PAGE_SIZE) {
895427bd 16612 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4f774513
JS
16613 "2559 Block sgl registration required DMA "
16614 "size (%d) great than a page\n", reqlen);
16615 return -ENOMEM;
16616 }

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				    LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
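
/*
 * Sizing note (a worked example under the assumption of a 4 KB
 * SLI4_PAGE_SIZE and a 16-byte struct sgl_page_pairs): the reqlen check
 * above caps how many sgl entries one non-embedded POST_SGL_PAGES
 * command can carry, roughly
 *
 *	post_cnt_max ~= (4096 - sizeof(union lpfc_sli4_cfg_shdr)
 *			 - sizeof(uint32_t)) / 16
 *
 * i.e. a bit over 250 page pairs per mailbox; larger batches must be
 * split by the caller.
 */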

/**
 * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to the IO buffer list.
 * @count: number of IO buffers on the list.
 *
 * This routine is invoked to post a block of @count IO buffer sgl pages from
 * the buffer list @nblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 *
 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
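
/*
 * Worked example (illustrative) for the xri-contiguity splitting done by
 * lpfc_sli4_post_io_sgl_list() below: for buffers carrying xris
 * 10, 11, 12, 20, 21, the hole between 12 and 20 closes the first block,
 * so {10, 11, 12} is posted with one non-embedded SGL block command and
 * {20, 21} starts a new block; a block that ends up holding a single
 * non-contiguous xri falls back to the embedded lpfc_sli4_post_sgl().
 */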

/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post them to the
 * port. For a single NVME buffer sgl with a non-contiguous xri, if any, it
 * uses the embedded SGL post mailbox command for posting instead. The
 * @post_nblist passed in must be a local list, thus no lock is needed when
 * manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
16812int
5e5b511d
JS
16813lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16814 struct list_head *post_nblist, int sb_count)
0794d601 16815{
c490850a 16816 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
0794d601
JS
16817 int status, sgl_size;
16818 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16819 dma_addr_t pdma_phys_sgl1;
16820 int last_xritag = NO_XRI;
16821 int cur_xritag;
0794d601
JS
16822 LIST_HEAD(prep_nblist);
16823 LIST_HEAD(blck_nblist);
16824 LIST_HEAD(nvme_nblist);
16825
16826 /* sanity check */
16827 if (sb_count <= 0)
16828 return -EINVAL;
16829
16830 sgl_size = phba->cfg_sg_dma_buf_size;
16831 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16832 list_del_init(&lpfc_ncmd->list);
16833 block_cnt++;
16834 if ((last_xritag != NO_XRI) &&
16835 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16836 /* a hole in xri block, form a sgl posting block */
16837 list_splice_init(&prep_nblist, &blck_nblist);
16838 post_cnt = block_cnt - 1;
16839 /* prepare list for next posting block */
16840 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16841 block_cnt = 1;
16842 } else {
16843 /* prepare list for next posting block */
16844 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16845 /* enough sgls for non-embed sgl mbox command */
16846 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16847 list_splice_init(&prep_nblist, &blck_nblist);
16848 post_cnt = block_cnt;
16849 block_cnt = 0;
16850 }
16851 }
16852 num_posting++;
16853 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16854
16855 /* end of repost sgl list condition for NVME buffers */
16856 if (num_posting == sb_count) {
16857 if (post_cnt == 0) {
16858 /* last sgl posting block */
16859 list_splice_init(&prep_nblist, &blck_nblist);
16860 post_cnt = block_cnt;
16861 } else if (block_cnt == 1) {
16862 /* last single sgl with non-contiguous xri */
16863 if (sgl_size > SGL_PAGE_SIZE)
16864 pdma_phys_sgl1 =
16865 lpfc_ncmd->dma_phys_sgl +
16866 SGL_PAGE_SIZE;
16867 else
16868 pdma_phys_sgl1 = 0;
16869 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16870 status = lpfc_sli4_post_sgl(
16871 phba, lpfc_ncmd->dma_phys_sgl,
16872 pdma_phys_sgl1, cur_xritag);
16873 if (status) {
c490850a
JS
16874 /* Post error. Buffer unavailable. */
16875 lpfc_ncmd->flags |=
16876 LPFC_SBUF_NOT_POSTED;
0794d601 16877 } else {
c490850a
JS
16878 /* Post success. Buffer available. */
16879 lpfc_ncmd->flags &=
16880 ~LPFC_SBUF_NOT_POSTED;
0794d601
JS
16881 lpfc_ncmd->status = IOSTAT_SUCCESS;
16882 num_posted++;
16883 }
16884 /* put on NVME buffer sgl list whether or not the post succeeded */
16885 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16886 }
16887 }
16888
16889 /* continue until a nembed page worth of sgls */
16890 if (post_cnt == 0)
16891 continue;
16892
16893 /* post block of NVME buffer list sgls */
5e5b511d
JS
16894 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16895 post_cnt);
0794d601
JS
16896
16897 /* don't reset xritag due to a hole in the xri block */
16898 if (block_cnt == 0)
16899 last_xritag = NO_XRI;
4f774513 16900
0794d601
JS
16901 /* reset NVME buffer post count for next round of posting */
16902 post_cnt = 0;
4f774513 16903
0794d601
JS
16904 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
16905 while (!list_empty(&blck_nblist)) {
16906 list_remove_head(&blck_nblist, lpfc_ncmd,
c490850a 16907 struct lpfc_io_buf, list);
0794d601 16908 if (status) {
c490850a
JS
16909 /* Post error. Mark buffer unavailable. */
16910 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
0794d601 16911 } else {
c490850a
JS
16912 /* Post success. Mark buffer available. */
16913 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
0794d601
JS
16914 lpfc_ncmd->status = IOSTAT_SUCCESS;
16915 num_posted++;
16916 }
16917 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16918 }
4f774513 16919 }
0794d601 16920 /* Push NVME buffers with sgl posted to the available list */
5e5b511d
JS
16921 lpfc_io_buf_replenish(phba, &nvme_nblist);
16922
0794d601 16923 return num_posted;
4f774513
JS
16924}
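/*
 * Editor's note: a toy model (not driver code) of the xri blocking policy
 * above: walk buffers in list order and close out a posting block when the
 * next xri is not last_xri + 1 (a hole) or when the block reaches the
 * non-embedded mailbox capacity. TOY_BLOCK_MAX stands in for
 * LPFC_NEMBED_MBOX_SGL_CNT and the values are illustrative only.
 */
#if 0
#include <stdio.h>

#define TOY_BLOCK_MAX 2

static void toy_group_xris(const int *xri, int cnt)
{
	int i, start = 0;

	for (i = 1; i <= cnt; i++) {
		/* end of input, hole in the xri run, or block full */
		if (i == cnt || xri[i] != xri[i - 1] + 1 ||
		    i - start == TOY_BLOCK_MAX) {
			printf("post block xri %d..%d (%d sgls)\n",
			       xri[start], xri[i - 1], i - start);
			start = i;
		}
	}
}

/* toy_group_xris((int[]){10, 11, 12, 14, 15}, 5) posts {10,11}, {12},
 * then {14,15} with TOY_BLOCK_MAX == 2. */
#endif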
16925
16926/**
16927 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16928 * @phba: pointer to lpfc_hba struct that the frame was received on
16929 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16930 *
16931 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16932 * valid type of frame that the LPFC driver will handle. This function will
16933 * return a zero if the frame is a valid frame or a non zero value when the
16934 * frame does not pass the check.
16935 **/
16936static int
16937lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16938{
474ffb74 16939 /* make rctl_names static to save stack space */
4f774513 16940 struct fc_vft_header *fc_vft_hdr;
546fc854 16941 uint32_t *header = (uint32_t *) fc_hdr;
4f774513
JS
16942
16943 switch (fc_hdr->fh_r_ctl) {
16944 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16945 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16946 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16947 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16948 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16949 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16950 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16951 case FC_RCTL_DD_CMD_STATUS: /* command status */
16952 case FC_RCTL_ELS_REQ: /* extended link services request */
16953 case FC_RCTL_ELS_REP: /* extended link services reply */
16954 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16955 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16956 case FC_RCTL_BA_NOP: /* basic link service NOP */
16957 case FC_RCTL_BA_ABTS: /* basic link service abort */
16958 case FC_RCTL_BA_RMC: /* remove connection */
16959 case FC_RCTL_BA_ACC: /* basic accept */
16960 case FC_RCTL_BA_RJT: /* basic reject */
16961 case FC_RCTL_BA_PRMT: /* preempted */
16962 case FC_RCTL_ACK_1: /* acknowledge_1 */
16963 case FC_RCTL_ACK_0: /* acknowledge_0 */
16964 case FC_RCTL_P_RJT: /* port reject */
16965 case FC_RCTL_F_RJT: /* fabric reject */
16966 case FC_RCTL_P_BSY: /* port busy */
16967 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16968 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16969 case FC_RCTL_LCR: /* link credit reset */
ae9e28f3 16970 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
4f774513
JS
16971 case FC_RCTL_END: /* end */
16972 break;
16973 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16974 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16975 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16976 return lpfc_fc_frame_check(phba, fc_hdr);
16977 default:
16978 goto drop;
16979 }
ae9e28f3 16980
4f774513
JS
16981 switch (fc_hdr->fh_type) {
16982 case FC_TYPE_BLS:
16983 case FC_TYPE_ELS:
16984 case FC_TYPE_FCP:
16985 case FC_TYPE_CT:
895427bd 16986 case FC_TYPE_NVME:
4f774513
JS
16987 break;
16988 case FC_TYPE_IP:
16989 case FC_TYPE_ILS:
16990 default:
16991 goto drop;
16992 }
546fc854 16993
4f774513 16994 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
78e1d200 16995 "2538 Received frame rctl:x%x, type:x%x, "
88f43a08 16996 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
78e1d200
JS
16997 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16998 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16999 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17000 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17001 be32_to_cpu(header[6]));
4f774513
JS
17002 return 0;
17003drop:
17004 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
78e1d200
JS
17005 "2539 Dropped frame rctl:x%x type:x%x\n",
17006 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
4f774513
JS
17007 return 1;
17008}
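/*
 * Editor's note: a condensed standalone model (not driver code) of the
 * whitelist idea above: unknown r_ctl or type values fall through to the
 * default case and the frame is dropped. The type codes shown are the
 * FC-FS values from the kernel's fc_fs.h; the subset here is illustrative.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool toy_fh_type_ok(uint8_t fh_type)
{
	switch (fh_type) {
	case 0x00:	/* FC_TYPE_BLS */
	case 0x01:	/* FC_TYPE_ELS */
	case 0x08:	/* FC_TYPE_FCP */
	case 0x20:	/* FC_TYPE_CT */
	case 0x28:	/* FC_TYPE_NVME */
		return true;
	default:	/* FC_TYPE_IP, FC_TYPE_ILS, anything else: drop */
		return false;
	}
}
#endif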
17009
17010/**
17011 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17012 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17013 *
17014 * This function processes the FC header to retrieve the VFI from the VF
17015 * header, if one exists. This function will return the VFI if one exists
17016 * or 0 if no VSAN Header exists.
17017 **/
17018static uint32_t
17019lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17020{
17021 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17022
17023 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17024 return 0;
17025 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17026}
17027
17028/**
17029 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17030 * @phba: Pointer to the HBA structure to search for the vport on
17031 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17032 * @fcfi: The FC Fabric ID that the frame came from
17033 *
17034 * This function searches the @phba for a vport that matches the content of the
17035 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17036 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17037 * returns the matching vport pointer or NULL if unable to match frame to a
17038 * vport.
17039 **/
17040static struct lpfc_vport *
17041lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
895427bd 17042 uint16_t fcfi, uint32_t did)
4f774513
JS
17043{
17044 struct lpfc_vport **vports;
17045 struct lpfc_vport *vport = NULL;
17046 int i;
939723a4 17047
bf08611b
JS
17048 if (did == Fabric_DID)
17049 return phba->pport;
939723a4
JS
17050 if ((phba->pport->fc_flag & FC_PT2PT) &&
17051 !(phba->link_state == LPFC_HBA_READY))
17052 return phba->pport;
17053
4f774513 17054 vports = lpfc_create_vport_work_array(phba);
895427bd 17055 if (vports != NULL) {
4f774513
JS
17056 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17057 if (phba->fcf.fcfi == fcfi &&
17058 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17059 vports[i]->fc_myDID == did) {
17060 vport = vports[i];
17061 break;
17062 }
17063 }
895427bd 17064 }
4f774513
JS
17065 lpfc_destroy_vport_work_array(phba, vports);
17066 return vport;
17067}
17068
45ed1190
JS
17069/**
17070 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17071 * @vport: The vport to work on.
17072 *
17073 * This function updates the receive sequence time stamp for this vport. The
17074 * receive sequence time stamp indicates the time that the last frame of
17075 * the sequence that has been idle for the longest amount of time was received.
17076 * The driver uses this time stamp to determine if any received sequences have
17077 * timed out.
17078 **/
5d8b8167 17079static void
45ed1190
JS
17080lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17081{
17082 struct lpfc_dmabuf *h_buf;
17083 struct hbq_dmabuf *dmabuf = NULL;
17084
17085 /* get the oldest sequence on the rcv list */
17086 h_buf = list_get_first(&vport->rcv_buffer_list,
17087 struct lpfc_dmabuf, list);
17088 if (!h_buf)
17089 return;
17090 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17091 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17092}
17093
17094/**
17095 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17096 * @vport: The vport that the received sequences were sent to.
17097 *
17098 * This function cleans up all outstanding received sequences. This is called
17099 * by the driver when a link event or user action invalidates all the received
17100 * sequences.
17101 **/
17102void
17103lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17104{
17105 struct lpfc_dmabuf *h_buf, *hnext;
17106 struct lpfc_dmabuf *d_buf, *dnext;
17107 struct hbq_dmabuf *dmabuf = NULL;
17108
17109 /* start with the oldest sequence on the rcv list */
17110 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17111 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17112 list_del_init(&dmabuf->hbuf.list);
17113 list_for_each_entry_safe(d_buf, dnext,
17114 &dmabuf->dbuf.list, list) {
17115 list_del_init(&d_buf->list);
17116 lpfc_in_buf_free(vport->phba, d_buf);
17117 }
17118 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17119 }
17120}
17121
17122/**
17123 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17124 * @vport: The vport that the received sequences were sent to.
17125 *
17126 * This function determines whether any received sequences have timed out by
17127 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17128 * indicates that there is at least one timed out sequence this routine will
17129 * go through the received sequences one at a time from most inactive to most
17130 * active to determine which ones need to be cleaned up. Once it has determined
17131 * that a sequence needs to be cleaned up it will simply free up the resources
17132 * without sending an abort.
17133 **/
17134void
17135lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17136{
17137 struct lpfc_dmabuf *h_buf, *hnext;
17138 struct lpfc_dmabuf *d_buf, *dnext;
17139 struct hbq_dmabuf *dmabuf = NULL;
17140 unsigned long timeout;
17141 int abort_count = 0;
17142
17143 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17144 vport->rcv_buffer_time_stamp);
17145 if (list_empty(&vport->rcv_buffer_list) ||
17146 time_before(jiffies, timeout))
17147 return;
17148 /* start with the oldest sequence on the rcv list */
17149 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17150 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17151 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17152 dmabuf->time_stamp);
17153 if (time_before(jiffies, timeout))
17154 break;
17155 abort_count++;
17156 list_del_init(&dmabuf->hbuf.list);
17157 list_for_each_entry_safe(d_buf, dnext,
17158 &dmabuf->dbuf.list, list) {
17159 list_del_init(&d_buf->list);
17160 lpfc_in_buf_free(vport->phba, d_buf);
17161 }
17162 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17163 }
17164 if (abort_count)
17165 lpfc_update_rcv_time_stamp(vport);
17166}
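/*
 * Editor's note: a standalone sketch (not driver code) of the timeout
 * arithmetic above. The kernel's time_before() compares jiffies via signed
 * subtraction so the test stays correct across counter wraparound; the
 * helper names below are illustrative.
 */
#if 0
#include <stdbool.h>

/* same idiom as the kernel's time_before(a, b) for tick counters */
static bool toy_time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

/* a sequence is stale once "now" has passed stamp + edtov ticks */
static bool toy_seq_timed_out(unsigned long now, unsigned long stamp,
			      unsigned long edtov_ticks)
{
	return !toy_time_before(now, stamp + edtov_ticks);
}
#endif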
17167
4f774513
JS
17168/**
17169 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to the vport that this frame was received on
17170 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17171 *
17172 * This function searches through the existing incomplete sequences that have
17173 * been sent to this @vport. If the frame matches one of the incomplete
17174 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17175 * make up that sequence. If no sequence is found that matches this frame then
17176 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17177 * This function returns a pointer to the first dmabuf in the sequence list that
17178 * the frame was linked to.
17179 **/
17180static struct hbq_dmabuf *
17181lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17182{
17183 struct fc_frame_header *new_hdr;
17184 struct fc_frame_header *temp_hdr;
17185 struct lpfc_dmabuf *d_buf;
17186 struct lpfc_dmabuf *h_buf;
17187 struct hbq_dmabuf *seq_dmabuf = NULL;
17188 struct hbq_dmabuf *temp_dmabuf = NULL;
4360ca9c 17189 uint8_t found = 0;
4f774513 17190
4d9ab994 17191 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 17192 dmabuf->time_stamp = jiffies;
4f774513 17193 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
4360ca9c 17194
4f774513
JS
17195 /* Use the hdr_buf to find the sequence that this frame belongs to */
17196 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17197 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17198 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17199 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17200 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17201 continue;
17202 /* found a pending sequence that matches this frame */
17203 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17204 break;
17205 }
17206 if (!seq_dmabuf) {
17207 /*
17208 * This indicates first frame received for this sequence.
17209 * Queue the buffer on the vport's rcv_buffer_list.
17210 */
17211 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 17212 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17213 return dmabuf;
17214 }
17215 temp_hdr = seq_dmabuf->hbuf.virt;
eeead811
JS
17216 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17217 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4d9ab994
JS
17218 list_del_init(&seq_dmabuf->hbuf.list);
17219 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17220 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 17221 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17222 return dmabuf;
17223 }
45ed1190
JS
17224 /* move this sequence to the tail to indicate a young sequence */
17225 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17226 seq_dmabuf->time_stamp = jiffies;
17227 lpfc_update_rcv_time_stamp(vport);
eeead811
JS
17228 if (list_empty(&seq_dmabuf->dbuf.list)) {
17229 temp_hdr = dmabuf->hbuf.virt;
17230 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17231 return seq_dmabuf;
17232 }
4f774513 17233 /* find the correct place in the sequence to insert this frame */
4360ca9c
JS
17234 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17235 while (!found) {
4f774513
JS
17236 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17237 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17238 /*
17239 * If the frame's sequence count is greater than the frame on
17240 * the list then insert the frame right after this frame
17241 */
eeead811
JS
17242 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17243 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4f774513 17244 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
4360ca9c
JS
17245 found = 1;
17246 break;
4f774513 17247 }
4360ca9c
JS
17248
17249 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17250 break;
17251 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
4f774513 17252 }
4360ca9c
JS
17253
17254 if (found)
17255 return seq_dmabuf;
4f774513
JS
17256 return NULL;
17257}
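/*
 * Editor's note: a toy model (not driver code) of the in-order insert
 * above. Frames usually arrive in order, so the search starts at the tail
 * of the sequence and walks backwards until it finds an entry whose
 * sequence count is below the new frame's. A plain array of counts stands
 * in for the dbuf list.
 */
#if 0
static int toy_insert_pos(const unsigned short *seq_cnt, int n,
			  unsigned short new_cnt)
{
	int i;

	/* tail-first scan: the common case terminates in one step */
	for (i = n - 1; i >= 0; i--) {
		if (new_cnt > seq_cnt[i])
			return i + 1;	/* insert right after entry i */
	}
	return 0;			/* smaller than all: insert first */
}
#endif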
17258
6669f9bb
JS
17259/**
17260 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17261 * @vport: pointer to a virtual port
17262 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17263 *
17264 * This function tries to abort the partially assembled sequence described
17265 * by the information from the basic abort @dmabuf. It checks whether such
17266 * a partially assembled sequence is held by the driver. If so, it frees up all
17267 * the frames from the partially assembled sequence.
17268 *
17269 * Return
17270 * true -- if a matching partially assembled sequence was present and all
17271 * of its frames were freed;
17272 * false -- if no matching partially assembled sequence was present, so
17273 * nothing was aborted in the lower layer driver
17274 **/
17275static bool
17276lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17277 struct hbq_dmabuf *dmabuf)
17278{
17279 struct fc_frame_header *new_hdr;
17280 struct fc_frame_header *temp_hdr;
17281 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17282 struct hbq_dmabuf *seq_dmabuf = NULL;
17283
17284 /* Use the hdr_buf to find the sequence that matches this frame */
17285 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17286 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17287 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17288 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17289 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17290 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17291 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17292 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17293 continue;
17294 /* found a pending sequence that matches this frame */
17295 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17296 break;
17297 }
17298
17299 /* Free up all the frames from the partially assembled sequence */
17300 if (seq_dmabuf) {
17301 list_for_each_entry_safe(d_buf, n_buf,
17302 &seq_dmabuf->dbuf.list, list) {
17303 list_del_init(&d_buf->list);
17304 lpfc_in_buf_free(vport->phba, d_buf);
17305 }
17306 return true;
17307 }
17308 return false;
17309}
17310
6dd9e31c
JS
17311/**
17312 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17313 * @vport: pointer to a virtual port
17314 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17315 *
17316 * This function tries to abort the assembled sequence at the upper level
17317 * protocol, described by the information from the basic abort @dmabuf. It
17318 * checks whether such a pending context exists at the upper level protocol.
17319 * If so, it cleans up the pending context.
17320 *
17321 * Return
17322 * true -- if a matching pending context of the sequence was found and
17323 * cleaned up at the ulp;
17324 * false -- if no matching pending context of the sequence is present
17325 * at the ulp.
17326 **/
17327static bool
17328lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17329{
17330 struct lpfc_hba *phba = vport->phba;
17331 int handled;
17332
17333 /* Accepting abort at ulp with SLI4 only */
17334 if (phba->sli_rev < LPFC_SLI_REV4)
17335 return false;
17336
17337 /* Register all caring upper level protocols to attend abort */
17338 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17339 if (handled)
17340 return true;
17341
17342 return false;
17343}
17344
6669f9bb 17345/**
546fc854 17346 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
6669f9bb
JS
17347 * @phba: Pointer to HBA context object.
17348 * @cmd_iocbq: pointer to the command iocbq structure.
17349 * @rsp_iocbq: pointer to the response iocbq structure.
17350 *
546fc854 17351 * This function handles the sequence abort response iocb command complete
6669f9bb
JS
17352 * event. It properly releases the memory allocated to the sequence abort
17353 * accept iocb.
17354 **/
17355static void
546fc854 17356lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
6669f9bb
JS
17357 struct lpfc_iocbq *cmd_iocbq,
17358 struct lpfc_iocbq *rsp_iocbq)
17359{
6dd9e31c
JS
17360 struct lpfc_nodelist *ndlp;
17361
17362 if (cmd_iocbq) {
17363 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17364 lpfc_nlp_put(ndlp);
17365 lpfc_nlp_not_used(ndlp);
6669f9bb 17366 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 17367 }
6b5151fd
JS
17368
17369 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17370 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17371 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17372 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17373 rsp_iocbq->iocb.ulpStatus,
17374 rsp_iocbq->iocb.un.ulpWord[4]);
6669f9bb
JS
17375}
17376
6d368e53
JS
17377/**
17378 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17379 * @phba: Pointer to HBA context object.
17380 * @xri: xri id in transaction.
17381 *
17382 * This function validates that the xri maps to the known range of XRIs
17383 * allocated and used by the driver.
17384 **/
7851fe2c 17385uint16_t
6d368e53
JS
17386lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17387 uint16_t xri)
17388{
a2fc4aef 17389 uint16_t i;
6d368e53
JS
17390
17391 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17392 if (xri == phba->sli4_hba.xri_ids[i])
17393 return i;
17394 }
17395 return NO_XRI;
17396}
17397
6669f9bb 17398/**
546fc854 17399 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
6669f9bb
JS
17400 * @vport: Pointer to the vport on which the abort was received.
17401 * @fc_hdr: pointer to a FC frame header.
 * @aborted: true if the sequence was aborted locally; selects BA_ACC
 * versus BA_RJT in the response.
17402 *
546fc854 17403 * This function sends a basic response to a previous unsol sequence abort
6669f9bb
JS
17404 * event after aborting the sequence handling.
17405 **/
86c67379 17406void
6dd9e31c
JS
17407lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17408 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 17409{
6dd9e31c 17410 struct lpfc_hba *phba = vport->phba;
6669f9bb
JS
17411 struct lpfc_iocbq *ctiocb = NULL;
17412 struct lpfc_nodelist *ndlp;
ee0f4fe1 17413 uint16_t oxid, rxid, xri, lxri;
5ffc266e 17414 uint32_t sid, fctl;
6669f9bb 17415 IOCB_t *icmd;
546fc854 17416 int rc;
6669f9bb
JS
17417
17418 if (!lpfc_is_link_up(phba))
17419 return;
17420
17421 sid = sli4_sid_from_fc_hdr(fc_hdr);
17422 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 17423 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 17424
6dd9e31c 17425 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 17426 if (!ndlp) {
9d3d340d 17427 ndlp = lpfc_nlp_init(vport, sid);
6dd9e31c
JS
17428 if (!ndlp) {
17429 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17430 "1268 Failed to allocate ndlp for "
17431 "oxid:x%x SID:x%x\n", oxid, sid);
17432 return;
17433 }
6dd9e31c
JS
17434 /* Put ndlp onto pport node list */
17435 lpfc_enqueue_node(vport, ndlp);
17436 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17437 /* re-setup ndlp without removing from node list */
17438 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17439 if (!ndlp) {
17440 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17441 "3275 Failed to active ndlp found "
17442 "for oxid:x%x SID:x%x\n", oxid, sid);
17443 return;
17444 }
6669f9bb
JS
17445 }
17446
546fc854 17447 /* Allocate buffer for rsp iocb */
6669f9bb
JS
17448 ctiocb = lpfc_sli_get_iocbq(phba);
17449 if (!ctiocb)
17450 return;
17451
5ffc266e
JS
17452 /* Extract the F_CTL field from FC_HDR */
17453 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17454
6669f9bb 17455 icmd = &ctiocb->iocb;
6669f9bb 17456 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 17457 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
6669f9bb
JS
17458 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17459 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17460 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17461
17462 /* Fill in the rest of iocb fields */
17463 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17464 icmd->ulpBdeCount = 0;
17465 icmd->ulpLe = 1;
17466 icmd->ulpClass = CLASS3;
6d368e53 17467 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 17468 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 17469
6669f9bb 17470 ctiocb->vport = phba->pport;
546fc854 17471 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 17472 ctiocb->sli4_lxritag = NO_XRI;
546fc854
JS
17473 ctiocb->sli4_xritag = NO_XRI;
17474
ee0f4fe1
JS
17475 if (fctl & FC_FC_EX_CTX)
17476 /* Exchange responder sent the abort so we
17477 * own the oxid.
17478 */
17479 xri = oxid;
17480 else
17481 xri = rxid;
17482 lxri = lpfc_sli4_xri_inrange(phba, xri);
17483 if (lxri != NO_XRI)
17484 lpfc_set_rrq_active(phba, ndlp, lxri,
17485 (xri == oxid) ? rxid : oxid, 0);
6dd9e31c
JS
17486 /* For BA_ABTS from exchange responder, if the logical xri with
17487 * the oxid maps to the FCP XRI range, the port no longer has
17488 * that exchange context, send a BLS_RJT. Override the IOCB for
17489 * a BA_RJT.
17490 */
17491 if ((fctl & FC_FC_EX_CTX) &&
895427bd 17492 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
6dd9e31c
JS
17493 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17494 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17495 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17496 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17497 }
17498
17499 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17500 * the driver no longer has that exchange, send a BLS_RJT. Override
17501 * the IOCB for a BA_RJT.
546fc854 17502 */
6dd9e31c 17503 if (aborted == false) {
546fc854
JS
17504 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17505 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17506 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17507 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17508 }
6669f9bb 17509
5ffc266e
JS
17510 if (fctl & FC_FC_EX_CTX) {
17511 /* ABTS sent by responder to CT exchange, construction
17512 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17513 * field and RX_ID from ABTS for RX_ID field.
17514 */
546fc854 17515 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
5ffc266e
JS
17516 } else {
17517 /* ABTS sent by initiator to CT exchange, construction
17518 * of BA_ACC will need to allocate a new XRI as for the
f09c3acc 17519 * XRI_TAG field.
5ffc266e 17520 */
546fc854 17521 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 17522 }
f09c3acc 17523 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 17524 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 17525
546fc854 17526 /* Xmit CT abts response on exchange <xid> */
6dd9e31c
JS
17527 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17528 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17529 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
546fc854
JS
17530
17531 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17532 if (rc == IOCB_ERROR) {
6dd9e31c
JS
17533 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17534 "2925 Failed to issue CT ABTS RSP x%x on "
17535 "xri x%x, Data x%x\n",
17536 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17537 phba->link_state);
17538 lpfc_nlp_put(ndlp);
17539 ctiocb->context1 = NULL;
546fc854
JS
17540 lpfc_sli_release_iocbq(phba, ctiocb);
17541 }
6669f9bb
JS
17542}
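/*
 * Editor's note: a standalone restatement (not driver code) of the xri
 * selection above. F_CTL's Exchange Context bit (1 << 23 in the kernel's
 * fc_fs.h) says the ABTS came from the exchange responder, in which case
 * the local port owns the OX_ID; otherwise it owns the RX_ID.
 */
#if 0
#include <stdint.h>

#define TOY_FC_EX_CTX (1u << 23)

static uint16_t toy_owned_xri(uint32_t fctl, uint16_t oxid, uint16_t rxid)
{
	return (fctl & TOY_FC_EX_CTX) ? oxid : rxid;
}
#endif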
17543
17544/**
17545 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17546 * @vport: Pointer to the vport on which this sequence was received
17547 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17548 *
17549 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17550 * receive sequence is only partially assembled by the driver, it aborts
17551 * the partially assembled frames for the sequence. Otherwise, if the
17552 * unsolicited receive sequence has been completely assembled and passed to
17553 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17554 * indicate that the unsolicited sequence has been aborted. After that, it
17555 * issues a basic accept to acknowledge the abort.
17556 **/
5d8b8167 17557static void
6669f9bb
JS
17558lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17559 struct hbq_dmabuf *dmabuf)
17560{
17561 struct lpfc_hba *phba = vport->phba;
17562 struct fc_frame_header fc_hdr;
5ffc266e 17563 uint32_t fctl;
6dd9e31c 17564 bool aborted;
6669f9bb 17565
6669f9bb
JS
17566 /* Make a copy of fc_hdr before the dmabuf being released */
17567 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 17568 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 17569
5ffc266e 17570 if (fctl & FC_FC_EX_CTX) {
6dd9e31c
JS
17571 /* ABTS by responder to exchange, no cleanup needed */
17572 aborted = true;
5ffc266e 17573 } else {
6dd9e31c
JS
17574 /* ABTS by initiator to exchange, need to do cleanup */
17575 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17576 if (aborted == false)
17577 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 17578 }
6dd9e31c
JS
17579 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17580
86c67379
JS
17581 if (phba->nvmet_support) {
17582 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17583 return;
17584 }
17585
6dd9e31c
JS
17586 /* Respond with BA_ACC or BA_RJT accordingly */
17587 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
6669f9bb
JS
17588}
17589
4f774513
JS
17590/**
17591 * lpfc_seq_complete - Indicates if a sequence is complete
17592 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17593 *
17594 * This function checks the sequence, starting with the frame described by
17595 * @dmabuf, to see if all the frames associated with this sequence are present.
17596 * The frames associated with this sequence are linked to the @dmabuf using
17597 * the dbuf list. This function looks for three major things. 1) That the
17598 * first frame has a sequence count of zero. 2) There is a frame with the
17599 * last frame of sequence bit set. 3) That there are no holes in the sequence
17600 * count. The function will return 1 when the sequence is complete, otherwise
 * it will return 0.
17601 **/
17602static int
17603lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17604{
17605 struct fc_frame_header *hdr;
17606 struct lpfc_dmabuf *d_buf;
17607 struct hbq_dmabuf *seq_dmabuf;
17608 uint32_t fctl;
17609 int seq_count = 0;
17610
17611 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17612 /* make sure the first frame of the sequence has a sequence count of zero */
17613 if (hdr->fh_seq_cnt != seq_count)
17614 return 0;
17615 fctl = (hdr->fh_f_ctl[0] << 16 |
17616 hdr->fh_f_ctl[1] << 8 |
17617 hdr->fh_f_ctl[2]);
17618 /* If last frame of sequence we can return success. */
17619 if (fctl & FC_FC_END_SEQ)
17620 return 1;
17621 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17622 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17623 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17624 /* If there is a hole in the sequence count then fail. */
eeead811 17625 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
4f774513
JS
17626 return 0;
17627 fctl = (hdr->fh_f_ctl[0] << 16 |
17628 hdr->fh_f_ctl[1] << 8 |
17629 hdr->fh_f_ctl[2]);
17630 /* If last frame of sequence we can return success. */
17631 if (fctl & FC_FC_END_SEQ)
17632 return 1;
17633 }
17634 return 0;
17635}
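/*
 * Editor's note: a standalone sketch (not driver code) of the F_CTL
 * handling above: the 24-bit F_CTL field arrives as three bytes and is
 * assembled MSB-first before testing the end-of-sequence bit, which is
 * 1 << 19 (FC_FC_END_SEQ) in the kernel's fc_fs.h.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define TOY_FC_END_SEQ (1u << 19)

static bool toy_last_frame_of_seq(const uint8_t fh_f_ctl[3])
{
	uint32_t fctl = fh_f_ctl[0] << 16 |
			fh_f_ctl[1] << 8 |
			fh_f_ctl[2];

	return (fctl & TOY_FC_END_SEQ) != 0;
}
#endif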
17636
17637/**
17638 * lpfc_prep_seq - Prep sequence for ULP processing
17639 * @vport: Pointer to the vport on which this sequence was received
17640 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17641 *
17642 * This function takes a sequence, described by a list of frames, and creates
17643 * a list of iocbq structures to describe the sequence. This iocbq list will be
17644 * issued to the generic unsolicited sequence handler. This routine
17645 * returns a pointer to the first iocbq in the list. If the function is unable
17646 * to allocate an iocbq then it throws out the received frames that could not
17647 * be described and returns a pointer to the first iocbq. If unable to
17648 * allocate any iocbqs (including the first) this function will return NULL.
17649 **/
17650static struct lpfc_iocbq *
17651lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17652{
7851fe2c 17653 struct hbq_dmabuf *hbq_buf;
4f774513
JS
17654 struct lpfc_dmabuf *d_buf, *n_buf;
17655 struct lpfc_iocbq *first_iocbq, *iocbq;
17656 struct fc_frame_header *fc_hdr;
17657 uint32_t sid;
7851fe2c 17658 uint32_t len, tot_len;
eeead811 17659 struct ulp_bde64 *pbde;
4f774513
JS
17660
17661 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17662 /* remove from receive buffer list */
17663 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 17664 lpfc_update_rcv_time_stamp(vport);
4f774513 17665 /* get the Remote Port's SID */
6669f9bb 17666 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 17667 tot_len = 0;
4f774513
JS
17668 /* Get an iocbq struct to fill in. */
17669 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17670 if (first_iocbq) {
17671 /* Initialize the first IOCB. */
8fa38513 17672 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 17673 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
895427bd 17674 first_iocbq->vport = vport;
939723a4
JS
17675
17676 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17677 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17678 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17679 first_iocbq->iocb.un.rcvels.parmRo =
17680 sli4_did_from_fc_hdr(fc_hdr);
17681 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17682 } else
17683 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
7851fe2c
JS
17684 first_iocbq->iocb.ulpContext = NO_XRI;
17685 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17686 be16_to_cpu(fc_hdr->fh_ox_id);
17687 /* iocbq is prepped for internal consumption. Physical vpi. */
17688 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17689 vport->phba->vpi_ids[vport->vpi];
4f774513 17690 /* put the first buffer into the first IOCBq */
48a5a664
JS
17691 tot_len = bf_get(lpfc_rcqe_length,
17692 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17693
4f774513
JS
17694 first_iocbq->context2 = &seq_dmabuf->dbuf;
17695 first_iocbq->context3 = NULL;
17696 first_iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17697 if (tot_len > LPFC_DATA_BUF_SIZE)
17698 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17699 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17700 else
17701 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17702
4f774513 17703 first_iocbq->iocb.un.rcvels.remoteID = sid;
48a5a664 17704
7851fe2c 17705 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
4f774513
JS
17706 }
17707 iocbq = first_iocbq;
17708 /*
17709 * Each IOCBq can have two Buffers assigned, so go through the list
17710 * of buffers for this sequence and save two buffers in each IOCBq
17711 */
17712 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17713 if (!iocbq) {
17714 lpfc_in_buf_free(vport->phba, d_buf);
17715 continue;
17716 }
17717 if (!iocbq->context3) {
17718 iocbq->context3 = d_buf;
17719 iocbq->iocb.ulpBdeCount++;
7851fe2c
JS
17720 /* We need to get the size out of the right CQE */
17721 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17722 len = bf_get(lpfc_rcqe_length,
17723 &hbq_buf->cq_event.cqe.rcqe_cmpl);
48a5a664
JS
17724 pbde = (struct ulp_bde64 *)
17725 &iocbq->iocb.unsli3.sli3Words[4];
17726 if (len > LPFC_DATA_BUF_SIZE)
17727 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17728 else
17729 pbde->tus.f.bdeSize = len;
17730
7851fe2c
JS
17731 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17732 tot_len += len;
4f774513
JS
17733 } else {
17734 iocbq = lpfc_sli_get_iocbq(vport->phba);
17735 if (!iocbq) {
17736 if (first_iocbq) {
17737 first_iocbq->iocb.ulpStatus =
17738 IOSTAT_FCP_RSP_ERROR;
17739 first_iocbq->iocb.un.ulpWord[4] =
17740 IOERR_NO_RESOURCES;
17741 }
17742 lpfc_in_buf_free(vport->phba, d_buf);
17743 continue;
17744 }
48a5a664
JS
17745 /* We need to get the size out of the right CQE */
17746 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17747 len = bf_get(lpfc_rcqe_length,
17748 &hbq_buf->cq_event.cqe.rcqe_cmpl);
4f774513
JS
17749 iocbq->context2 = d_buf;
17750 iocbq->context3 = NULL;
17751 iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17752 if (len > LPFC_DATA_BUF_SIZE)
17753 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17754 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17755 else
17756 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
7851fe2c 17757
7851fe2c
JS
17758 tot_len += len;
17759 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17760
4f774513
JS
17761 iocbq->iocb.un.rcvels.remoteID = sid;
17762 list_add_tail(&iocbq->list, &first_iocbq->list);
17763 }
17764 }
17765 return first_iocbq;
17766}
17767
6669f9bb
JS
17768static void
17769lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17770 struct hbq_dmabuf *seq_dmabuf)
17771{
17772 struct fc_frame_header *fc_hdr;
17773 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17774 struct lpfc_hba *phba = vport->phba;
17775
17776 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17777 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17778 if (!iocbq) {
17779 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17780 "2707 Ring %d handler: Failed to allocate "
17781 "iocb Rctl x%x Type x%x received\n",
17782 LPFC_ELS_RING,
17783 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17784 return;
17785 }
17786 if (!lpfc_complete_unsol_iocb(phba,
895427bd 17787 phba->sli4_hba.els_wq->pring,
6669f9bb
JS
17788 iocbq, fc_hdr->fh_r_ctl,
17789 fc_hdr->fh_type))
6d368e53 17790 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6669f9bb
JS
17791 "2540 Ring %d handler: unexpected Rctl "
17792 "x%x Type x%x received\n",
17793 LPFC_ELS_RING,
17794 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17795
17796 /* Free iocb created in lpfc_prep_seq */
17797 list_for_each_entry_safe(curr_iocb, next_iocb,
17798 &iocbq->list, list) {
17799 list_del_init(&curr_iocb->list);
17800 lpfc_sli_release_iocbq(phba, curr_iocb);
17801 }
17802 lpfc_sli_release_iocbq(phba, iocbq);
17803}
17804
ae9e28f3
JS
17805static void
17806lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17807 struct lpfc_iocbq *rspiocb)
17808{
17809 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17810
17811 if (pcmd && pcmd->virt)
771db5c0 17812 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3
JS
17813 kfree(pcmd);
17814 lpfc_sli_release_iocbq(phba, cmdiocb);
e817e5d7 17815 lpfc_drain_txq(phba);
ae9e28f3
JS
17816}
17817
17818static void
17819lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17820 struct hbq_dmabuf *dmabuf)
17821{
17822 struct fc_frame_header *fc_hdr;
17823 struct lpfc_hba *phba = vport->phba;
17824 struct lpfc_iocbq *iocbq = NULL;
17825 union lpfc_wqe *wqe;
17826 struct lpfc_dmabuf *pcmd = NULL;
17827 uint32_t frame_len;
17828 int rc;
e817e5d7 17829 unsigned long iflags;
ae9e28f3
JS
17830
17831 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17832 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17833
17834 /* Send the received frame back */
17835 iocbq = lpfc_sli_get_iocbq(phba);
e817e5d7
JS
17836 if (!iocbq) {
17837 /* Queue cq event and wakeup worker thread to process it */
17838 spin_lock_irqsave(&phba->hbalock, iflags);
17839 list_add_tail(&dmabuf->cq_event.list,
17840 &phba->sli4_hba.sp_queue_event);
17841 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17842 spin_unlock_irqrestore(&phba->hbalock, iflags);
17843 lpfc_worker_wake_up(phba);
17844 return;
17845 }
ae9e28f3
JS
17846
17847 /* Allocate buffer for command payload */
17848 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17849 if (pcmd)
771db5c0 17850 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
ae9e28f3
JS
17851 &pcmd->phys);
17852 if (!pcmd || !pcmd->virt)
17853 goto exit;
17854
17855 INIT_LIST_HEAD(&pcmd->list);
17856
17857 /* copyin the payload */
17858 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17859
17860 /* fill in BDE's for command */
17861 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17862 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17863 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17864 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17865
17866 iocbq->context2 = pcmd;
17867 iocbq->vport = vport;
17868 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17869 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17870
17871 /*
17872 * Setup rest of the iocb as though it were a WQE
17873 * Build the SEND_FRAME WQE
17874 */
17875 wqe = (union lpfc_wqe *)&iocbq->iocb;
17876
17877 wqe->send_frame.frame_len = frame_len;
17878 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17879 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17880 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17881 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17882 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17883 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17884
17885 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17886 iocbq->iocb.ulpLe = 1;
17887 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17888 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17889 if (rc == IOCB_ERROR)
17890 goto exit;
17891
17892 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17893 return;
17894
17895exit:
17896 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17897 "2023 Unable to process MDS loopback frame\n");
17898 if (pcmd && pcmd->virt)
771db5c0 17899 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3 17900 kfree(pcmd);
401bb416
DK
17901 if (iocbq)
17902 lpfc_sli_release_iocbq(phba, iocbq);
ae9e28f3
JS
17903 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17904}
17905
4f774513
JS
17906/**
17907 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17908 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the dmabuf that describes the received frame.
17909 *
17910 * This function is called with no lock held. This function processes all
17911 * the received buffers and gives them to upper layers when a received buffer
17912 * indicates that it is the final frame in the sequence. The interrupt
895427bd 17913 * service routine processes received buffers in interrupt context.
4f774513
JS
17914 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17915 * appropriate receive function when the final frame in a sequence is received.
17916 **/
4d9ab994
JS
17917void
17918lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17919 struct hbq_dmabuf *dmabuf)
4f774513 17920{
4d9ab994 17921 struct hbq_dmabuf *seq_dmabuf;
4f774513
JS
17922 struct fc_frame_header *fc_hdr;
17923 struct lpfc_vport *vport;
17924 uint32_t fcfi;
939723a4 17925 uint32_t did;
4f774513 17926
4f774513 17927 /* Process each received buffer */
4d9ab994 17928 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
2ea259ee 17929
e817e5d7
JS
17930 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17931 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17932 vport = phba->pport;
17933 /* Handle MDS Loopback frames */
17934 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17935 return;
17936 }
17937
4d9ab994
JS
17938 /* check to see if this a valid type of frame */
17939 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17940 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17941 return;
17942 }
2ea259ee 17943
7851fe2c
JS
17944 if ((bf_get(lpfc_cqe_code,
17945 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17946 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17947 &dmabuf->cq_event.cqe.rcqe_cmpl);
17948 else
17949 fcfi = bf_get(lpfc_rcqe_fcf_id,
17950 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 17951
895427bd
JS
17952 /* d_id this frame is directed to */
17953 did = sli4_did_from_fc_hdr(fc_hdr);
17954
17955 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
939723a4 17956 if (!vport) {
4d9ab994
JS
17957 /* throw out the frame */
17958 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17959 return;
17960 }
939723a4 17961
939723a4
JS
17962 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17963 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17964 (did != Fabric_DID)) {
17965 /*
17966 * Throw out the frame if we are not pt2pt.
17967 * The pt2pt protocol allows for discovery frames
17968 * to be received without a registered VPI.
17969 */
17970 if (!(vport->fc_flag & FC_PT2PT) ||
17971 (phba->link_state == LPFC_HBA_READY)) {
17972 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17973 return;
17974 }
17975 }
17976
6669f9bb
JS
17977 /* Handle the basic abort sequence (BA_ABTS) event */
17978 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17979 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17980 return;
17981 }
17982
4d9ab994
JS
17983 /* Link this frame */
17984 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17985 if (!seq_dmabuf) {
17986 /* unable to add frame to vport - throw it out */
17987 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17988 return;
17989 }
17990 /* If not last frame in sequence continue processing frames. */
def9c7a9 17991 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 17992 return;
def9c7a9 17993
6669f9bb
JS
17994 /* Send the complete sequence to the upper layer protocol */
17995 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 17996}
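/*
 * Editor's note: a toy restatement (not driver code) of the dispatch
 * order in the handler above: MDS loopback frames short-circuit, invalid
 * or unowned frames are dropped, BA_ABTS goes to abort handling, and
 * everything else is linked into a receive sequence.
 */
#if 0
enum toy_action { TOY_DROP, TOY_MDS, TOY_ABORT, TOY_LINK };

static enum toy_action toy_dispatch(int is_mds, int frame_ok,
				    int has_vport, int is_ba_abts)
{
	if (is_mds)
		return TOY_MDS;
	if (!frame_ok || !has_vport)
		return TOY_DROP;
	if (is_ba_abts)
		return TOY_ABORT;
	return TOY_LINK;
}
#endif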
6fb120a7
JS
17997
17998/**
17999 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18000 * @phba: pointer to lpfc hba data structure.
18001 *
18002 * This routine is invoked to post rpi header templates to the
18003 * HBA consistent with the SLI-4 interface spec. This routine
49198b37
JS
18004 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18005 * SLI4_PAGE_SIZE / 64 rpi context headers.
6fb120a7
JS
18006 *
18007 * This routine does not require any locks. Its usage is expected
18008 * to be at driver load or during reset recovery, when driver
18009 * execution is sequential.
18010 *
18011 * Return codes
af901ca1 18012 * 0 - successful
d439d286 18013 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18014 * When this error occurs, the driver is not guaranteed
18015 * to have any rpi regions posted to the device and
18016 * must either attempt to repost the regions or take a
18017 * fatal error.
18018 **/
18019int
18020lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18021{
18022 struct lpfc_rpi_hdr *rpi_page;
18023 uint32_t rc = 0;
6d368e53
JS
18024 uint16_t lrpi = 0;
18025
18026 /* SLI4 ports that support extents do not require RPI headers. */
18027 if (!phba->sli4_hba.rpi_hdrs_in_use)
18028 goto exit;
18029 if (phba->sli4_hba.extents_in_use)
18030 return -EIO;
6fb120a7 18031
6fb120a7 18032 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6d368e53
JS
18033 /*
18034 * Assign the rpi headers a physical rpi only if the driver
18035 * has not initialized those resources. A port reset only
18036 * needs the headers posted.
18037 */
18038 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18039 LPFC_RPI_RSRC_RDY)
18040 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18041
6fb120a7
JS
18042 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18043 if (rc != MBX_SUCCESS) {
18044 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18045 "2008 Error %d posting all rpi "
18046 "headers\n", rc);
18047 rc = -EIO;
18048 break;
18049 }
18050 }
18051
6d368e53
JS
18052 exit:
18053 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18054 LPFC_RPI_RSRC_RDY);
6fb120a7
JS
18055 return rc;
18056}
18057
18058/**
18059 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18060 * @phba: pointer to lpfc hba data structure.
18061 * @rpi_page: pointer to the rpi memory region.
18062 *
18063 * This routine is invoked to post a single rpi header to the
18064 * HBA consistent with the SLI-4 interface spec. This memory region
18065 * maps up to 64 rpi context regions.
18066 *
18067 * Return codes
af901ca1 18068 * 0 - successful
d439d286
JS
18069 * -ENOMEM - No available memory
18070 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18071 **/
18072int
18073lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18074{
18075 LPFC_MBOXQ_t *mboxq;
18076 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18077 uint32_t rc = 0;
6fb120a7
JS
18078 uint32_t shdr_status, shdr_add_status;
18079 union lpfc_sli4_cfg_shdr *shdr;
18080
6d368e53
JS
18081 /* SLI4 ports that support extents do not require RPI headers. */
18082 if (!phba->sli4_hba.rpi_hdrs_in_use)
18083 return rc;
18084 if (phba->sli4_hba.extents_in_use)
18085 return -EIO;
18086
6fb120a7
JS
18087 /* The port is notified of the header region via a mailbox command. */
18088 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18089 if (!mboxq) {
18090 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18091 "2001 Unable to allocate memory for issuing "
18092 "SLI_CONFIG_SPECIAL mailbox command\n");
18093 return -ENOMEM;
18094 }
18095
18096 /* Post all rpi memory regions to the port. */
18097 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
6fb120a7
JS
18098 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18099 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18100 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
fedd3b7b
JS
18101 sizeof(struct lpfc_sli4_cfg_mhdr),
18102 LPFC_SLI4_MBX_EMBED);
6d368e53
JS
18103
18104
18105 /* Post the physical rpi to the port for this rpi header. */
6fb120a7
JS
18106 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18107 rpi_page->start_rpi);
6d368e53
JS
18108 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18109 hdr_tmpl, rpi_page->page_count);
18110
6fb120a7
JS
18111 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18112 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 18113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6fb120a7
JS
18114 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18115 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18116 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18117 if (rc != MBX_TIMEOUT)
18118 mempool_free(mboxq, phba->mbox_mem_pool);
18119 if (shdr_status || shdr_add_status || rc) {
18120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18121 "2514 POST_RPI_HDR mailbox failed with "
18122 "status x%x add_status x%x, mbx status x%x\n",
18123 shdr_status, shdr_add_status, rc);
18124 rc = -ENXIO;
845d9e8d
JS
18125 } else {
18126 /*
18127 * The next_rpi stores the next logical module-64 rpi value used
18128 * to post physical rpis in subsequent rpi postings.
18129 */
18130 spin_lock_irq(&phba->hbalock);
18131 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18132 spin_unlock_irq(&phba->hbalock);
6fb120a7
JS
18133 }
18134 return rc;
18135}
18136
18137/**
18138 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18139 * @phba: pointer to lpfc hba data structure.
18140 *
18141 * This routine is invoked to allocate the next available rpi from the
18142 * driver's rpi bitmask. If rpi resources run low, it also posts another
18143 * rpi header page to the port so that additional rpis become
18144 * available.
6fb120a7
JS
18145 *
18146 * Returns
af901ca1 18147 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
6fb120a7
JS
18148 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18149 **/
18150int
18151lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18152{
6d368e53
JS
18153 unsigned long rpi;
18154 uint16_t max_rpi, rpi_limit;
18155 uint16_t rpi_remaining, lrpi = 0;
6fb120a7 18156 struct lpfc_rpi_hdr *rpi_hdr;
4902b381 18157 unsigned long iflag;
6fb120a7 18158
6fb120a7 18159 /*
6d368e53
JS
18160 * Fetch the next logical rpi. Because this index is logical,
18161 * the driver starts at 0 each time.
6fb120a7 18162 */
4902b381 18163 spin_lock_irqsave(&phba->hbalock, iflag);
be6bb941
JS
18164 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18165 rpi_limit = phba->sli4_hba.next_rpi;
18166
6d368e53
JS
18167 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18168 if (rpi >= rpi_limit)
6fb120a7
JS
18169 rpi = LPFC_RPI_ALLOC_ERROR;
18170 else {
18171 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18172 phba->sli4_hba.max_cfg_param.rpi_used++;
18173 phba->sli4_hba.rpi_count++;
18174 }
be6bb941
JS
18175 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18176 "0001 rpi:%x max:%x lim:%x\n",
18177 (int) rpi, max_rpi, rpi_limit);
6fb120a7
JS
18178
18179 /*
18180 * Don't try to allocate more rpi header regions if the device limit
6d368e53 18181 * has been exhausted.
6fb120a7
JS
18182 */
18183 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18184 (phba->sli4_hba.rpi_count >= max_rpi)) {
4902b381 18185 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18186 return rpi;
18187 }
18188
6d368e53
JS
18189 /*
18190 * RPI header postings are not required for SLI4 ports capable of
18191 * extents.
18192 */
18193 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4902b381 18194 spin_unlock_irqrestore(&phba->hbalock, iflag);
6d368e53
JS
18195 return rpi;
18196 }
18197
6fb120a7
JS
18198 /*
18199 * If the driver is running low on rpi resources, allocate another
18200 * page now. Note that the next_rpi value is used because
18201 * it represents how many are actually in use whereas max_rpi notes
18202 * how many are supported max by the device.
18203 */
6d368e53 18204 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
4902b381 18205 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18206 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18207 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18208 if (!rpi_hdr) {
18209 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18210 "2002 Error Could not grow rpi "
18211 "count\n");
18212 } else {
6d368e53
JS
18213 lrpi = rpi_hdr->start_rpi;
18214 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
6fb120a7
JS
18215 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18216 }
18217 }
18218
18219 return rpi;
18220}
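/*
 * Editor's note: a toy model (not driver code) of the rpi allocation
 * above. find_next_zero_bit()/set_bit() implement a first-fit bitmap id
 * allocator; the flat byte array below models the same search without
 * the kernel bitmap helpers.
 */
#if 0
#define TOY_NO_ID (-1)

/* return the first free id below limit and mark it in use */
static int toy_alloc_id(unsigned char *in_use, int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (!in_use[i]) {
			in_use[i] = 1;
			return i;
		}
	}
	return TOY_NO_ID;	/* pool exhausted */
}
#endif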
18221
d7c47992
JS
18222/**
18223 * __lpfc_sli4_free_rpi - Release an rpi for reuse (hbalock held by caller).
18224 * @phba: pointer to lpfc hba data structure.
18225 *
18226 * This routine is invoked to release an rpi to the pool of
18227 * available rpis maintained by the driver.
18228 **/
5d8b8167 18229static void
d7c47992
JS
18230__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18231{
18232 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18233 phba->sli4_hba.rpi_count--;
18234 phba->sli4_hba.max_cfg_param.rpi_used--;
b95b2119
JS
18235 } else {
18236 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18237 "2016 rpi %x not inuse\n",
18238 rpi);
d7c47992
JS
18239 }
18240}
18241
6fb120a7
JS
18242/**
18243 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18244 * @phba: pointer to lpfc hba data structure.
18245 *
18246 * This routine is invoked to release an rpi to the pool of
18247 * available rpis maintained by the driver.
18248 **/
18249void
18250lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18251{
18252 spin_lock_irq(&phba->hbalock);
d7c47992 18253 __lpfc_sli4_free_rpi(phba, rpi);
6fb120a7
JS
18254 spin_unlock_irq(&phba->hbalock);
18255}
18256
18257/**
18258 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18259 * @phba: pointer to lpfc hba data structure.
18260 *
18261 * This routine is invoked to remove the memory regions that
18262 * provided rpis via a bitmask.
18263 **/
18264void
18265lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18266{
18267 kfree(phba->sli4_hba.rpi_bmask);
6d368e53
JS
18268 kfree(phba->sli4_hba.rpi_ids);
18269 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6fb120a7
JS
18270}
18271
/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: optional completion handler for the mailbox command.
 * @arg: context buffer handed to the completion handler.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command,
 * notifying the port that the rpi associated with @ndlp is active
 * again.
 *
 * Returns 0 on success, -ENOMEM or -EIO otherwise.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
		mboxq->ctx_ndlp = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

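/*
 * Hedged usage sketch for lpfc_sli4_resume_rpi() (hypothetical caller,
 * not lifted from this file): discovery code may either fire and forget,
 * or chain work off the mailbox completion.
 *
 *	(fire and forget - the default completion frees the mailbox)
 *	rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
 *
 *	(chain a handler; arg comes back in mboxq->ctx_buf;
 *	 my_resume_cmpl and my_ctx are placeholder names)
 *	rc = lpfc_sli4_resume_rpi(ndlp, my_resume_cmpl, my_ctx);
 */
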
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				"2022 INIT VPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This is the completion handler for the ADD_FCF_RECORD mailbox command.
 * It checks the IOCTL status embedded in the mailbox subheader, logs any
 * failure, and frees the nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}

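/*
 * Sketch of the intended pairing of the two routines above (an assumption
 * based on their contracts, not a verbatim caller): build the hardcoded
 * default record, then hand it to the nonembedded ADD_FCF mailbox path.
 *
 *	struct fcf_record fcf_record;
 *	int rc;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 *	if (rc)
 *		(the mailbox could not be issued; rc is -ENOMEM or -EIO)
 */
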
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at next priority level.
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those fcf_indexes.
 * returns:
 * 1=success 0=failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The first priority that has not failed FLOGI
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The first priority that has not failed FLOGI
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}

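
/*
 * Worked example for the priority walk above (illustrative values): with
 * fcf_pri entries {idx 0/pri 1, idx 3/pri 1, idx 7/pri 2} and no FLOGI
 * failures, the first call repopulates rr_bmask with indexes {0, 3}
 * (priority 1). Once both are marked LPFC_FCF_FLOGI_FAILED, the next call
 * skips them and loads {7} (priority 2). When every entry has failed, the
 * FLOGI-failed flags are cleared and the bmask is reloaded from the top,
 * though that pass itself reports failure (0).
 */
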
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}

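/*
 * Illustrative round-robin walk (example values only): if rr_bmask has
 * bits {2, 5, 7} set and the currently registered index is 5, the routine
 * returns 7. If FLOGI to 7 subsequently fails over and 7 becomes the
 * registered index, the next call wraps around the bmask and returns 2.
 * If the search comes back around to the registered index without finding
 * another eligible bit, the routine tries the next priority level and,
 * failing that, returns LPFC_FCOE_FCF_NEXT_NONE.
 */
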
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to set.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to clear.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the rediscover FCF table mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}

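/*
 * Byte layout implied by the parser above (reconstructed from the code;
 * offsets in bytes, lengths in 4-byte words):
 *
 *	[0..3]   LPFC_REGION23_SIGNATURE
 *	[4..7]   LPFC_REGION23_VERSION
 *	[8.. ]   TLV records: byte 0 = type, byte 1 = length in words,
 *	         bytes 2..3 = type-specific (driver TLVs carry
 *	         LINUX_DRIVER_ID in byte 2), data follows.
 *
 * Inside a driver-specific TLV, a PORT_STE_TYPE sub-TLV whose data byte
 * is zero marks the port as user-disabled (LINK_DISABLED).
 */
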
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Returns 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Returns a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_change_status;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		switch (shdr_change_status) {
		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3198 Firmware write complete: System "
					"reboot required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_FW_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3199 Firmware write complete: Firmware"
					" reset required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3200 Firmware write complete: Port "
					"Migration or PCI Reset required to "
					"instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PCI_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3201 Firmware write complete: PCI "
					"Reset required to instantiate\n");
			break;
		default:
			break;
		}
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

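/*
 * Hedged usage sketch (assumed caller shape, modeled on a typical firmware
 * download loop; not copied from one): the object is written in chunks,
 * with *offset carried between calls until the whole image is pushed.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_size, &offset);
 *
 * Here fw_size and dma_buffer_list are placeholders for the caller's
 * image length and prepared list of DMA buffers.
 */
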
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	uint32_t txq_cnt = 0;
	struct lpfc_queue *wq;

	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to the first WQ */
		wq = phba->sli4_hba.hdwq[0].fcp_wq;
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

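/*
 * Context note summarizing the routine above: lpfc_drain_txq() is a retry
 * mechanism, not a fast path. ELS commands that could not get an SGL are
 * parked on pring->txq; when SGLs are freed back, a later call replays
 * them in arrival order, and anything that still cannot be issued is
 * completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. The return value
 * is the number of IOCBs still left on the txq.
 */
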
/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}

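/*
 * Worked example for the offset bookkeeping above (illustrative numbers):
 * a CMD_GEN_REQUEST64_WQE BPL with a 64-byte request BDE followed by two
 * inbound (BUFF_TYPE_BDE_64I) reply BDEs of 128 bytes each yields SGE
 * offsets 0 (request), then 0 and 128 (reply list), because the
 * accumulated offset is reset when the first inbound entry is seen.
 */
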
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to the hardware queue the WQE is destined for.
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->nvme_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->nvme_wq;
		pring = wq->pring;

		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}
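
/*
 * Dispatch summary for the routine above: the iocb_flag selects the
 * target queue and the per-queue locking scope.
 *
 *	LPFC_IO_NVME_LS  -> nvmels_wq; an ELS SGL is claimed and the
 *	                    BPL converted via lpfc_wqe_bpl2sgl()
 *	LPFC_IO_NVME     -> qp->nvme_wq; CQ id stamped from nvme_cq_map
 *	LPFC_IO_NVMET    -> qp->nvme_wq; XRI taken from the nvmet
 *	                    receive context's sglq
 *
 * Anything else falls through to WQE_ERROR.
 */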

#ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ to snapshot.
 *
 * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
 * counts 15 seconds after a test case has started running.
 *
 * The user should call lpfc_debugfs_multixripools_write before running a test
 * case to clear stat_snapshot_taken. Then the user starts a test case. While
 * the test case is running, stat_snapshot_taken is incremented by 1 each time
 * this routine is called from the heartbeat timer. When stat_snapshot_taken
 * is equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;

	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
		pvt_pool = &qp->p_multixri_pool->pvt_pool;
		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
		if (qp->nvme_wq)
			txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;

		multixri_pool->stat_pbl_count = pbl_pool->count;
		multixri_pool->stat_pvt_count = pvt_pool->count;
		multixri_pool->stat_busy_count = txcmplq_cnt;
	}

	multixri_pool->stat_snapshot_taken++;
}
#endif
19830
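
/*
 * Illustrative sketch, not part of the driver: the measurement protocol the
 * kernel-doc above describes, reduced to its counter logic. The helper name
 * is hypothetical; the field and macro names are the ones used by
 * lpfc_snapshot_mxp().
 */
static bool lpfc_example_snapshot_due(struct lpfc_multixri_pool *pool)
{
        /* The user clears stat_snapshot_taken via debugfs
         * (lpfc_debugfs_multixripools_write), then starts the test case.
         * Each heartbeat tick advances the counter; the stats are latched
         * exactly once, on the LPFC_MXP_SNAPSHOT_TAKEN-th tick.
         */
        return pool->stat_snapshot_taken++ == LPFC_MXP_SNAPSHOT_TAKEN;
}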

/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: the HWQ whose private pool is adjusted.
 *
 * This routine moves some XRIs from the private to the public pool when the
 * private pool is not busy.
 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
{
        struct lpfc_multixri_pool *multixri_pool;
        u32 io_req_count;
        u32 prev_io_req_count;

        multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
        if (!multixri_pool)
                return;
        io_req_count = multixri_pool->io_req_count;
        prev_io_req_count = multixri_pool->prev_io_req_count;

        if (prev_io_req_count != io_req_count) {
                /* Private pool is busy */
                multixri_pool->prev_io_req_count = io_req_count;
        } else {
                /* Private pool is not busy.
                 * Move XRIs from private to public pool.
                 */
                lpfc_move_xri_pvt_to_pbl(phba, hwqid);
        }
}
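
/*
 * Illustrative sketch, not part of the driver: the "no activity since the
 * last tick" test used above. Sampling a monotonically increasing request
 * counter on each timer tick and comparing it with the previous sample is a
 * cheap way to detect an idle pool: equal samples mean no new IO arrived in
 * the interval. Names below are hypothetical.
 */
static bool lpfc_example_pool_idle(u32 *prev_count, u32 cur_count)
{
        if (*prev_count != cur_count) {
                *prev_count = cur_count;  /* busy: remember new sample */
                return false;
        }
        return true;  /* idle: no requests since the last tick */
}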

/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: the HWQ whose watermark is adjusted.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * but makes sure the new value stays between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
        u32 new_watermark;
        u32 watermark_max;
        u32 watermark_min;
        u32 xri_limit;
        u32 txcmplq_cnt;
        u32 abts_io_bufs;
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_sli4_hdw_queue *qp;

        qp = &phba->sli4_hba.hdwq[hwqid];
        multixri_pool = qp->p_multixri_pool;
        if (!multixri_pool)
                return;
        xri_limit = multixri_pool->xri_limit;

        watermark_max = xri_limit;
        watermark_min = xri_limit / 2;

        txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
        abts_io_bufs = qp->abts_scsi_io_bufs;
        if (qp->nvme_wq) {
                txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
                abts_io_bufs += qp->abts_nvme_io_bufs;
        }

        new_watermark = txcmplq_cnt + abts_io_bufs;
        new_watermark = min(watermark_max, new_watermark);
        new_watermark = max(watermark_min, new_watermark);
        multixri_pool->pvt_pool.high_watermark = new_watermark;

#ifdef LPFC_MXP_STAT
        multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
                                          new_watermark);
#endif
}
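
/*
 * Illustrative worked example, not part of the driver: with
 * xri_limit = 1024, the min/max pair above keeps high_watermark in
 * [512, 1024]. For txcmplq_cnt + abts_io_bufs = 100 the result is 512
 * (raised to the minimum); for 700 it is 700; for 2000 it is 1024 (capped
 * at the limit). Equivalently, with the kernel's clamp() macro:
 *
 *      new_watermark = clamp(txcmplq_cnt + abts_io_bufs,
 *                            xri_limit / 2, xri_limit);
 */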

/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: the HWQ whose pools are rebalanced.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from the private to the public pool on hwqid in
 * two steps. The first step moves (all - low_watermark) XRIs; the second
 * step moves the rest of the XRIs.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
        struct lpfc_pbl_pool *pbl_pool;
        struct lpfc_pvt_pool *pvt_pool;
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        struct list_head tmp_list;
        u32 tmp_count;

        qp = &phba->sli4_hba.hdwq[hwqid];
        pbl_pool = &qp->p_multixri_pool->pbl_pool;
        pvt_pool = &qp->p_multixri_pool->pvt_pool;
        tmp_count = 0;

        lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
        lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

        if (pvt_pool->count > pvt_pool->low_watermark) {
                /* Step 1: move (all - low_watermark) from pvt_pool
                 * to pbl_pool
                 */

                /* Move low watermark of bufs from pvt_pool to tmp_list */
                INIT_LIST_HEAD(&tmp_list);
                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &pvt_pool->list, list) {
                        list_move_tail(&lpfc_ncmd->list, &tmp_list);
                        tmp_count++;
                        if (tmp_count >= pvt_pool->low_watermark)
                                break;
                }

                /* Move all bufs from pvt_pool to pbl_pool */
                list_splice_init(&pvt_pool->list, &pbl_pool->list);

                /* Move all bufs from tmp_list to pvt_pool */
                list_splice(&tmp_list, &pvt_pool->list);

                pbl_pool->count += (pvt_pool->count - tmp_count);
                pvt_pool->count = tmp_count;
        } else {
                /* Step 2: move the rest from pvt_pool to pbl_pool */
                list_splice_init(&pvt_pool->list, &pbl_pool->list);
                pbl_pool->count += pvt_pool->count;
                pvt_pool->count = 0;
        }

        spin_unlock(&pvt_pool->lock);
        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}
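
/*
 * Illustrative worked example, not part of the driver: suppose
 * pvt_pool->count = 100 and low_watermark = 30 when the routine above runs.
 * An idle tick takes step 1: the first 30 bufs are parked on tmp_list, the
 * remaining 70 are spliced to pbl_pool, and tmp_list is spliced back,
 * leaving pvt_pool->count = 30. If the pool is still idle on a later tick,
 * step 2 splices the last 30 to pbl_pool, leaving pvt_pool->count = 0.
 * Draining in two stages keeps a low-watermark reserve available in case IO
 * resumes between ticks.
 */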

/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to the HDW queue the pools belong to
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified
 * pbl_pool to the specified pvt_pool. It might move fewer than count XRIs
 * if there are not enough in the public pool.
 *
 * Return:
 *   true  - if XRIs are successfully moved from the specified pbl_pool to
 *           the specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
                          struct lpfc_pbl_pool *pbl_pool,
                          struct lpfc_pvt_pool *pvt_pool, u32 count)
{
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        int ret;

        ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
        if (ret) {
                if (pbl_pool->count) {
                        /* Move a batch of XRIs from public to private pool */
                        lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
                        list_for_each_entry_safe(lpfc_ncmd,
                                                 lpfc_ncmd_next,
                                                 &pbl_pool->list,
                                                 list) {
                                list_move_tail(&lpfc_ncmd->list,
                                               &pvt_pool->list);
                                pvt_pool->count++;
                                pbl_pool->count--;
                                count--;
                                if (count == 0)
                                        break;
                        }

                        spin_unlock(&pvt_pool->lock);
                        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
                        return true;
                }
                spin_unlock_irqrestore(&pbl_pool->lock, iflag);
        }

        return false;
}
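
/*
 * Illustrative sketch, not part of the driver: the locking idea in
 * _lpfc_move_xri_pbl_to_pvt(). Taking the (possibly remote) public-pool
 * lock with spin_trylock_irqsave() instead of spin_lock_irqsave() lets the
 * caller fall through to another HWQ's pool rather than spin on a contended
 * one, and the private lock is only taken after the public lock is held, so
 * the pbl -> pvt order is the same everywhere. Names below are hypothetical.
 */
static bool lpfc_example_try_move(spinlock_t *src_lock, spinlock_t *dst_lock)
{
        unsigned long iflag;

        if (!spin_trylock_irqsave(src_lock, iflag))
                return false;           /* contended: caller tries elsewhere */
        spin_lock(dst_lock);            /* consistent src -> dst order */
        /* ... move a batch of entries between the two locked lists ... */
        spin_unlock(dst_lock);
        spin_unlock_irqrestore(src_lock, iflag);
        return true;
}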

/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: the HWQ whose private pool is refilled.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public
 * pools with a round-robin search. It tries the local hwqid first, then
 * continues from the HWQ found last time (rrb_next_hwqid). Once a public
 * pool is found, a batch of free common bufs is moved to the private pool
 * on hwqid. It might move fewer than count XRIs if there are not enough in
 * the public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_multixri_pool *next_multixri_pool;
        struct lpfc_pvt_pool *pvt_pool;
        struct lpfc_pbl_pool *pbl_pool;
        struct lpfc_sli4_hdw_queue *qp;
        u32 next_hwqid;
        u32 hwq_count;
        int ret;

        qp = &phba->sli4_hba.hdwq[hwqid];
        multixri_pool = qp->p_multixri_pool;
        pvt_pool = &multixri_pool->pvt_pool;
        pbl_pool = &multixri_pool->pbl_pool;

        /* Check if local pbl_pool is available */
        ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
        if (ret) {
#ifdef LPFC_MXP_STAT
                multixri_pool->local_pbl_hit_count++;
#endif
                return;
        }

        hwq_count = phba->cfg_hdw_queue;

        /* Get the next hwqid which was found last time */
        next_hwqid = multixri_pool->rrb_next_hwqid;

        do {
                /* Go to next hwq */
                next_hwqid = (next_hwqid + 1) % hwq_count;

                next_multixri_pool =
                        phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
                pbl_pool = &next_multixri_pool->pbl_pool;

                /* Check if the public free xri pool is available */
                ret = _lpfc_move_xri_pbl_to_pvt(
                        phba, qp, pbl_pool, pvt_pool, count);

                /* Exit while-loop if success or all hwqid are checked */
        } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

        /* Starting point for the next time */
        multixri_pool->rrb_next_hwqid = next_hwqid;

        if (!ret) {
                /* stats: all public pools are empty */
                multixri_pool->pbl_empty_count++;
        }

#ifdef LPFC_MXP_STAT
        if (ret) {
                if (next_hwqid == hwqid)
                        multixri_pool->local_pbl_hit_count++;
                else
                        multixri_pool->other_pbl_hit_count++;
        }
#endif
}
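
/*
 * Illustrative sketch, not part of the driver: the round-robin walk used
 * above. Starting one past the last successful HWQ and stopping when the
 * index wraps back to that starting value visits every other HWQ at most
 * once per call. Names below are hypothetical.
 */
static u32 lpfc_example_rr_scan(u32 start, u32 hwq_count,
                                bool (*try_one)(u32 hwqid))
{
        u32 next = start;

        do {
                next = (next + 1) % hwq_count;  /* wrap over all HWQs */
                if (try_one(next))
                        break;                  /* found a donor pool */
        } while (next != start);                /* full circle: give up */

        return next;    /* becomes rrb_next_hwqid for the next call */
}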

/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: the HWQ whose private pool is checked.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below the
 * low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_pvt_pool *pvt_pool;

        multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
        pvt_pool = &multixri_pool->pvt_pool;

        if (pvt_pool->count < pvt_pool->low_watermark)
                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: the HWQ the buf belongs to.
 *
 * This routine returns one IO buf back to the free pool. If this is an
 * urgent IO, the IO buf is returned to the expedite pool. If
 * cfg_xri_rebalancing==1, the IO buf is returned to pbl_pool or pvt_pool
 * based on watermark and xri_limit. If cfg_xri_rebalancing==0, the IO buf
 * is returned to lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
                         struct lpfc_sli4_hdw_queue *qp)
{
        unsigned long iflag;
        struct lpfc_pbl_pool *pbl_pool;
        struct lpfc_pvt_pool *pvt_pool;
        struct lpfc_epd_pool *epd_pool;
        u32 txcmplq_cnt;
        u32 xri_owned;
        u32 xri_limit;
        u32 abts_io_bufs;

        /* MUST zero fields if buffer is reused by another protocol */
        lpfc_ncmd->nvmeCmd = NULL;
        lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
        lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

        if (phba->cfg_xri_rebalancing) {
                if (lpfc_ncmd->expedite) {
                        /* Return to expedite pool */
                        epd_pool = &phba->epd_pool;
                        spin_lock_irqsave(&epd_pool->lock, iflag);
                        list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
                        epd_pool->count++;
                        spin_unlock_irqrestore(&epd_pool->lock, iflag);
                        return;
                }

                /* Avoid invalid access if an IO sneaks in and is being
                 * rejected just _after_ xri pools are destroyed in
                 * lpfc_offline. Nothing much can be done at this point.
                 */
                if (!qp->p_multixri_pool)
                        return;

                pbl_pool = &qp->p_multixri_pool->pbl_pool;
                pvt_pool = &qp->p_multixri_pool->pvt_pool;

                txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
                abts_io_bufs = qp->abts_scsi_io_bufs;
                if (qp->nvme_wq) {
                        txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
                        abts_io_bufs += qp->abts_nvme_io_bufs;
                }

                xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
                xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
                if (xri_owned <= xri_limit)
                        qp->p_multixri_pool->below_limit_count++;
                else
                        qp->p_multixri_pool->above_limit_count++;
#endif

                /* XRI goes to either public or private free xri pool
                 * based on watermark and xri_limit
                 */
                if ((pvt_pool->count < pvt_pool->low_watermark) ||
                    (xri_owned < xri_limit &&
                     pvt_pool->count < pvt_pool->high_watermark)) {
                        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
                                                  qp, free_pvt_pool);
                        list_add_tail(&lpfc_ncmd->list,
                                      &pvt_pool->list);
                        pvt_pool->count++;
                        spin_unlock_irqrestore(&pvt_pool->lock, iflag);
                } else {
                        lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
                                                  qp, free_pub_pool);
                        list_add_tail(&lpfc_ncmd->list,
                                      &pbl_pool->list);
                        pbl_pool->count++;
                        spin_unlock_irqrestore(&pbl_pool->lock, iflag);
                }
        } else {
                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
                                          qp, free_xri);
                list_add_tail(&lpfc_ncmd->list,
                              &qp->lpfc_io_buf_list_put);
                qp->put_io_bufs++;
                spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
                                       iflag);
        }
}
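
/*
 * Illustrative worked example, not part of the driver: the pbl/pvt routing
 * decision above in numbers. With low_watermark = 10, high_watermark = 100
 * and xri_limit = 120:
 *   - pvt_pool->count = 5                    -> below low watermark, the
 *     buf goes to pvt_pool regardless of xri_owned;
 *   - pvt_pool->count = 50, xri_owned = 90   -> under both caps, pvt_pool;
 *   - pvt_pool->count = 50, xri_owned = 130  -> over xri_limit, pbl_pool;
 *   - pvt_pool->count = 100                  -> at the high watermark,
 *     pbl_pool.
 */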

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to the HDW queue the pool belongs to.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
                                  struct lpfc_sli4_hdw_queue *qp,
                                  struct lpfc_pvt_pool *pvt_pool,
                                  struct lpfc_nodelist *ndlp)
{
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;

        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &pvt_pool->list, list) {
                if (lpfc_test_rrq_active(
                        phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_ncmd->list);
                pvt_pool->count--;
                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
                return lpfc_ncmd;
        }
        spin_unlock_irqrestore(&pvt_pool->lock, iflag);

        return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        struct lpfc_epd_pool *epd_pool;

        epd_pool = &phba->epd_pool;
        lpfc_ncmd = NULL;

        spin_lock_irqsave(&epd_pool->lock, iflag);
        if (epd_pool->count > 0) {
                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &epd_pool->list, list) {
                        list_del(&lpfc_ncmd->list);
                        epd_pool->count--;
                        break;
                }
        }
        spin_unlock_irqrestore(&epd_pool->lock, iflag);

        return lpfc_ncmd;
}

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: the HWQ to allocate from.
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
                                    struct lpfc_nodelist *ndlp,
                                    int hwqid, int expedite)
{
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_pvt_pool *pvt_pool;
        struct lpfc_io_buf *lpfc_ncmd;

        qp = &phba->sli4_hba.hdwq[hwqid];
        lpfc_ncmd = NULL;
        multixri_pool = qp->p_multixri_pool;
        pvt_pool = &multixri_pool->pvt_pool;
        multixri_pool->io_req_count++;

        /* If pvt_pool is empty, move some XRIs from public to private pool */
        if (pvt_pool->count == 0)
                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

        /* Get one XRI from private free xri pool */
        lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

        if (lpfc_ncmd) {
                lpfc_ncmd->hdwq = qp;
                lpfc_ncmd->hdwq_no = hwqid;
        } else if (expedite) {
                /* If we fail to get one from pvt_pool and this is an expedite
                 * request, get one free xri from expedite pool.
                 */
                lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
        }

        return lpfc_ncmd;
}

static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

        qp = &phba->sli4_hba.hdwq[idx];
        list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
                                 &qp->lpfc_io_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;

                if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
                        continue;

                list_del_init(&lpfc_cmd->list);
                qp->get_io_bufs--;
                lpfc_cmd->hdwq = qp;
                lpfc_cmd->hdwq_no = idx;
                return lpfc_cmd;
        }
        return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: the HWQ to allocate from.
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If
 * cfg_xri_rebalancing==1, it removes an IO buffer from the multiXRI pools.
 * If cfg_xri_rebalancing==0, it removes an IO buffer from the head of the
 * @hwqid io_buf_list and returns it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
                                    struct lpfc_nodelist *ndlp,
                                    u32 hwqid, int expedite)
{
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag;
        struct lpfc_io_buf *lpfc_cmd;

        qp = &phba->sli4_hba.hdwq[hwqid];
        lpfc_cmd = NULL;

        if (phba->cfg_xri_rebalancing)
                lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
                        phba, ndlp, hwqid, expedite);
        else {
                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
                                          qp, alloc_xri_get);
                if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
                        lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
                if (!lpfc_cmd) {
                        lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
                                          qp, alloc_xri_put);
                        list_splice(&qp->lpfc_io_buf_list_put,
                                    &qp->lpfc_io_buf_list_get);
                        qp->get_io_bufs += qp->put_io_bufs;
                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
                        qp->put_io_bufs = 0;
                        spin_unlock(&qp->io_buf_list_put_lock);
                        if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
                            expedite)
                                lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
                }
                spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
        }

        return lpfc_cmd;
}
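
/*
 * Illustrative sketch, not part of the driver: the two-list refill used in
 * the non-rebalancing path of lpfc_get_io_buf(). Consumers drain a "get"
 * list under one lock while frees land on a "put" list under another; only
 * when the get list runs dry is the put list spliced over, so allocation
 * and release rarely contend on the same lock. Names below are
 * hypothetical.
 */
static struct list_head *
lpfc_example_two_list_get(struct list_head *get_list, spinlock_t *get_lock,
                          struct list_head *put_list, spinlock_t *put_lock)
{
        struct list_head *entry = NULL;
        unsigned long iflag;

        spin_lock_irqsave(get_lock, iflag);
        if (list_empty(get_list)) {
                /* Refill: steal everything the free side has batched up. */
                spin_lock(put_lock);
                list_splice_init(put_list, get_list);
                spin_unlock(put_lock);
        }
        if (!list_empty(get_list)) {
                entry = get_list->next;
                list_del_init(entry);
        }
        spin_unlock_irqrestore(get_lock, iflag);
        return entry;
}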