scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

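/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the byte
 * count passed to lpfc_sli4_pcimem_bcopy() must be a multiple of
 * sizeof(uint64_t), so copying one 128-byte WQE passes sizeof(*src) directly.
 */
static void __maybe_unused
lpfc_example_copy_wqe(union lpfc_wqe128 *src, union lpfc_wqe128 *dst)
{
	/* copies (and, on big-endian hosts, byte-swaps) 16 64-bit words */
	lpfc_sli4_pcimem_bcopy(src, dst, sizeof(*src));
}
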
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
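
/*
 * Illustrative sketch (hypothetical caller, not part of the driver): per the
 * comment above, lpfc_sli4_wq_put() must run under hbalock; -EBUSY means the
 * WQ is full and the WQE should be queued or retried.
 */
static int __maybe_unused
lpfc_example_post_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
		      union lpfc_wqe128 *wqe)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}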

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
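
/*
 * Illustrative sketch (hypothetical, not part of the driver): the modular
 * index arithmetic used by every put/release pair in this file. With
 * entry_count = 256, index 255 advances to 0 rather than 256.
 */
static uint32_t __maybe_unused
lpfc_example_next_index(uint32_t idx, uint32_t entry_count)
{
	return (idx + 1) % entry_count;
}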

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;


	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}
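
/*
 * Illustrative sketch (hypothetical, not part of the driver): the canonical
 * consume loop pairing lpfc_sli4_eq_get() with lpfc_sli4_eq_release(). A real
 * handler dispatches each EQE; this sketch only counts them before releasing
 * the processed entries and rearming the EQ.
 */
static uint32_t __maybe_unused
lpfc_example_drain_eq(struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
		count++;	/* a real handler would process eqe here */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
	return count;
}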

/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	cqe = q->qe[q->hba_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}
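
/*
 * Illustrative sketch (hypothetical, not part of the driver): the CQ variant
 * of the same consume loop; each CQE would normally be dispatched by type
 * before the final release rearms the CQ.
 */
static uint32_t __maybe_unused
lpfc_example_drain_cq(struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	uint32_t count = 0;

	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL)
		count++;	/* a real handler would process cqe here */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
	return count;
}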

/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
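
/*
 * Illustrative sketch (hypothetical, not part of the driver): posting one
 * receive buffer always places a header/data RQE pair at the same index, as
 * lpfc_sli4_rq_put() enforces above.
 */
static int __maybe_unused
lpfc_example_post_rq_buf(struct lpfc_queue *hrq, struct lpfc_queue *drq,
			 dma_addr_t hdr_pa, dma_addr_t dat_pa)
{
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hdr_pa);
	hrqe.address_hi = putPaddrHigh(hdr_pa);
	drqe.address_lo = putPaddrLow(dat_pa);
	drqe.address_hi = putPaddrHigh(dat_pa);
	/* returns the put index, or -EBUSY/-EINVAL/-ENOMEM on failure */
	return lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
}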

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

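/*
 * Illustrative sketch (hypothetical, not part of the driver): an XRI
 * round-trips through the active-sglq array; clearing the slot hands back
 * the same sglq that was looked up.
 */
static __maybe_unused struct lpfc_sglq *
lpfc_example_take_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq = __lpfc_get_active_sglq(phba, xritag);

	if (sglq)
		sglq = __lpfc_clear_active_sglq(phba, xritag);
	return sglq;
}
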
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held; it takes the hbalock
 * internally. It checks if stop_time (ratov from setting rrq active)
 * has been reached; if it has and the send_rrq flag is set, then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set,
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *        < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

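/*
 * Illustrative sketch (hypothetical, not part of the driver): before reusing
 * an XRI, a caller would test the RRQ bit set by lpfc_set_rrq_active() above;
 * lpfc_test_rrq_active() requires the hbalock to be held.
 */
static bool __maybe_unused
lpfc_example_xri_reusable(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			  uint16_t xritag)
{
	unsigned long iflags;
	bool reusable;

	spin_lock_irqsave(&phba->hbalock, iflags);
	reusable = !lpfc_test_rrq_active(phba, ndlp, xritag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return reusable;
}
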
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

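/*
 * Illustrative sketch (hypothetical, not part of the driver): the typical
 * use of lpfc_sli_cancel_iocbs() is to fail every IOCB collected on a local
 * list with a LOCAL_REJECT/SLI_DOWN status during teardown.
 */
static void __maybe_unused
lpfc_example_fail_iocb_list(struct lpfc_hba *phba,
			    struct list_head *iocblist)
{
	lpfc_sli_cancel_iocbs(phba, iocblist, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}
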
e59058c4 1419/**
3621a710
JS
1420 * lpfc_sli_iocb_cmd_type - Get the iocb type
1421 * @iocb_cmnd: iocb command code.
e59058c4
JS
1422 *
1423 * This function is called by ring event handler function to get the iocb type.
1424 * This function translates the iocb command to an iocb command type used to
1425 * decide the final disposition of each completed IOCB.
1426 * The function returns
1427 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1428 * LPFC_SOL_IOCB if it is a solicited iocb completion
1429 * LPFC_ABORT_IOCB if it is an abort iocb
1430 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1431 *
1432 * The caller is not required to hold any lock.
1433 **/
dea3101e 1434static lpfc_iocb_type
1435lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1436{
1437 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1438
1439 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1440 return 0;
1441
1442 switch (iocb_cmnd) {
1443 case CMD_XMIT_SEQUENCE_CR:
1444 case CMD_XMIT_SEQUENCE_CX:
1445 case CMD_XMIT_BCAST_CN:
1446 case CMD_XMIT_BCAST_CX:
1447 case CMD_ELS_REQUEST_CR:
1448 case CMD_ELS_REQUEST_CX:
1449 case CMD_CREATE_XRI_CR:
1450 case CMD_CREATE_XRI_CX:
1451 case CMD_GET_RPI_CN:
1452 case CMD_XMIT_ELS_RSP_CX:
1453 case CMD_GET_RPI_CR:
1454 case CMD_FCP_IWRITE_CR:
1455 case CMD_FCP_IWRITE_CX:
1456 case CMD_FCP_IREAD_CR:
1457 case CMD_FCP_IREAD_CX:
1458 case CMD_FCP_ICMND_CR:
1459 case CMD_FCP_ICMND_CX:
f5603511
JS
1460 case CMD_FCP_TSEND_CX:
1461 case CMD_FCP_TRSP_CX:
1462 case CMD_FCP_TRECEIVE_CX:
1463 case CMD_FCP_AUTO_TRSP_CX:
dea3101e 1464 case CMD_ADAPTER_MSG:
1465 case CMD_ADAPTER_DUMP:
1466 case CMD_XMIT_SEQUENCE64_CR:
1467 case CMD_XMIT_SEQUENCE64_CX:
1468 case CMD_XMIT_BCAST64_CN:
1469 case CMD_XMIT_BCAST64_CX:
1470 case CMD_ELS_REQUEST64_CR:
1471 case CMD_ELS_REQUEST64_CX:
1472 case CMD_FCP_IWRITE64_CR:
1473 case CMD_FCP_IWRITE64_CX:
1474 case CMD_FCP_IREAD64_CR:
1475 case CMD_FCP_IREAD64_CX:
1476 case CMD_FCP_ICMND64_CR:
1477 case CMD_FCP_ICMND64_CX:
f5603511
JS
1478 case CMD_FCP_TSEND64_CX:
1479 case CMD_FCP_TRSP64_CX:
1480 case CMD_FCP_TRECEIVE64_CX:
dea3101e 1481 case CMD_GEN_REQUEST64_CR:
1482 case CMD_GEN_REQUEST64_CX:
1483 case CMD_XMIT_ELS_RSP64_CX:
da0436e9
JS
1484 case DSSCMD_IWRITE64_CR:
1485 case DSSCMD_IWRITE64_CX:
1486 case DSSCMD_IREAD64_CR:
1487 case DSSCMD_IREAD64_CX:
dea3101e 1488 type = LPFC_SOL_IOCB;
1489 break;
1490 case CMD_ABORT_XRI_CN:
1491 case CMD_ABORT_XRI_CX:
1492 case CMD_CLOSE_XRI_CN:
1493 case CMD_CLOSE_XRI_CX:
1494 case CMD_XRI_ABORTED_CX:
1495 case CMD_ABORT_MXRI64_CN:
6669f9bb 1496 case CMD_XMIT_BLS_RSP64_CX:
dea3101e 1497 type = LPFC_ABORT_IOCB;
1498 break;
1499 case CMD_RCV_SEQUENCE_CX:
1500 case CMD_RCV_ELS_REQ_CX:
1501 case CMD_RCV_SEQUENCE64_CX:
1502 case CMD_RCV_ELS_REQ64_CX:
57127f15 1503 case CMD_ASYNC_STATUS:
ed957684
JS
1504 case CMD_IOCB_RCV_SEQ64_CX:
1505 case CMD_IOCB_RCV_ELS64_CX:
1506 case CMD_IOCB_RCV_CONT64_CX:
3163f725 1507 case CMD_IOCB_RET_XRI64_CX:
dea3101e 1508 type = LPFC_UNSOL_IOCB;
1509 break;
3163f725
JS
1510 case CMD_IOCB_XMIT_MSEQ64_CR:
1511 case CMD_IOCB_XMIT_MSEQ64_CX:
1512 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1513 case CMD_IOCB_RCV_ELS_LIST64_CX:
1514 case CMD_IOCB_CLOSE_EXTENDED_CN:
1515 case CMD_IOCB_ABORT_EXTENDED_CN:
1516 case CMD_IOCB_RET_HBQE64_CN:
1517 case CMD_IOCB_FCP_IBIDIR64_CR:
1518 case CMD_IOCB_FCP_IBIDIR64_CX:
1519 case CMD_IOCB_FCP_ITASKMGT64_CX:
1520 case CMD_IOCB_LOGENTRY_CN:
1521 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1522 printk("%s - Unhandled SLI-3 Command x%x\n",
cadbd4a5 1523 __func__, iocb_cmnd);
3163f725
JS
1524 type = LPFC_UNKNOWN_IOCB;
1525 break;
dea3101e 1526 default:
1527 type = LPFC_UNKNOWN_IOCB;
1528 break;
1529 }
1530
1531 return type;
1532}
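/*
 * Editorial aside, not part of the driver source: a ring event handler
 * would typically dispatch on the translated type. A minimal sketch,
 * assuming irsp points at the response IOCB being processed:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
 *	case LPFC_SOL_IOCB:
 *		rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
 *		break;
 *	case LPFC_ABORT_IOCB:
 *	case LPFC_UNKNOWN_IOCB:
 *	default:
 *		break;
 *	}
 *
 * lpfc_sli_process_sol_iocb() and lpfc_sli_process_unsol_iocb() are the
 * handlers defined later in this file.
 */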
1533
e59058c4 1534/**
3621a710 1535 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
e59058c4
JS
1536 * @phba: Pointer to HBA context object.
1537 *
1538 * This function is called from SLI initialization code
1539 * to configure every ring of the HBA's SLI interface. The
1540 * caller is not required to hold any lock. This function issues
1541 * a config_ring mailbox command for each ring.
1542 * This function returns zero if successful, else returns a negative
1543 * error code.
1544 **/
dea3101e 1545static int
ed957684 1546lpfc_sli_ring_map(struct lpfc_hba *phba)
dea3101e 1547{
1548 struct lpfc_sli *psli = &phba->sli;
ed957684
JS
1549 LPFC_MBOXQ_t *pmb;
1550 MAILBOX_t *pmbox;
1551 int i, rc, ret = 0;
dea3101e 1552
ed957684
JS
1553 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1554 if (!pmb)
1555 return -ENOMEM;
04c68496 1556 pmbox = &pmb->u.mb;
ed957684 1557 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 1558 for (i = 0; i < psli->num_rings; i++) {
dea3101e 1559 lpfc_config_ring(phba, i, pmb);
1560 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1561 if (rc != MBX_SUCCESS) {
92d7f7b0 1562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1563 "0446 Adapter failed to init (%d), "
dea3101e 1564 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1565 "ring %d\n",
e8b62011
JS
1566 rc, pmbox->mbxCommand,
1567 pmbox->mbxStatus, i);
2e0fef85 1568 phba->link_state = LPFC_HBA_ERROR;
ed957684
JS
1569 ret = -ENXIO;
1570 break;
dea3101e 1571 }
1572 }
ed957684
JS
1573 mempool_free(pmb, phba->mbox_mem_pool);
1574 return ret;
dea3101e 1575}
1576
e59058c4 1577/**
3621a710 1578 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
e59058c4
JS
1579 * @phba: Pointer to HBA context object.
1580 * @pring: Pointer to driver SLI ring object.
1581 * @piocb: Pointer to the driver iocb object.
1582 *
1583 * This function is called with hbalock held. The function adds the
1584 * new iocb to txcmplq of the given ring. This function always returns
1585 * 0. If this function is called for the ELS ring, this function checks if
1586 * there is a vport associated with the ELS command. This function also
1587 * starts the els_tmofunc timer if this is an ELS command.
1588 **/
dea3101e 1589static int
2e0fef85
JS
1590lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1591 struct lpfc_iocbq *piocb)
dea3101e 1592{
1c2ba475
JT
1593 lockdep_assert_held(&phba->hbalock);
1594
2319f847 1595 BUG_ON(!piocb);
22466da5 1596
dea3101e 1597 list_add_tail(&piocb->list, &pring->txcmplq);
4f2e66c6 1598 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
2a9bf3d0 1599
92d7f7b0
JS
1600 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1601 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2319f847
MFO
1602 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1603 BUG_ON(!piocb->vport);
1604 if (!(piocb->vport->load_flag & FC_UNLOADING))
1605 mod_timer(&piocb->vport->els_tmofunc,
1606 jiffies +
1607 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1608 }
dea3101e 1609
2e0fef85 1610 return 0;
dea3101e 1611}
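/*
 * Editorial aside, not part of the driver source: the ELS timeout armed
 * above is twice the remote-port recovery timeout, converted to jiffies:
 *
 *	unsigned long exp;
 *
 *	exp = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov << 1));
 *
 * i.e. the els_tmofunc timer fires 2 * fc_ratov seconds from now if the
 * ELS command has not completed by then.
 */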
1612
e59058c4 1613/**
3621a710 1614 * lpfc_sli_ringtx_get - Get first element of the txq
e59058c4
JS
1615 * @phba: Pointer to HBA context object.
1616 * @pring: Pointer to driver SLI ring object.
1617 *
1618 * This function is called with hbalock held to get next
1619 * iocb in txq of the given ring. If there is any iocb in
1620 * the txq, the function returns first iocb in the list after
1621 * removing the iocb from the list, else it returns NULL.
1622 **/
2a9bf3d0 1623struct lpfc_iocbq *
2e0fef85 1624lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1625{
dea3101e 1626 struct lpfc_iocbq *cmd_iocb;
1627
1c2ba475
JT
1628 lockdep_assert_held(&phba->hbalock);
1629
858c9f6c 1630 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
2e0fef85 1631 return cmd_iocb;
dea3101e 1632}
1633
e59058c4 1634/**
3621a710 1635 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
e59058c4
JS
1636 * @phba: Pointer to HBA context object.
1637 * @pring: Pointer to driver SLI ring object.
1638 *
1639 * This function is called with hbalock held and the caller must post the
1640 * iocb without releasing the lock. If the caller releases the lock,
1641 * iocb slot returned by the function is not guaranteed to be available.
1642 * The function returns a pointer to the next available iocb slot if there
1643 * is a free slot in the ring, else it returns NULL.
1644 * If the get index of the ring is ahead of the put index, the function
1645 * will post an error attention event to the worker thread to take the
1646 * HBA to offline state.
1647 **/
dea3101e 1648static IOCB_t *
1649lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1650{
34b02dcd 1651 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
7e56aa25 1652 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1c2ba475
JT
1653
1654 lockdep_assert_held(&phba->hbalock);
1655
7e56aa25
JS
1656 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1657 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1658 pring->sli.sli3.next_cmdidx = 0;
dea3101e 1659
7e56aa25
JS
1660 if (unlikely(pring->sli.sli3.local_getidx ==
1661 pring->sli.sli3.next_cmdidx)) {
dea3101e 1662
7e56aa25 1663 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 1664
7e56aa25 1665 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea3101e 1666 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 1667 "0315 Ring %d issue: portCmdGet %d "
025dfdaf 1668 "is bigger than cmd ring %d\n",
e8b62011 1669 pring->ringno,
7e56aa25
JS
1670 pring->sli.sli3.local_getidx,
1671 max_cmd_idx);
dea3101e 1672
2e0fef85 1673 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1674 /*
1675 * All error attention handlers are posted to
1676 * worker thread
1677 */
1678 phba->work_ha |= HA_ERATT;
1679 phba->work_hs = HS_FFER3;
92d7f7b0 1680
5e9d9b82 1681 lpfc_worker_wake_up(phba);
dea3101e 1682
1683 return NULL;
1684 }
1685
7e56aa25 1686 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea3101e 1687 return NULL;
1688 }
1689
ed957684 1690 return lpfc_cmd_iocb(phba, pring);
dea3101e 1691}
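/*
 * Editorial aside, not part of the driver source: the command ring is a
 * circular buffer of numCiocb entries, so advancing an index wraps at
 * the ring size. A standalone sketch of the wrap test used above, with
 * 'cur' and 'max' as hypothetical stand-ins for the sli3 index fields:
 *
 *	next = (cur + 1 >= max) ? 0 : cur + 1;
 *
 * The ring is treated as full when advancing next_cmdidx would collide
 * with the adapter's consumer index (local_getidx), which is exactly
 * the unlikely() comparison above.
 */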
1692
e59058c4 1693/**
3621a710 1694 * lpfc_sli_next_iotag - Get an iotag for the iocb
e59058c4
JS
1695 * @phba: Pointer to HBA context object.
1696 * @iocbq: Pointer to driver iocb object.
1697 *
1698 * This function gets an iotag for the iocb. If there is no unused iotag and
1699 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1700 * array and assigns a new iotag.
1701 * The function returns the allocated iotag if successful, else returns zero.
1702 * Zero is not a valid iotag.
1703 * The caller is not required to hold any lock.
1704 **/
604a3e30 1705uint16_t
2e0fef85 1706lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea3101e 1707{
2e0fef85
JS
1708 struct lpfc_iocbq **new_arr;
1709 struct lpfc_iocbq **old_arr;
604a3e30
JB
1710 size_t new_len;
1711 struct lpfc_sli *psli = &phba->sli;
1712 uint16_t iotag;
dea3101e 1713
2e0fef85 1714 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1715 iotag = psli->last_iotag;
1716 if(++iotag < psli->iocbq_lookup_len) {
1717 psli->last_iotag = iotag;
1718 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1719 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1720 iocbq->iotag = iotag;
1721 return iotag;
2e0fef85 1722 } else if (psli->iocbq_lookup_len < (0xffff
604a3e30
JB
1723 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1724 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2e0fef85 1725 spin_unlock_irq(&phba->hbalock);
6396bb22 1726 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
604a3e30
JB
1727 GFP_KERNEL);
1728 if (new_arr) {
2e0fef85 1729 spin_lock_irq(&phba->hbalock);
604a3e30
JB
1730 old_arr = psli->iocbq_lookup;
1731 if (new_len <= psli->iocbq_lookup_len) {
1732 /* highly improbable case */
1733 kfree(new_arr);
1734 iotag = psli->last_iotag;
1735 if(++iotag < psli->iocbq_lookup_len) {
1736 psli->last_iotag = iotag;
1737 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1738 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1739 iocbq->iotag = iotag;
1740 return iotag;
1741 }
2e0fef85 1742 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1743 return 0;
1744 }
1745 if (psli->iocbq_lookup)
1746 memcpy(new_arr, old_arr,
1747 ((psli->last_iotag + 1) *
311464ec 1748 sizeof (struct lpfc_iocbq *)));
604a3e30
JB
1749 psli->iocbq_lookup = new_arr;
1750 psli->iocbq_lookup_len = new_len;
1751 psli->last_iotag = iotag;
1752 psli->iocbq_lookup[iotag] = iocbq;
2e0fef85 1753 spin_unlock_irq(&phba->hbalock);
604a3e30
JB
1754 iocbq->iotag = iotag;
1755 kfree(old_arr);
1756 return iotag;
1757 }
8f6d98d2 1758 } else
2e0fef85 1759 spin_unlock_irq(&phba->hbalock);
dea3101e 1760
bc73905a 1761 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
1762 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1763 psli->last_iotag);
dea3101e 1764
604a3e30 1765 return 0;
dea3101e 1766}
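/*
 * Editorial aside, not part of the driver source: the function above is
 * an instance of the "allocate outside the lock, revalidate inside"
 * pattern for growing a lock-protected array. In outline:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	// fast path: the next iotag fits in the current table
 *	spin_unlock_irq(&phba->hbalock);
 *	new_arr = kcalloc(new_len, sizeof(*new_arr), GFP_KERNEL); // may sleep
 *	spin_lock_irq(&phba->hbalock);
 *	// revalidate: another thread may have grown the table meanwhile
 *	// copy old entries, publish new_arr, record new_len
 *	spin_unlock_irq(&phba->hbalock);
 *	kfree(old_arr);		// old table freed outside the lock
 *
 * Dropping hbalock around kcalloc() is required because GFP_KERNEL
 * allocations may sleep, which is illegal under a spinlock.
 */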
1767
e59058c4 1768/**
3621a710 1769 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
e59058c4
JS
1770 * @phba: Pointer to HBA context object.
1771 * @pring: Pointer to driver SLI ring object.
1772 * @iocb: Pointer to iocb slot in the ring.
1773 * @nextiocb: Pointer to driver iocb object which needs to be
1774 * posted to firmware.
1775 *
1776 * This function is called with hbalock held to post a new iocb to
1777 * the firmware. This function copies the new iocb to ring iocb slot and
1778 * updates the ring pointers. It adds the new iocb to txcmplq if there is
1779 * a completion callback for this iocb, else the function will free the
1780 * iocb object.
1781 **/
dea3101e 1782static void
1783lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1784 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1785{
1c2ba475 1786 lockdep_assert_held(&phba->hbalock);
dea3101e 1787 /*
604a3e30 1788 * Set up an iotag
dea3101e 1789 */
604a3e30 1790 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea3101e 1791
e2a0a9d6 1792
a58cbd52
JS
1793 if (pring->ringno == LPFC_ELS_RING) {
1794 lpfc_debugfs_slow_ring_trc(phba,
1795 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1796 *(((uint32_t *) &nextiocb->iocb) + 4),
1797 *(((uint32_t *) &nextiocb->iocb) + 6),
1798 *(((uint32_t *) &nextiocb->iocb) + 7));
1799 }
1800
dea3101e 1801 /*
1802 * Issue iocb command to adapter
1803 */
92d7f7b0 1804 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea3101e 1805 wmb();
1806 pring->stats.iocb_cmd++;
1807
1808 /*
1809 * If there is no completion routine to call, we can release the
1810 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1811 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1812 */
1813 if (nextiocb->iocb_cmpl)
1814 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
604a3e30 1815 else
2e0fef85 1816 __lpfc_sli_release_iocbq(phba, nextiocb);
dea3101e 1817
1818 /*
1819 * Let the HBA know what IOCB slot will be the next one the
1820 * driver will put a command into.
1821 */
7e56aa25
JS
1822 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1823 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea3101e 1824}
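/*
 * Editorial aside, not part of the driver source: the wmb() above is
 * the standard producer-side barrier for a shared ring. The pattern,
 * in outline:
 *
 *	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
 *	wmb();					// IOCB contents visible first ...
 *	writel(pring->sli.sli3.cmdidx,		// ... then the put index
 *	       &phba->host_gp[pring->ringno].cmdPutInx);
 *
 * guaranteeing the HBA never sees the new put index before the IOCB
 * contents it describes.
 */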
1825
e59058c4 1826/**
3621a710 1827 * lpfc_sli_update_full_ring - Update the chip attention register
e59058c4
JS
1828 * @phba: Pointer to HBA context object.
1829 * @pring: Pointer to driver SLI ring object.
1830 *
1831 * The caller is not required to hold any lock for calling this function.
1832 * This function updates the chip attention bits for the ring to inform firmware
1833 * that there is pending work to be done for this ring and requests an
1834 * interrupt when there is space available in the ring. This function is
1835 * called when the driver is unable to post more iocbs to the ring due
1836 * to unavailability of space in the ring.
1837 **/
dea3101e 1838static void
2e0fef85 1839lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1840{
1841 int ringno = pring->ringno;
1842
1843 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1844
1845 wmb();
1846
1847 /*
1848 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1849 * The HBA will tell us when an IOCB entry is available.
1850 */
1851 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1852 readl(phba->CAregaddr); /* flush */
1853
1854 pring->stats.iocb_cmd_full++;
1855}
1856
e59058c4 1857/**
3621a710 1858 * lpfc_sli_update_ring - Update chip attention register
e59058c4
JS
1859 * @phba: Pointer to HBA context object.
1860 * @pring: Pointer to driver SLI ring object.
1861 *
1862 * This function updates the chip attention register bit for the
1863 * given ring to inform HBA that there is more work to be done
1864 * in this ring. The caller is not required to hold any lock.
1865 **/
dea3101e 1866static void
2e0fef85 1867lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1868{
1869 int ringno = pring->ringno;
1870
1871 /*
1872 * Tell the HBA that there is work to do in this ring.
1873 */
34b02dcd
JS
1874 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1875 wmb();
1876 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1877 readl(phba->CAregaddr); /* flush */
1878 }
dea3101e 1879}
1880
e59058c4 1881/**
3621a710 1882 * lpfc_sli_resume_iocb - Process iocbs in the txq
e59058c4
JS
1883 * @phba: Pointer to HBA context object.
1884 * @pring: Pointer to driver SLI ring object.
1885 *
1886 * This function is called with hbalock held to post pending iocbs
1887 * in the txq to the firmware. This function is called when the driver
1888 * detects space available in the ring.
1889 **/
dea3101e 1890static void
2e0fef85 1891lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea3101e 1892{
1893 IOCB_t *iocb;
1894 struct lpfc_iocbq *nextiocb;
1895
1c2ba475
JT
1896 lockdep_assert_held(&phba->hbalock);
1897
dea3101e 1898 /*
1899 * Check to see if:
1900 * (a) there is anything on the txq to send
1901 * (b) link is up
1902 * (c) link attention events can be processed (fcp ring only)
1903 * (d) IOCB processing is not blocked by the outstanding mbox command.
1904 */
0e9bb8d7
JS
1905
1906 if (lpfc_is_link_up(phba) &&
1907 (!list_empty(&pring->txq)) &&
895427bd 1908 (pring->ringno != LPFC_FCP_RING ||
0b727fea 1909 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea3101e 1910
1911 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1912 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1913 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1914
1915 if (iocb)
1916 lpfc_sli_update_ring(phba, pring);
1917 else
1918 lpfc_sli_update_full_ring(phba, pring);
1919 }
1920
1921 return;
1922}
1923
e59058c4 1924/**
3621a710 1925 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
e59058c4
JS
1926 * @phba: Pointer to HBA context object.
1927 * @hbqno: HBQ number.
1928 *
1929 * This function is called with hbalock held to get the next
1930 * available slot for the given HBQ. If there is a free slot
1931 * available for the HBQ, it will return a pointer to the next available
1932 * HBQ entry, else it will return NULL.
1933 **/
a6ababd2 1934static struct lpfc_hbq_entry *
ed957684
JS
1935lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1936{
1937 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1938
1c2ba475
JT
1939 lockdep_assert_held(&phba->hbalock);
1940
ed957684
JS
1941 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1942 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1943 hbqp->next_hbqPutIdx = 0;
1944
1945 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
92d7f7b0 1946 uint32_t raw_index = phba->hbq_get[hbqno];
ed957684
JS
1947 uint32_t getidx = le32_to_cpu(raw_index);
1948
1949 hbqp->local_hbqGetIdx = getidx;
1950
1951 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1952 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 1953 LOG_SLI | LOG_VPORT,
e8b62011 1954 "1802 HBQ %d: local_hbqGetIdx "
ed957684 1955 "%u is > than hbqp->entry_count %u\n",
e8b62011 1956 hbqno, hbqp->local_hbqGetIdx,
ed957684
JS
1957 hbqp->entry_count);
1958
1959 phba->link_state = LPFC_HBA_ERROR;
1960 return NULL;
1961 }
1962
1963 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1964 return NULL;
1965 }
1966
51ef4c26
JS
1967 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1968 hbqp->hbqPutIdx;
ed957684
JS
1969}
1970
e59058c4 1971/**
3621a710 1972 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
e59058c4
JS
1973 * @phba: Pointer to HBA context object.
1974 *
1975 * This function is called with no lock held to free all the
1976 * hbq buffers while uninitializing the SLI interface. It also
1977 * frees the HBQ buffers returned by the firmware but not yet
1978 * processed by the upper layers.
1979 **/
ed957684
JS
1980void
1981lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1982{
92d7f7b0
JS
1983 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1984 struct hbq_dmabuf *hbq_buf;
3163f725 1985 unsigned long flags;
51ef4c26 1986 int i, hbq_count;
ed957684 1987
51ef4c26 1988 hbq_count = lpfc_sli_hbq_count();
ed957684 1989 /* Return all memory used by all HBQs */
3163f725 1990 spin_lock_irqsave(&phba->hbalock, flags);
51ef4c26
JS
1991 for (i = 0; i < hbq_count; ++i) {
1992 list_for_each_entry_safe(dmabuf, next_dmabuf,
1993 &phba->hbqs[i].hbq_buffer_list, list) {
1994 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1995 list_del(&hbq_buf->dbuf.list);
1996 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1997 }
a8adb832 1998 phba->hbqs[i].buffer_count = 0;
ed957684 1999 }
3163f725
JS
2000
2001 /* Mark the HBQs not in use */
2002 phba->hbq_in_use = 0;
2003 spin_unlock_irqrestore(&phba->hbalock, flags);
ed957684
JS
2004}
2005
e59058c4 2006/**
3621a710 2007 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
e59058c4
JS
2008 * @phba: Pointer to HBA context object.
2009 * @hbqno: HBQ number.
2010 * @hbq_buf: Pointer to HBQ buffer.
2011 *
2012 * This function is called with the hbalock held to post a
2013 * hbq buffer to the firmware. If the function finds an empty
2014 * slot in the HBQ, it will post the buffer. The function returns
2015 * zero if it successfully posts the buffer, else it returns an
2016 * error code.
2017 **/
3772a991 2018static int
ed957684 2019lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
92d7f7b0 2020 struct hbq_dmabuf *hbq_buf)
3772a991 2021{
1c2ba475 2022 lockdep_assert_held(&phba->hbalock);
3772a991
JS
2023 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2024}
2025
2026/**
2027 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2028 * @phba: Pointer to HBA context object.
2029 * @hbqno: HBQ number.
2030 * @hbq_buf: Pointer to HBQ buffer.
2031 *
2032 * This function is called with the hbalock held to post a hbq buffer to the
2033 * firmware. If the function finds an empty slot in the HBQ, it will post the
2034 * buffer and place it on the hbq_buffer_list. The function will return zero if
2035 * it successfully posts the buffer, else it will return an error.
2036 **/
2037static int
2038lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2039 struct hbq_dmabuf *hbq_buf)
ed957684
JS
2040{
2041 struct lpfc_hbq_entry *hbqe;
92d7f7b0 2042 dma_addr_t physaddr = hbq_buf->dbuf.phys;
ed957684 2043
1c2ba475 2044 lockdep_assert_held(&phba->hbalock);
ed957684
JS
2045 /* Get next HBQ entry slot to use */
2046 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2047 if (hbqe) {
2048 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2049
92d7f7b0
JS
2050 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2051 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
895427bd 2052 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
ed957684 2053 hbqe->bde.tus.f.bdeFlags = 0;
92d7f7b0
JS
2054 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2055 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2056 /* Sync SLIM */
ed957684
JS
2057 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2058 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
92d7f7b0 2059 /* flush */
ed957684 2060 readl(phba->hbq_put + hbqno);
51ef4c26 2061 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
3772a991
JS
2062 return 0;
2063 } else
2064 return -ENOMEM;
ed957684
JS
2065}
2066
4f774513
JS
2067/**
2068 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2069 * @phba: Pointer to HBA context object.
2070 * @hbqno: HBQ number.
2071 * @hbq_buf: Pointer to HBQ buffer.
2072 *
2073 * This function is called with the hbalock held to post an RQE to the SLI4
2074 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2075 * the hbq_buffer_list and return zero, otherwise it will return an error.
2076 **/
2077static int
2078lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2079 struct hbq_dmabuf *hbq_buf)
2080{
2081 int rc;
2082 struct lpfc_rqe hrqe;
2083 struct lpfc_rqe drqe;
895427bd
JS
2084 struct lpfc_queue *hrq;
2085 struct lpfc_queue *drq;
2086
2087 if (hbqno != LPFC_ELS_HBQ)
2088 return 1;
2089 hrq = phba->sli4_hba.hdr_rq;
2090 drq = phba->sli4_hba.dat_rq;
4f774513 2091
1c2ba475 2092 lockdep_assert_held(&phba->hbalock);
4f774513
JS
2093 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2094 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2095 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2096 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
895427bd 2097 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
4f774513
JS
2098 if (rc < 0)
2099 return rc;
895427bd 2100 hbq_buf->tag = (rc | (hbqno << 16));
4f774513
JS
2101 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2102 return 0;
2103}
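/*
 * Editorial aside, not part of the driver source: the SLI4 buffer tag
 * packs the HBQ number into the upper 16 bits and the RQE index
 * returned by lpfc_sli4_rq_put() into the lower 16 bits:
 *
 *	tag   = rqe_index | (hbqno << 16);
 *	hbqno = tag >> 16;		// as used by lpfc_sli_hbqbuf_find()
 *	index = tag & 0xffff;
 *
 * which is why lpfc_sli_hbqbuf_find() can recover the HBQ number from
 * a bare tag.
 */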
2104
e59058c4 2105/* HBQ for ELS and CT traffic. */
92d7f7b0
JS
2106static struct lpfc_hbq_init lpfc_els_hbq = {
2107 .rn = 1,
def9c7a9 2108 .entry_count = 256,
92d7f7b0
JS
2109 .mask_count = 0,
2110 .profile = 0,
51ef4c26 2111 .ring_mask = (1 << LPFC_ELS_RING),
92d7f7b0 2112 .buffer_count = 0,
a257bf90
JS
2113 .init_count = 40,
2114 .add_count = 40,
92d7f7b0 2115};
ed957684 2116
e59058c4 2117/* Array of HBQs */
78b2d852 2118struct lpfc_hbq_init *lpfc_hbq_defs[] = {
92d7f7b0
JS
2119 &lpfc_els_hbq,
2120};
ed957684 2121
e59058c4 2122/**
3621a710 2123 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
e59058c4
JS
2124 * @phba: Pointer to HBA context object.
2125 * @hbqno: HBQ number.
2126 * @count: Number of HBQ buffers to be posted.
2127 *
d7c255b2
JS
2128 * This function is called with no lock held to post more hbq buffers to the
2129 * given HBQ. The function returns the number of HBQ buffers successfully
2130 * posted.
e59058c4 2131 **/
311464ec 2132static int
92d7f7b0 2133lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
ed957684 2134{
d7c255b2 2135 uint32_t i, posted = 0;
3163f725 2136 unsigned long flags;
92d7f7b0 2137 struct hbq_dmabuf *hbq_buffer;
d7c255b2 2138 LIST_HEAD(hbq_buf_list);
eafe1df9 2139 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
51ef4c26 2140 return 0;
51ef4c26 2141
d7c255b2
JS
2142 if ((phba->hbqs[hbqno].buffer_count + count) >
2143 lpfc_hbq_defs[hbqno]->entry_count)
2144 count = lpfc_hbq_defs[hbqno]->entry_count -
2145 phba->hbqs[hbqno].buffer_count;
2146 if (!count)
2147 return 0;
2148 /* Allocate HBQ entries */
2149 for (i = 0; i < count; i++) {
2150 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2151 if (!hbq_buffer)
2152 break;
2153 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2154 }
3163f725
JS
2155 /* Check whether HBQ is still in use */
2156 spin_lock_irqsave(&phba->hbalock, flags);
eafe1df9 2157 if (!phba->hbq_in_use)
d7c255b2
JS
2158 goto err;
2159 while (!list_empty(&hbq_buf_list)) {
2160 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2161 dbuf.list);
2162 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2163 (hbqno << 16));
3772a991 2164 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
a8adb832 2165 phba->hbqs[hbqno].buffer_count++;
d7c255b2
JS
2166 posted++;
2167 } else
51ef4c26 2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684 2169 }
3163f725 2170 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2171 return posted;
2172err:
eafe1df9 2173 spin_unlock_irqrestore(&phba->hbalock, flags);
d7c255b2
JS
2174 while (!list_empty(&hbq_buf_list)) {
2175 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2176 dbuf.list);
2177 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2178 }
2179 return 0;
ed957684
JS
2180}
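/*
 * Editorial aside, not part of the driver source: the function above
 * follows the "allocate unlocked, commit locked" pattern. In outline:
 *
 *	LIST_HEAD(hbq_buf_list);
 *
 *	for (i = 0; i < count; i++)		// no lock: allocation may sleep
 *		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	if (!phba->hbq_in_use)			// revalidate under the lock
 *		goto err;			// free the whole local list
 *	// move buffers from hbq_buf_list to the firmware HBQ
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *
 * The hbq_in_use re-check handles HBQ teardown racing with the
 * unlocked allocation phase.
 */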
2181
e59058c4 2182/**
3621a710 2183 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
e59058c4
JS
2184 * @phba: Pointer to HBA context object.
2185 * @qno: HBQ number.
2186 *
2187 * This function posts more buffers to the HBQ. This function
d7c255b2
JS
2188 * is called with no lock held. The function returns the number of HBQ entries
2189 * successfully posted.
e59058c4 2190 **/
92d7f7b0
JS
2191int
2192lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
ed957684 2193{
def9c7a9
JS
2194 if (phba->sli_rev == LPFC_SLI_REV4)
2195 return 0;
2196 else
2197 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2198 lpfc_hbq_defs[qno]->add_count);
92d7f7b0 2199}
ed957684 2200
e59058c4 2201/**
3621a710 2202 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
e59058c4
JS
2203 * @phba: Pointer to HBA context object.
2204 * @qno: HBQ queue number.
2205 *
2206 * This function is called from SLI initialization code path with
2207 * no lock held to post initial HBQ buffers to firmware. The
d7c255b2 2208 * function returns the number of HBQ entries successfully posted.
e59058c4 2209 **/
a6ababd2 2210static int
92d7f7b0
JS
2211lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2212{
def9c7a9
JS
2213 if (phba->sli_rev == LPFC_SLI_REV4)
2214 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
73d91e50 2215 lpfc_hbq_defs[qno]->entry_count);
def9c7a9
JS
2216 else
2217 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2218 lpfc_hbq_defs[qno]->init_count);
ed957684
JS
2219}
2220
3772a991
JS
2221/**
2222 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2223 * @rb_list: Pointer to the HBQ buffer list.
2225 *
2226 * This function removes the first hbq buffer on an hbq list and returns a
2227 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2228 **/
2229static struct hbq_dmabuf *
2230lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2231{
2232 struct lpfc_dmabuf *d_buf;
2233
2234 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2235 if (!d_buf)
2236 return NULL;
2237 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2238}
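/*
 * Editorial aside, not part of the driver source: HBQ buffers are
 * tracked through the struct lpfc_dmabuf embedded in struct hbq_dmabuf,
 * so recovering the container is the standard container_of() idiom:
 *
 *	struct hbq_dmabuf *hbq_buf;
 *
 *	hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 *
 * 'dbuf' being the name of the embedded member; lpfc_sli_rqbuf_get()
 * below does the same through the 'hbuf' member of struct rqb_dmabuf.
 */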
2239
2d7dbc4c
JS
2240/**
2241 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2242 * @phba: Pointer to HBA context object.
2243 * @hrq: Pointer to the header receive queue.
2244 *
2245 * This function removes the first RQ buffer on an RQ buffer list and returns a
2246 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2247 **/
2248static struct rqb_dmabuf *
2249lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2250{
2251 struct lpfc_dmabuf *h_buf;
2252 struct lpfc_rqb *rqbp;
2253
2254 rqbp = hrq->rqbp;
2255 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2256 struct lpfc_dmabuf, list);
2257 if (!h_buf)
2258 return NULL;
2259 rqbp->buffer_count--;
2260 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2261}
2262
e59058c4 2263/**
3621a710 2264 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
2265 * @phba: Pointer to HBA context object.
2266 * @tag: Tag of the hbq buffer.
2267 *
71892418
SH
2268 * This function searches for the hbq buffer associated with the given tag in
2269 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2270 * otherwise it returns NULL.
e59058c4 2271 **/
a6ababd2 2272static struct hbq_dmabuf *
92d7f7b0 2273lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 2274{
92d7f7b0
JS
2275 struct lpfc_dmabuf *d_buf;
2276 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
2277 uint32_t hbqno;
2278
2279 hbqno = tag >> 16;
a0a74e45 2280 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 2281 return NULL;
ed957684 2282
3772a991 2283 spin_lock_irq(&phba->hbalock);
51ef4c26 2284 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 2285 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 2286 if (hbq_buf->tag == tag) {
3772a991 2287 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2288 return hbq_buf;
ed957684
JS
2289 }
2290 }
3772a991 2291 spin_unlock_irq(&phba->hbalock);
92d7f7b0 2292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 2293 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 2294 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 2295 return NULL;
ed957684
JS
2296}
2297
e59058c4 2298/**
3621a710 2299 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
2300 * @phba: Pointer to HBA context object.
2301 * @hbq_buffer: Pointer to HBQ buffer.
2302 *
2303 * This function is called with the hbalock held. This function gives back
2304 * the hbq buffer to firmware. If the HBQ does not have space to
2305 * post the buffer, it will free the buffer.
2306 **/
ed957684 2307void
51ef4c26 2308lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
2309{
2310 uint32_t hbqno;
2311
51ef4c26
JS
2312 if (hbq_buffer) {
2313 hbqno = hbq_buffer->tag >> 16;
3772a991 2314 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 2315 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
2316 }
2317}
2318
e59058c4 2319/**
3621a710 2320 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
2321 * @mbxCommand: mailbox command code.
2322 *
2323 * This function is called by the mailbox event handler function to verify
2324 * that the completed mailbox command is a legitimate mailbox command. If the
2325 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2326 * and the mailbox event handler will take the HBA offline.
2327 **/
dea3101e 2328static int
2329lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2330{
2331 uint8_t ret;
2332
2333 switch (mbxCommand) {
2334 case MBX_LOAD_SM:
2335 case MBX_READ_NV:
2336 case MBX_WRITE_NV:
a8adb832 2337 case MBX_WRITE_VPARMS:
dea3101e 2338 case MBX_RUN_BIU_DIAG:
2339 case MBX_INIT_LINK:
2340 case MBX_DOWN_LINK:
2341 case MBX_CONFIG_LINK:
2342 case MBX_CONFIG_RING:
2343 case MBX_RESET_RING:
2344 case MBX_READ_CONFIG:
2345 case MBX_READ_RCONFIG:
2346 case MBX_READ_SPARM:
2347 case MBX_READ_STATUS:
2348 case MBX_READ_RPI:
2349 case MBX_READ_XRI:
2350 case MBX_READ_REV:
2351 case MBX_READ_LNK_STAT:
2352 case MBX_REG_LOGIN:
2353 case MBX_UNREG_LOGIN:
dea3101e 2354 case MBX_CLEAR_LA:
2355 case MBX_DUMP_MEMORY:
2356 case MBX_DUMP_CONTEXT:
2357 case MBX_RUN_DIAGS:
2358 case MBX_RESTART:
2359 case MBX_UPDATE_CFG:
2360 case MBX_DOWN_LOAD:
2361 case MBX_DEL_LD_ENTRY:
2362 case MBX_RUN_PROGRAM:
2363 case MBX_SET_MASK:
09372820 2364 case MBX_SET_VARIABLE:
dea3101e 2365 case MBX_UNREG_D_ID:
41415862 2366 case MBX_KILL_BOARD:
dea3101e 2367 case MBX_CONFIG_FARP:
41415862 2368 case MBX_BEACON:
dea3101e 2369 case MBX_LOAD_AREA:
2370 case MBX_RUN_BIU_DIAG64:
2371 case MBX_CONFIG_PORT:
2372 case MBX_READ_SPARM64:
2373 case MBX_READ_RPI64:
2374 case MBX_REG_LOGIN64:
76a95d75 2375 case MBX_READ_TOPOLOGY:
09372820 2376 case MBX_WRITE_WWN:
dea3101e 2377 case MBX_SET_DEBUG:
2378 case MBX_LOAD_EXP_ROM:
57127f15 2379 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
2380 case MBX_REG_VPI:
2381 case MBX_UNREG_VPI:
858c9f6c 2382 case MBX_HEARTBEAT:
84774a4d
JS
2383 case MBX_PORT_CAPABILITIES:
2384 case MBX_PORT_IOV_CONTROL:
04c68496
JS
2385 case MBX_SLI4_CONFIG:
2386 case MBX_SLI4_REQ_FTRS:
2387 case MBX_REG_FCFI:
2388 case MBX_UNREG_FCFI:
2389 case MBX_REG_VFI:
2390 case MBX_UNREG_VFI:
2391 case MBX_INIT_VPI:
2392 case MBX_INIT_VFI:
2393 case MBX_RESUME_RPI:
c7495937
JS
2394 case MBX_READ_EVENT_LOG_STATUS:
2395 case MBX_READ_EVENT_LOG:
dcf2a4e0
JS
2396 case MBX_SECURITY_MGMT:
2397 case MBX_AUTH_PORT:
940eb687 2398 case MBX_ACCESS_VDATA:
dea3101e 2399 ret = mbxCommand;
2400 break;
2401 default:
2402 ret = MBX_SHUTDOWN;
2403 break;
2404 }
2e0fef85 2405 return ret;
dea3101e 2406}
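/*
 * Editorial aside, not part of the driver source: callers treat
 * MBX_SHUTDOWN as the "unknown command" sentinel, e.g. in
 * lpfc_sli_handle_mb_event() below:
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}
 */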
e59058c4
JS
2407
2408/**
3621a710 2409 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
2410 * @phba: Pointer to HBA context object.
2411 * @pmboxq: Pointer to mailbox command.
2412 *
2413 * This is completion handler function for mailbox commands issued from
2414 * lpfc_sli_issue_mbox_wait function. This function is called by the
2415 * mailbox event handler function with no lock held. This function
2416 * will wake up thread waiting on the wait queue pointed by context1
2417 * of the mailbox.
2418 **/
04c68496 2419void
2e0fef85 2420lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e 2421{
858c9f6c 2422 unsigned long drvr_flag;
e29d74f8 2423 struct completion *pmbox_done;
dea3101e 2424
2425 /*
e29d74f8 2426 * If pmbox_done is empty, the driver thread gave up waiting and
dea3101e 2427 * continued running.
2428 */
7054a606 2429 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 2430 spin_lock_irqsave(&phba->hbalock, drvr_flag);
e29d74f8
JS
2431 pmbox_done = (struct completion *)pmboxq->context3;
2432 if (pmbox_done)
2433 complete(pmbox_done);
858c9f6c 2434 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 2435 return;
2436}
2437
e59058c4
JS
2438
2439/**
3621a710 2440 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
2441 * @phba: Pointer to HBA context object.
2442 * @pmb: Pointer to mailbox object.
2443 *
2444 * This function is the default mailbox completion handler. It
2445 * frees the memory resources associated with the completed mailbox
2446 * command. If the completed command is a REG_LOGIN mailbox command,
2447 * this function will issue an UNREG_LOGIN to re-claim the RPI.
2448 **/
dea3101e 2449void
2e0fef85 2450lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2451{
d439d286 2452 struct lpfc_vport *vport = pmb->vport;
dea3101e 2453 struct lpfc_dmabuf *mp;
d439d286 2454 struct lpfc_nodelist *ndlp;
5af5eee7 2455 struct Scsi_Host *shost;
04c68496 2456 uint16_t rpi, vpi;
7054a606
JS
2457 int rc;
2458
3e1f0718 2459 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
7054a606 2460
dea3101e 2461 if (mp) {
2462 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2463 kfree(mp);
2464 }
7054a606
JS
2465
2466 /*
2467 * If a REG_LOGIN succeeded after the node was destroyed or the node
2468 * is in re-discovery, the driver needs to clean up the RPI.
2469 */
2e0fef85 2470 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
2471 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2472 !pmb->u.mb.mbxStatus) {
2473 rpi = pmb->u.mb.un.varWords[0];
6d368e53 2474 vpi = pmb->u.mb.un.varRegLogin.vpi;
04c68496 2475 lpfc_unreg_login(phba, vpi, rpi, pmb);
de96e9c5 2476 pmb->vport = vport;
92d7f7b0 2477 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
2478 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2479 if (rc != MBX_NOT_FINISHED)
2480 return;
2481 }
2482
695a814e
JS
2483 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2484 !(phba->pport->load_flag & FC_UNLOADING) &&
2485 !pmb->u.mb.mbxStatus) {
5af5eee7
JS
2486 shost = lpfc_shost_from_vport(vport);
2487 spin_lock_irq(shost->host_lock);
2488 vport->vpi_state |= LPFC_VPI_REGISTERED;
2489 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2490 spin_unlock_irq(shost->host_lock);
695a814e
JS
2491 }
2492
d439d286 2493 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 2494 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
d439d286 2495 lpfc_nlp_put(ndlp);
dea16bda
JS
2496 pmb->ctx_buf = NULL;
2497 pmb->ctx_ndlp = NULL;
2498 }
2499
2500 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2501 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2502
2503 /* Check to see if there are any deferred events to process */
2504 if (ndlp) {
2505 lpfc_printf_vlog(
2506 vport,
2507 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2508 "1438 UNREG cmpl deferred mbox x%x "
2509 "on NPort x%x Data: x%x x%x %p\n",
2510 ndlp->nlp_rpi, ndlp->nlp_DID,
2511 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2512
2513 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2514 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
00292e03 2515 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2516 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2517 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
00292e03
JS
2518 } else {
2519 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda 2520 }
dea16bda 2521 }
3e1f0718 2522 pmb->ctx_ndlp = NULL;
d439d286
JS
2523 }
2524
dcf2a4e0
JS
2525 /* Check security permission status on INIT_LINK mailbox command */
2526 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2527 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2528 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2529 "2860 SLI authentication is required "
2530 "for INIT_LINK but has not done yet\n");
2531
04c68496
JS
2532 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2533 lpfc_sli4_mbox_cmd_free(phba, pmb);
2534 else
2535 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2536}
be6bb941
JS
2537 /**
2538 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2539 * @phba: Pointer to HBA context object.
2540 * @pmb: Pointer to mailbox object.
2541 *
2542 * This function is the unreg rpi mailbox completion handler. It
2543 * frees the memory resources associated with the completed mailbox
2544 * command. An additional reference is put on the ndlp to prevent
2545 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2546 * the unreg mailbox command completes; this routine puts the
2547 * reference back.
2548 *
2549 **/
2550void
2551lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2552{
2553 struct lpfc_vport *vport = pmb->vport;
2554 struct lpfc_nodelist *ndlp;
2555
3e1f0718 2556 ndlp = pmb->ctx_ndlp;
be6bb941
JS
2557 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2558 if (phba->sli_rev == LPFC_SLI_REV4 &&
2559 (bf_get(lpfc_sli_intf_if_type,
27d6ac0a 2560 &phba->sli4_hba.sli_intf) >=
be6bb941
JS
2561 LPFC_SLI_INTF_IF_TYPE_2)) {
2562 if (ndlp) {
dea16bda
JS
2563 lpfc_printf_vlog(
2564 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2565 "0010 UNREG_LOGIN vpi:%x "
2566 "rpi:%x DID:%x defer x%x flg x%x "
2567 "map:%x %p\n",
2568 vport->vpi, ndlp->nlp_rpi,
2569 ndlp->nlp_DID, ndlp->nlp_defer_did,
2570 ndlp->nlp_flag,
2571 ndlp->nlp_usg_map, ndlp);
7c5e518c 2572 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
be6bb941 2573 lpfc_nlp_put(ndlp);
dea16bda
JS
2574
2575 /* Check to see if there are any deferred
2576 * events to process
2577 */
2578 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2579 (ndlp->nlp_defer_did !=
2580 NLP_EVT_NOTHING_PENDING)) {
2581 lpfc_printf_vlog(
2582 vport, KERN_INFO, LOG_DISCOVERY,
2583 "4111 UNREG cmpl deferred "
2584 "clr x%x on "
2585 "NPort x%x Data: x%x %p\n",
2586 ndlp->nlp_rpi, ndlp->nlp_DID,
2587 ndlp->nlp_defer_did, ndlp);
00292e03 2588 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda
JS
2589 ndlp->nlp_defer_did =
2590 NLP_EVT_NOTHING_PENDING;
2591 lpfc_issue_els_plogi(
2592 vport, ndlp->nlp_DID, 0);
00292e03
JS
2593 } else {
2594 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda 2595 }
be6bb941
JS
2596 }
2597 }
2598 }
2599
2600 mempool_free(pmb, phba->mbox_mem_pool);
2601}
dea3101e 2602
e59058c4 2603/**
3621a710 2604 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
2605 * @phba: Pointer to HBA context object.
2606 *
2607 * This function is called with no lock held. This function processes all
2608 * the completed mailbox commands and gives them to the upper layers. The interrupt
2609 * service routine processes mailbox completion interrupt and adds completed
2610 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2611 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2612 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2613 * function returns the mailbox commands to the upper layer by calling the
2614 * completion handler function of each mailbox.
2615 **/
dea3101e 2616int
2e0fef85 2617lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 2618{
92d7f7b0 2619 MAILBOX_t *pmbox;
dea3101e 2620 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
2621 int rc;
2622 LIST_HEAD(cmplq);
dea3101e 2623
2624 phba->sli.slistat.mbox_event++;
2625
92d7f7b0
JS
2626 /* Get all completed mailbox buffers into the cmplq */
2627 spin_lock_irq(&phba->hbalock);
2628 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2629 spin_unlock_irq(&phba->hbalock);
dea3101e 2630
92d7f7b0
JS
2631 /* Get a mailbox buffer to set up mailbox commands for callback */
2632 do {
2633 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2634 if (pmb == NULL)
2635 break;
2e0fef85 2636
04c68496 2637 pmbox = &pmb->u.mb;
dea3101e 2638
858c9f6c
JS
2639 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2640 if (pmb->vport) {
2641 lpfc_debugfs_disc_trc(pmb->vport,
2642 LPFC_DISC_TRC_MBOX_VPORT,
2643 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2644 (uint32_t)pmbox->mbxCommand,
2645 pmbox->un.varWords[0],
2646 pmbox->un.varWords[1]);
2647 }
2648 else {
2649 lpfc_debugfs_disc_trc(phba->pport,
2650 LPFC_DISC_TRC_MBOX,
2651 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2652 (uint32_t)pmbox->mbxCommand,
2653 pmbox->un.varWords[0],
2654 pmbox->un.varWords[1]);
2655 }
2656 }
2657
dea3101e 2658 /*
2659 * It is a fatal error if an unknown mbox command completes.
2660 */
2661 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2662 MBX_SHUTDOWN) {
af901ca1 2663 /* Unknown mailbox command compl */
92d7f7b0 2664 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 2665 "(%d):0323 Unknown Mailbox command "
a183a15f 2666 "x%x (x%x/x%x) Cmpl\n",
92d7f7b0 2667 pmb->vport ? pmb->vport->vpi : 0,
04c68496 2668 pmbox->mbxCommand,
a183a15f
JS
2669 lpfc_sli_config_mbox_subsys_get(phba,
2670 pmb),
2671 lpfc_sli_config_mbox_opcode_get(phba,
2672 pmb));
2e0fef85 2673 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2674 phba->work_hs = HS_FFER3;
2675 lpfc_handle_eratt(phba);
92d7f7b0 2676 continue;
dea3101e 2677 }
2678
dea3101e 2679 if (pmbox->mbxStatus) {
2680 phba->sli.slistat.mbox_stat_err++;
2681 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2682 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0 2683 lpfc_printf_log(phba, KERN_INFO,
a183a15f
JS
2684 LOG_MBOX | LOG_SLI,
2685 "(%d):0305 Mbox cmd cmpl "
2686 "error - RETRYing Data: x%x "
2687 "(x%x/x%x) x%x x%x x%x\n",
2688 pmb->vport ? pmb->vport->vpi : 0,
2689 pmbox->mbxCommand,
2690 lpfc_sli_config_mbox_subsys_get(phba,
2691 pmb),
2692 lpfc_sli_config_mbox_opcode_get(phba,
2693 pmb),
2694 pmbox->mbxStatus,
2695 pmbox->un.varWords[0],
2696 pmb->vport->port_state);
dea3101e 2697 pmbox->mbxStatus = 0;
2698 pmbox->mbxOwner = OWN_HOST;
dea3101e 2699 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 2700 if (rc != MBX_NOT_FINISHED)
92d7f7b0 2701 continue;
dea3101e 2702 }
2703 }
2704
2705 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 2706 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 2707 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
e74c03c8
JS
2708 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2709 "x%x x%x x%x\n",
92d7f7b0 2710 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 2711 pmbox->mbxCommand,
a183a15f
JS
2712 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2713 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea3101e 2714 pmb->mbox_cmpl,
2715 *((uint32_t *) pmbox),
2716 pmbox->un.varWords[0],
2717 pmbox->un.varWords[1],
2718 pmbox->un.varWords[2],
2719 pmbox->un.varWords[3],
2720 pmbox->un.varWords[4],
2721 pmbox->un.varWords[5],
2722 pmbox->un.varWords[6],
e74c03c8
JS
2723 pmbox->un.varWords[7],
2724 pmbox->un.varWords[8],
2725 pmbox->un.varWords[9],
2726 pmbox->un.varWords[10]);
dea3101e 2727
92d7f7b0 2728 if (pmb->mbox_cmpl)
dea3101e 2729 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
2730 } while (1);
2731 return 0;
2732}
dea3101e 2733
e59058c4 2734/**
3621a710 2735 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
2736 * @phba: Pointer to HBA context object.
2737 * @pring: Pointer to driver SLI ring object.
2738 * @tag: buffer tag.
2739 *
2740 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2741 * is set in the tag, the buffer was posted for a particular exchange and
2742 * the function will return the buffer without replacing it.
2743 * If the buffer is for unsolicited ELS or CT traffic, this function
2744 * returns the buffer and also posts another buffer to the firmware.
2745 **/
76bb24ef
JS
2746static struct lpfc_dmabuf *
2747lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
2748 struct lpfc_sli_ring *pring,
2749 uint32_t tag)
76bb24ef 2750{
9f1e1b50
JS
2751 struct hbq_dmabuf *hbq_entry;
2752
76bb24ef
JS
2753 if (tag & QUE_BUFTAG_BIT)
2754 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
2755 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2756 if (!hbq_entry)
2757 return NULL;
2758 return &hbq_entry->dbuf;
76bb24ef 2759}
57127f15 2760
3772a991
JS
2761/**
2762 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2763 * @phba: Pointer to HBA context object.
2764 * @pring: Pointer to driver SLI ring object.
2765 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2766 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2767 * @fch_type: the type for the first frame of the sequence.
2768 *
2769 * This function is called with no lock held. This function uses the r_ctl and
2770 * type of the received sequence to find the correct callback function to call
2771 * to process the sequence.
2772 **/
2773static int
2774lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2775 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2776 uint32_t fch_type)
2777{
2778 int i;
2779
f358dd0c
JS
2780 switch (fch_type) {
2781 case FC_TYPE_NVME:
d613b6a7 2782 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
f358dd0c
JS
2783 return 1;
2784 default:
2785 break;
2786 }
2787
3772a991
JS
2788 /* unSolicited Responses */
2789 if (pring->prt[0].profile) {
2790 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2791 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2792 saveq);
2793 return 1;
2794 }
2795 /* We must search, based on rctl / type,
2796 * for the right routine */
2797 for (i = 0; i < pring->num_mask; i++) {
2798 if ((pring->prt[i].rctl == fch_r_ctl) &&
2799 (pring->prt[i].type == fch_type)) {
2800 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2801 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2802 (phba, pring, saveq);
2803 return 1;
2804 }
2805 }
2806 return 0;
2807}
e59058c4
JS
2808
2809/**
3621a710 2810 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
2811 * @phba: Pointer to HBA context object.
2812 * @pring: Pointer to driver SLI ring object.
2813 * @saveq: Pointer to the unsolicited iocb.
2814 *
2815 * This function is called with no lock held by the ring event handler
2816 * when there is an unsolicited iocb posted to the response ring by the
2817 * firmware. This function gets the buffer associated with the iocbs
2818 * and calls the event handler for the ring. This function handles both
2819 * qring buffers and hbq buffers.
2820 * When the function returns 1, the caller can free the iocb object; otherwise
2821 * upper layer functions will free the iocb objects.
2822 **/
dea3101e 2823static int
2824lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2825 struct lpfc_iocbq *saveq)
2826{
2827 IOCB_t * irsp;
2828 WORD5 * w5p;
2829 uint32_t Rctl, Type;
76bb24ef 2830 struct lpfc_iocbq *iocbq;
3163f725 2831 struct lpfc_dmabuf *dmzbuf;
dea3101e 2832
dea3101e 2833 irsp = &(saveq->iocb);
57127f15
JS
2834
2835 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2836 if (pring->lpfc_sli_rcv_async_status)
2837 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2838 else
2839 lpfc_printf_log(phba,
2840 KERN_WARNING,
2841 LOG_SLI,
2842 "0316 Ring %d handler: unexpected "
2843 "ASYNC_STATUS iocb received evt_code "
2844 "0x%x\n",
2845 pring->ringno,
2846 irsp->un.asyncstat.evt_code);
2847 return 1;
2848 }
2849
3163f725
JS
2850 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2851 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2852 if (irsp->ulpBdeCount > 0) {
2853 dmzbuf = lpfc_sli_get_buff(phba, pring,
2854 irsp->un.ulpWord[3]);
2855 lpfc_in_buf_free(phba, dmzbuf);
2856 }
2857
2858 if (irsp->ulpBdeCount > 1) {
2859 dmzbuf = lpfc_sli_get_buff(phba, pring,
2860 irsp->unsli3.sli3Words[3]);
2861 lpfc_in_buf_free(phba, dmzbuf);
2862 }
2863
2864 if (irsp->ulpBdeCount > 2) {
2865 dmzbuf = lpfc_sli_get_buff(phba, pring,
2866 irsp->unsli3.sli3Words[7]);
2867 lpfc_in_buf_free(phba, dmzbuf);
2868 }
2869
2870 return 1;
2871 }
2872
92d7f7b0 2873 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2874 if (irsp->ulpBdeCount != 0) {
2875 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2876 irsp->un.ulpWord[3]);
2877 if (!saveq->context2)
2878 lpfc_printf_log(phba,
2879 KERN_ERR,
2880 LOG_SLI,
2881 "0341 Ring %d Cannot find buffer for "
2882 "an unsolicited iocb. tag 0x%x\n",
2883 pring->ringno,
2884 irsp->un.ulpWord[3]);
76bb24ef
JS
2885 }
2886 if (irsp->ulpBdeCount == 2) {
2887 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2888 irsp->unsli3.sli3Words[7]);
2889 if (!saveq->context3)
2890 lpfc_printf_log(phba,
2891 KERN_ERR,
2892 LOG_SLI,
2893 "0342 Ring %d Cannot find buffer for an"
2894 " unsolicited iocb. tag 0x%x\n",
2895 pring->ringno,
2896 irsp->unsli3.sli3Words[7]);
2897 }
2898 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2899 irsp = &(iocbq->iocb);
76bb24ef
JS
2900 if (irsp->ulpBdeCount != 0) {
2901 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2902 irsp->un.ulpWord[3]);
9c2face6 2903 if (!iocbq->context2)
76bb24ef
JS
2904 lpfc_printf_log(phba,
2905 KERN_ERR,
2906 LOG_SLI,
2907 "0343 Ring %d Cannot find "
2908 "buffer for an unsolicited iocb"
2909 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2910 irsp->un.ulpWord[3]);
76bb24ef
JS
2911 }
2912 if (irsp->ulpBdeCount == 2) {
2913 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2914 irsp->unsli3.sli3Words[7]);
9c2face6 2915 if (!iocbq->context3)
76bb24ef
JS
2916 lpfc_printf_log(phba,
2917 KERN_ERR,
2918 LOG_SLI,
2919 "0344 Ring %d Cannot find "
2920 "buffer for an unsolicited "
2921 "iocb. tag 0x%x\n",
2922 pring->ringno,
2923 irsp->unsli3.sli3Words[7]);
2924 }
2925 }
92d7f7b0 2926 }
9c2face6
JS
2927 if (irsp->ulpBdeCount != 0 &&
2928 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2929 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2930 int found = 0;
2931
2932 /* search continue save q for same XRI */
2933 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7851fe2c
JS
2934 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2935 saveq->iocb.unsli3.rcvsli3.ox_id) {
9c2face6
JS
2936 list_add_tail(&saveq->list, &iocbq->list);
2937 found = 1;
2938 break;
2939 }
2940 }
2941 if (!found)
2942 list_add_tail(&saveq->clist,
2943 &pring->iocb_continue_saveq);
2944 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2945 list_del_init(&iocbq->clist);
2946 saveq = iocbq;
2947 irsp = &(saveq->iocb);
2948 } else
2949 return 0;
2950 }
2951 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2952 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2953 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2954 Rctl = FC_RCTL_ELS_REQ;
2955 Type = FC_TYPE_ELS;
9c2face6
JS
2956 } else {
2957 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2958 Rctl = w5p->hcsw.Rctl;
2959 Type = w5p->hcsw.Type;
2960
2961 /* Firmware Workaround */
2962 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2963 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2964 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2965 Rctl = FC_RCTL_ELS_REQ;
2966 Type = FC_TYPE_ELS;
9c2face6
JS
2967 w5p->hcsw.Rctl = Rctl;
2968 w5p->hcsw.Type = Type;
2969 }
2970 }
92d7f7b0 2971
3772a991 2972 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2973 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2974 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2975 "Type x%x received\n",
e8b62011 2976 pring->ringno, Rctl, Type);
3772a991 2977
92d7f7b0 2978 return 1;
dea3101e 2979}
2980
e59058c4 2981/**
3621a710 2982 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2983 * @phba: Pointer to HBA context object.
2984 * @pring: Pointer to driver SLI ring object.
2985 * @prspiocb: Pointer to response iocb object.
2986 *
2987 * This function looks up the iocb_lookup table to get the command iocb
2988 * corresponding to the given response iocb using the iotag of the
341b2aa8
DK
2989 * response iocb. This function is called with the hbalock held
2990 * for sli3 devices or the ring_lock for sli4 devices.
e59058c4
JS
2991 * This function returns the command iocb object if it finds the command
2992 * iocb else returns NULL.
2993 **/
dea3101e 2994static struct lpfc_iocbq *
2e0fef85
JS
2995lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2996 struct lpfc_sli_ring *pring,
2997 struct lpfc_iocbq *prspiocb)
dea3101e 2998{
dea3101e 2999 struct lpfc_iocbq *cmd_iocb = NULL;
3000 uint16_t iotag;
1c2ba475 3001 lockdep_assert_held(&phba->hbalock);
dea3101e 3002
604a3e30
JB
3003 iotag = prspiocb->iocb.ulpIoTag;
3004
3005 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3006 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6 3007 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
89533e9b
JS
3008 /* remove from txcmpl queue list */
3009 list_del_init(&cmd_iocb->list);
4f2e66c6 3010 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
89533e9b 3011 return cmd_iocb;
2a9bf3d0 3012 }
dea3101e 3013 }
3014
dea3101e 3015 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
89533e9b 3016 "0317 iotag x%x is out of "
604a3e30 3017 "range: max iotag x%x wd0 x%x\n",
e8b62011 3018 iotag, phba->sli.last_iotag,
604a3e30 3019 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e 3020 return NULL;
3021}
3022
3772a991
JS
3023/**
3024 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3025 * @phba: Pointer to HBA context object.
3026 * @pring: Pointer to driver SLI ring object.
3027 * @iotag: IOCB tag.
3028 *
3029 * This function looks up the iocb_lookup table to get the command iocb
3030 * corresponding to the given iotag. This function is called with the
3031 * hbalock held.
3032 * This function returns the command iocb object if it finds the command
3033 * iocb else returns NULL.
3034 **/
3035static struct lpfc_iocbq *
3036lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3037 struct lpfc_sli_ring *pring, uint16_t iotag)
3038{
895427bd 3039 struct lpfc_iocbq *cmd_iocb = NULL;
3772a991 3040
1c2ba475 3041 lockdep_assert_held(&phba->hbalock);
3772a991
JS
3042 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3043 cmd_iocb = phba->sli.iocbq_lookup[iotag];
4f2e66c6
JS
3044 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3045 /* remove from txcmpl queue list */
3046 list_del_init(&cmd_iocb->list);
3047 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4f2e66c6 3048 return cmd_iocb;
2a9bf3d0 3049 }
3772a991 3050 }
89533e9b 3051
3772a991 3052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd
JS
3053 "0372 iotag x%x lookup error: max iotag (x%x) "
3054 "iocb_flag x%x\n",
3055 iotag, phba->sli.last_iotag,
3056 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3772a991
JS
3057 return NULL;
3058}
3059
e59058c4 3060/**
3621a710 3061 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
3062 * @phba: Pointer to HBA context object.
3063 * @pring: Pointer to driver SLI ring object.
3064 * @saveq: Pointer to the response iocb to be processed.
3065 *
3066 * This function is called by the ring event handler for non-fcp
3067 * rings when there is a new response iocb in the response ring.
3068 * The caller is not required to hold any locks. This function
3069 * gets the command iocb associated with the response iocb and
3070 * calls the completion handler for the command iocb. If there
3071 * is no completion handler, the function will free the resources
3072 * associated with command iocb. If the response iocb is for
3073 * an already aborted command iocb, the status of the completion
3074 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3075 * This function always returns 1.
3076 **/
dea3101e 3077static int
2e0fef85 3078lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e 3079 struct lpfc_iocbq *saveq)
3080{
2e0fef85 3081 struct lpfc_iocbq *cmdiocbp;
dea3101e 3082 int rc = 1;
3083 unsigned long iflag;
3084
3085 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
341b2aa8
DK
3086 if (phba->sli_rev == LPFC_SLI_REV4)
3087 spin_lock_irqsave(&pring->ring_lock, iflag);
3088 else
3089 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 3090 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
341b2aa8
DK
3091 if (phba->sli_rev == LPFC_SLI_REV4)
3092 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3093 else
3094 spin_unlock_irqrestore(&phba->hbalock, iflag);
2e0fef85 3095
dea3101e 3096 if (cmdiocbp) {
3097 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
3098 /*
3099 * If an ELS command failed send an event to mgmt
3100 * application.
3101 */
3102 if (saveq->iocb.ulpStatus &&
3103 (pring->ringno == LPFC_ELS_RING) &&
3104 (cmdiocbp->iocb.ulpCommand ==
3105 CMD_ELS_REQUEST64_CR))
3106 lpfc_send_els_failure_event(phba,
3107 cmdiocbp, saveq);
3108
dea3101e 3109 /*
3110 * Post all ELS completions to the worker thread.
3111 * All other are passed to the completion callback.
3112 */
3113 if (pring->ringno == LPFC_ELS_RING) {
341af102
JS
3114 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3115 (cmdiocbp->iocb_flag &
3116 LPFC_DRIVER_ABORTED)) {
3117 spin_lock_irqsave(&phba->hbalock,
3118 iflag);
07951076
JS
3119 cmdiocbp->iocb_flag &=
3120 ~LPFC_DRIVER_ABORTED;
341af102
JS
3121 spin_unlock_irqrestore(&phba->hbalock,
3122 iflag);
07951076
JS
3123 saveq->iocb.ulpStatus =
3124 IOSTAT_LOCAL_REJECT;
3125 saveq->iocb.un.ulpWord[4] =
3126 IOERR_SLI_ABORTED;
0ff10d46
JS
3127
3128 /* Firmware could still be in progress
3129 * of DMAing payload, so don't free data
3130 * buffer till after a hbeat.
3131 */
341af102
JS
3132 spin_lock_irqsave(&phba->hbalock,
3133 iflag);
0ff10d46 3134 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
341af102
JS
3135 spin_unlock_irqrestore(&phba->hbalock,
3136 iflag);
3137 }
0f65ff68
JS
3138 if (phba->sli_rev == LPFC_SLI_REV4) {
3139 if (saveq->iocb_flag &
3140 LPFC_EXCHANGE_BUSY) {
3141 /* Set cmdiocb flag for the
3142 * exchange busy so sgl (xri)
3143 * will not be released until
3144 * the abort xri is received
3145 * from hba.
3146 */
3147 spin_lock_irqsave(
3148 &phba->hbalock, iflag);
3149 cmdiocbp->iocb_flag |=
3150 LPFC_EXCHANGE_BUSY;
3151 spin_unlock_irqrestore(
3152 &phba->hbalock, iflag);
3153 }
3154 if (cmdiocbp->iocb_flag &
3155 LPFC_DRIVER_ABORTED) {
3156 /*
3157 * Clear LPFC_DRIVER_ABORTED
3158 * bit in case it was driver
3159 * initiated abort.
3160 */
3161 spin_lock_irqsave(
3162 &phba->hbalock, iflag);
3163 cmdiocbp->iocb_flag &=
3164 ~LPFC_DRIVER_ABORTED;
3165 spin_unlock_irqrestore(
3166 &phba->hbalock, iflag);
3167 cmdiocbp->iocb.ulpStatus =
3168 IOSTAT_LOCAL_REJECT;
3169 cmdiocbp->iocb.un.ulpWord[4] =
3170 IOERR_ABORT_REQUESTED;
3171 /*
3172 * For SLI4, irsiocb contains
3173 * NO_XRI in sli_xritag; it
3174 * shall not affect the sgl
3175 * (xri) release process.
3176 */
3177 saveq->iocb.ulpStatus =
3178 IOSTAT_LOCAL_REJECT;
3179 saveq->iocb.un.ulpWord[4] =
3180 IOERR_SLI_ABORTED;
3181 spin_lock_irqsave(
3182 &phba->hbalock, iflag);
3183 saveq->iocb_flag |=
3184 LPFC_DELAY_MEM_FREE;
3185 spin_unlock_irqrestore(
3186 &phba->hbalock, iflag);
3187 }
07951076 3188 }
dea3101e 3189 }
2e0fef85 3190 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
3191 } else
3192 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e 3193 } else {
3194 /*
3195 * Unknown initiating command based on the response iotag.
3196 * This could be the case on the ELS ring because of
3197 * lpfc_els_abort().
3198 */
3199 if (pring->ringno != LPFC_ELS_RING) {
3200 /*
3201 * Ring <ringno> handler: unexpected completion IoTag
3202 * <IoTag>
3203 */
a257bf90 3204 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
3205 "0322 Ring %d handler: "
3206 "unexpected completion IoTag x%x "
3207 "Data: x%x x%x x%x x%x\n",
3208 pring->ringno,
3209 saveq->iocb.ulpIoTag,
3210 saveq->iocb.ulpStatus,
3211 saveq->iocb.un.ulpWord[4],
3212 saveq->iocb.ulpCommand,
3213 saveq->iocb.ulpContext);
dea3101e 3214 }
3215 }
68876920 3216
dea3101e 3217 return rc;
3218}
3219
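/*
 * Illustrative sketch, not part of the driver: when the routine above sees
 * a response for a command the driver itself aborted, it rewrites the
 * status words before invoking the completion, so the upper layer observes
 * a local abort rather than the raw firmware status. The ex_* names and
 * numeric values below are illustrative, not the real lpfc codes.
 */
#include <stdint.h>

#define EX_STAT_LOCAL_REJECT 3		/* illustrative value */
#define EX_ERR_SLI_ABORTED   0x11	/* illustrative value */

struct ex_rsp {
	uint32_t status;
	uint32_t word4;
};

static void ex_mark_aborted(struct ex_rsp *rsp, int driver_aborted)
{
	if (!driver_aborted)
		return;			/* deliver firmware status as-is */
	rsp->status = EX_STAT_LOCAL_REJECT;
	rsp->word4 = EX_ERR_SLI_ABORTED;
}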
e59058c4 3220/**
3621a710 3221 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
3222 * @phba: Pointer to HBA context object.
3223 * @pring: Pointer to driver SLI ring object.
3224 *
3225 * This function is called from the iocb ring event handlers when
3226 * put pointer is ahead of the get pointer for a ring. This function signals
3227 * an error attention condition to the worker thread and the worker
3228 * thread will transition the HBA to offline state.
3229 **/
2e0fef85
JS
3230static void
3231lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 3232{
34b02dcd 3233 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 3234 /*
025dfdaf 3235 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
3236 * rsp ring <portRspMax>
3237 */
3238 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3239 "0312 Ring %d handler: portRspPut %d "
025dfdaf 3240 "is bigger than rsp ring %d\n",
e8b62011 3241 pring->ringno, le32_to_cpu(pgp->rspPutInx),
7e56aa25 3242 pring->sli.sli3.numRiocb);
875fbdfe 3243
2e0fef85 3244 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
3245
3246 /*
3247 * All error attention handlers are posted to
3248 * worker thread
3249 */
3250 phba->work_ha |= HA_ERATT;
3251 phba->work_hs = HS_FFER3;
92d7f7b0 3252
5e9d9b82 3253 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
3254
3255 return;
3256}
3257
9399627f 3258/**
3621a710 3259 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
3260 * @t: Pointer to the expired timer, used to retrieve the HBA context object.
3261 *
3262 * This function is invoked by the Error Attention polling timer when the
3263 * timer times out. It will check the SLI Error Attention register for
3264 * possible attention events. If so, it will post an Error Attention event
3265 * and wake up worker thread to process it. Otherwise, it will set up the
3266 * Error Attention polling timer for the next poll.
3267 **/
f22eb4d3 3268void lpfc_poll_eratt(struct timer_list *t)
9399627f
JS
3269{
3270 struct lpfc_hba *phba;
eb016566 3271 uint32_t eratt = 0;
aa6fbb75 3272 uint64_t sli_intr, cnt;
9399627f 3273
f22eb4d3 3274 phba = from_timer(phba, t, eratt_poll);
9399627f 3275
aa6fbb75
JS
3276 /* Here we also keep track of the HBA's interrupts per second */
3277 sli_intr = phba->sli.slistat.sli_intr;
3278
3279 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3280 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3281 sli_intr);
3282 else
3283 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3284
65791f1f
JS
3285 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3286 do_div(cnt, phba->eratt_poll_interval);
aa6fbb75
JS
3287 phba->sli.slistat.sli_ips = cnt;
3288
3289 phba->sli.slistat.sli_prev_intr = sli_intr;
3290
9399627f
JS
3291 /* Check chip HA register for error event */
3292 eratt = lpfc_sli_check_eratt(phba);
3293
3294 if (eratt)
3295 /* Tell the worker thread there is work to do */
3296 lpfc_worker_wake_up(phba);
3297 else
3298 /* Restart the timer for next eratt poll */
256ec0d0
JS
3299 mod_timer(&phba->eratt_poll,
3300 jiffies +
65791f1f 3301 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9399627f
JS
3302 return;
3303}
3304
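/*
 * Illustrative sketch, not part of the driver: the polling handler above
 * derives an interrupts-per-interval rate from a free-running 64-bit
 * counter. In plain C, unsigned subtraction handles a wrapped counter for
 * free; the driver spells the wrap case out and uses do_div() because
 * 32-bit x86 kernels lack a native 64-bit divide. ex_rate is hypothetical.
 */
#include <stdint.h>

static uint64_t ex_rate(uint64_t prev, uint64_t now, uint32_t interval_s)
{
	uint64_t delta = now - prev;	/* correct even if 'now' wrapped */

	return delta / interval_s;	/* userspace: a plain divide is fine */
}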
875fbdfe 3305
e59058c4 3306/**
3621a710 3307 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
3308 * @phba: Pointer to HBA context object.
3309 * @pring: Pointer to driver SLI ring object.
3310 * @mask: Host attention register mask for this ring.
3311 *
3312 * This function is called from the interrupt context when there is a ring
3313 * event for the fcp ring. The caller does not hold any lock.
3314 * The function processes each response iocb in the response ring until it
25985edc 3315 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
e59058c4
JS
3316 * LE bit set. The function will call the completion handler of the command iocb
3317 * if the response iocb indicates a completion for a command iocb or it is
3318 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3319 * function if this is an unsolicited iocb.
dea3101e 3320 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
3321 * to check it explicitly.
3322 */
3323int
2e0fef85
JS
3324lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3325 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3326{
34b02dcd 3327 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 3328 IOCB_t *irsp = NULL;
87f6eaff 3329 IOCB_t *entry = NULL;
dea3101e 3330 struct lpfc_iocbq *cmdiocbq = NULL;
3331 struct lpfc_iocbq rspiocbq;
dea3101e 3332 uint32_t status;
3333 uint32_t portRspPut, portRspMax;
3334 int rc = 1;
3335 lpfc_iocb_type type;
3336 unsigned long iflag;
3337 uint32_t rsp_cmpl = 0;
dea3101e 3338
2e0fef85 3339 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3340 pring->stats.iocb_event++;
3341
dea3101e 3342 /*
3343 * The next available response entry should never exceed the maximum
3344 * entries. If it does, treat it as an adapter hardware error.
3345 */
7e56aa25 3346 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3347 portRspPut = le32_to_cpu(pgp->rspPutInx);
3348 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 3349 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 3350 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3351 return 1;
3352 }
45ed1190
JS
3353 if (phba->fcp_ring_in_use) {
3354 spin_unlock_irqrestore(&phba->hbalock, iflag);
3355 return 1;
3356 } else
3357 phba->fcp_ring_in_use = 1;
dea3101e 3358
3359 rmb();
7e56aa25 3360 while (pring->sli.sli3.rspidx != portRspPut) {
87f6eaff
JSEC
3361 /*
3362 * Fetch an entry off the ring and copy it into a local data
3363 * structure. The copy involves a byte-swap since the
3364 * network byte order and pci byte orders are different.
3365 */
ed957684 3366 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 3367 phba->last_completion_time = jiffies;
875fbdfe 3368
7e56aa25
JS
3369 if (++pring->sli.sli3.rspidx >= portRspMax)
3370 pring->sli.sli3.rspidx = 0;
875fbdfe 3371
87f6eaff
JSEC
3372 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3373 (uint32_t *) &rspiocbq.iocb,
ed957684 3374 phba->iocb_rsp_size);
a4bc3379 3375 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
3376 irsp = &rspiocbq.iocb;
3377
dea3101e 3378 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3379 pring->stats.iocb_rsp++;
3380 rsp_cmpl++;
3381
3382 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
3383 /*
3384 * If resource errors reported from HBA, reduce
3385 * queuedepths of the SCSI device.
3386 */
3387 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3388 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3389 IOERR_NO_RESOURCES)) {
92d7f7b0 3390 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3391 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
3392 spin_lock_irqsave(&phba->hbalock, iflag);
3393 }
3394
dea3101e 3395 /* Rsp ring <ringno> error: IOCB */
3396 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 3397 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 3398 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 3399 pring->ringno,
92d7f7b0
JS
3400 irsp->un.ulpWord[0],
3401 irsp->un.ulpWord[1],
3402 irsp->un.ulpWord[2],
3403 irsp->un.ulpWord[3],
3404 irsp->un.ulpWord[4],
3405 irsp->un.ulpWord[5],
d7c255b2
JS
3406 *(uint32_t *)&irsp->un1,
3407 *((uint32_t *)&irsp->un1 + 1));
dea3101e 3408 }
3409
3410 switch (type) {
3411 case LPFC_ABORT_IOCB:
3412 case LPFC_SOL_IOCB:
3413 /*
3414 * Idle exchange closed via ABTS from port. No iocb
3415 * resources need to be recovered.
3416 */
3417 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 3418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3419 "0333 IOCB cmd 0x%x"
dca9479b 3420 " processed. Skipping"
92d7f7b0 3421 " completion\n",
dca9479b 3422 irsp->ulpCommand);
dea3101e 3423 break;
3424 }
3425
604a3e30
JB
3426 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3427 &rspiocbq);
0f65ff68
JS
3428 if (unlikely(!cmdiocbq))
3429 break;
3430 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3431 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3432 if (cmdiocbq->iocb_cmpl) {
3433 spin_unlock_irqrestore(&phba->hbalock, iflag);
3434 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3435 &rspiocbq);
3436 spin_lock_irqsave(&phba->hbalock, iflag);
3437 }
dea3101e 3438 break;
a4bc3379 3439 case LPFC_UNSOL_IOCB:
2e0fef85 3440 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 3441 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 3442 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 3443 break;
dea3101e 3444 default:
3445 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3446 char adaptermsg[LPFC_MAX_ADPTMSG];
3447 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3448 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3449 MAX_MSG_DATA);
898eb71c
JP
3450 dev_warn(&((phba->pcidev)->dev),
3451 "lpfc%d: %s\n",
dea3101e 3452 phba->brd_no, adaptermsg);
3453 } else {
3454 /* Unknown IOCB command */
3455 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3456 "0334 Unknown IOCB command "
92d7f7b0 3457 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 3458 type, irsp->ulpCommand,
92d7f7b0
JS
3459 irsp->ulpStatus,
3460 irsp->ulpIoTag,
3461 irsp->ulpContext);
dea3101e 3462 }
3463 break;
3464 }
3465
3466 /*
3467 * The response IOCB has been processed. Update the ring
3468 * pointer in SLIM. If the port response put pointer has not
3469 * been updated, sync the pgp->rspPutInx and fetch the new port
3470 * response put pointer.
3471 */
7e56aa25
JS
3472 writel(pring->sli.sli3.rspidx,
3473 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3474
7e56aa25 3475 if (pring->sli.sli3.rspidx == portRspPut)
dea3101e 3476 portRspPut = le32_to_cpu(pgp->rspPutInx);
3477 }
3478
3479 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3480 pring->stats.iocb_rsp_full++;
3481 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3482 writel(status, phba->CAregaddr);
3483 readl(phba->CAregaddr);
3484 }
3485 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3486 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3487 pring->stats.iocb_cmd_empty++;
3488
3489 /* Force update of the local copy of cmdGetInx */
7e56aa25 3490 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3491 lpfc_sli_resume_iocb(phba, pring);
3492
3493 if ((pring->lpfc_sli_cmd_available))
3494 (pring->lpfc_sli_cmd_available) (phba, pring);
3495
3496 }
3497
45ed1190 3498 phba->fcp_ring_in_use = 0;
2e0fef85 3499 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3500 return rc;
3501}
3502
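/*
 * Illustrative sketch, not part of the driver: the fast-path loop above is
 * a single-consumer circular ring. The consumer copies each entry out (the
 * port may reuse the slot immediately), wraps its index at the ring size,
 * and republishes its get index so the port knows how far the host has
 * read. The ex_* names are made up for this sketch.
 */
#include <stdint.h>
#include <string.h>

struct ex_ring {
	uint32_t get;		/* next index the host will read */
	uint32_t max;		/* number of entries in the ring */
	const void *base;	/* ring memory */
	size_t esize;		/* bytes per entry */
};

static int ex_consume(struct ex_ring *r, uint32_t put, void *out)
{
	if (r->get == put)
		return 0;	/* ring empty: nothing to consume */
	memcpy(out, (const char *)r->base + (size_t)r->get * r->esize,
	       r->esize);
	if (++r->get >= r->max)	/* wrap at the end of the ring */
		r->get = 0;
	return 1;
}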
e59058c4 3503/**
3772a991
JS
3504 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3505 * @phba: Pointer to HBA context object.
3506 * @pring: Pointer to driver SLI ring object.
3507 * @rspiocbp: Pointer to driver response IOCB object.
3508 *
3509 * This function is called from the worker thread when there is a slow-path
3510 * response IOCB to process. This function chains all the response iocbs until
3511 * seeing the iocb with the LE bit set. The function will call
3512 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3513 * completion of a command iocb. The function will call the
3514 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3515 * The function frees the resources or calls the completion handler if this
3516 * iocb is an abort completion. The function returns NULL when the response
3517 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3518 * this function shall chain the iocb on to the iocb_continueq and return the
3519 * response iocb passed in.
3520 **/
3521static struct lpfc_iocbq *
3522lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3523 struct lpfc_iocbq *rspiocbp)
3524{
3525 struct lpfc_iocbq *saveq;
3526 struct lpfc_iocbq *cmdiocbp;
3527 struct lpfc_iocbq *next_iocb;
3528 IOCB_t *irsp = NULL;
3529 uint32_t free_saveq;
3530 uint8_t iocb_cmd_type;
3531 lpfc_iocb_type type;
3532 unsigned long iflag;
3533 int rc;
3534
3535 spin_lock_irqsave(&phba->hbalock, iflag);
3536 /* First add the response iocb to the continueq list */
3537 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3538 pring->iocb_continueq_cnt++;
3539
70f23fd6 3540 /* Now, determine whether the list is completed for processing */
3772a991
JS
3541 irsp = &rspiocbp->iocb;
3542 if (irsp->ulpLe) {
3543 /*
3544 * By default, the driver expects to free all resources
3545 * associated with this iocb completion.
3546 */
3547 free_saveq = 1;
3548 saveq = list_get_first(&pring->iocb_continueq,
3549 struct lpfc_iocbq, list);
3550 irsp = &(saveq->iocb);
3551 list_del_init(&pring->iocb_continueq);
3552 pring->iocb_continueq_cnt = 0;
3553
3554 pring->stats.iocb_rsp++;
3555
3556 /*
3557 * If resource errors reported from HBA, reduce
3558 * queuedepths of the SCSI device.
3559 */
3560 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
e3d2b802
JS
3561 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3562 IOERR_NO_RESOURCES)) {
3772a991
JS
3563 spin_unlock_irqrestore(&phba->hbalock, iflag);
3564 phba->lpfc_rampdown_queue_depth(phba);
3565 spin_lock_irqsave(&phba->hbalock, iflag);
3566 }
3567
3568 if (irsp->ulpStatus) {
3569 /* Rsp ring <ringno> error: IOCB */
3570 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3571 "0328 Rsp Ring %d error: "
3572 "IOCB Data: "
3573 "x%x x%x x%x x%x "
3574 "x%x x%x x%x x%x "
3575 "x%x x%x x%x x%x "
3576 "x%x x%x x%x x%x\n",
3577 pring->ringno,
3578 irsp->un.ulpWord[0],
3579 irsp->un.ulpWord[1],
3580 irsp->un.ulpWord[2],
3581 irsp->un.ulpWord[3],
3582 irsp->un.ulpWord[4],
3583 irsp->un.ulpWord[5],
3584 *(((uint32_t *) irsp) + 6),
3585 *(((uint32_t *) irsp) + 7),
3586 *(((uint32_t *) irsp) + 8),
3587 *(((uint32_t *) irsp) + 9),
3588 *(((uint32_t *) irsp) + 10),
3589 *(((uint32_t *) irsp) + 11),
3590 *(((uint32_t *) irsp) + 12),
3591 *(((uint32_t *) irsp) + 13),
3592 *(((uint32_t *) irsp) + 14),
3593 *(((uint32_t *) irsp) + 15));
3594 }
3595
3596 /*
3597 * Fetch the IOCB command type and call the correct completion
3598 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3599 * get freed back to the lpfc_iocb_list by the discovery
3600 * kernel thread.
3601 */
3602 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3603 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3604 switch (type) {
3605 case LPFC_SOL_IOCB:
3606 spin_unlock_irqrestore(&phba->hbalock, iflag);
3607 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3608 spin_lock_irqsave(&phba->hbalock, iflag);
3609 break;
3610
3611 case LPFC_UNSOL_IOCB:
3612 spin_unlock_irqrestore(&phba->hbalock, iflag);
3613 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3614 spin_lock_irqsave(&phba->hbalock, iflag);
3615 if (!rc)
3616 free_saveq = 0;
3617 break;
3618
3619 case LPFC_ABORT_IOCB:
3620 cmdiocbp = NULL;
3621 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3622 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3623 saveq);
3624 if (cmdiocbp) {
3625 /* Call the specified completion routine */
3626 if (cmdiocbp->iocb_cmpl) {
3627 spin_unlock_irqrestore(&phba->hbalock,
3628 iflag);
3629 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3630 saveq);
3631 spin_lock_irqsave(&phba->hbalock,
3632 iflag);
3633 } else
3634 __lpfc_sli_release_iocbq(phba,
3635 cmdiocbp);
3636 }
3637 break;
3638
3639 case LPFC_UNKNOWN_IOCB:
3640 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3641 char adaptermsg[LPFC_MAX_ADPTMSG];
3642 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3643 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3644 MAX_MSG_DATA);
3645 dev_warn(&((phba->pcidev)->dev),
3646 "lpfc%d: %s\n",
3647 phba->brd_no, adaptermsg);
3648 } else {
3649 /* Unknown IOCB command */
3650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3651 "0335 Unknown IOCB "
3652 "command Data: x%x "
3653 "x%x x%x x%x\n",
3654 irsp->ulpCommand,
3655 irsp->ulpStatus,
3656 irsp->ulpIoTag,
3657 irsp->ulpContext);
3658 }
3659 break;
3660 }
3661
3662 if (free_saveq) {
3663 list_for_each_entry_safe(rspiocbp, next_iocb,
3664 &saveq->list, list) {
61f35bff 3665 list_del_init(&rspiocbp->list);
3772a991
JS
3666 __lpfc_sli_release_iocbq(phba, rspiocbp);
3667 }
3668 __lpfc_sli_release_iocbq(phba, saveq);
3669 }
3670 rspiocbp = NULL;
3671 }
3672 spin_unlock_irqrestore(&phba->hbalock, iflag);
3673 return rspiocbp;
3674}
3675
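/*
 * Illustrative sketch, not part of the driver: slow-path responses can
 * span several ring entries, so the routine above queues each fragment
 * until one carries the LE (list end) bit, then hands the whole chain to
 * the type-specific handler. A minimal model of that gathering step, with
 * hypothetical ex_* names:
 */
#include <stddef.h>

struct ex_frag {
	struct ex_frag *next;
	int le;				/* set on the final fragment */
};

/* Append 'f' to the pending chain; return the head once 'f' ends it. */
static struct ex_frag *
ex_gather(struct ex_frag **head, struct ex_frag **tail, struct ex_frag *f)
{
	struct ex_frag *done;

	f->next = NULL;
	if (*tail)
		(*tail)->next = f;
	else
		*head = f;
	*tail = f;
	if (!f->le)
		return NULL;		/* sequence still incomplete */
	done = *head;
	*head = *tail = NULL;		/* reset for the next sequence */
	return done;
}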
3676/**
3677 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
3678 * @phba: Pointer to HBA context object.
3679 * @pring: Pointer to driver SLI ring object.
3680 * @mask: Host attention register mask for this ring.
3681 *
3772a991
JS
3682 * This routine wraps the actual slow_ring event process routine from the
3683 * API jump table function pointer from the lpfc_hba struct.
e59058c4 3684 **/
3772a991 3685void
2e0fef85
JS
3686lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3687 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
3688{
3689 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3690}
3691
3692/**
3693 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3694 * @phba: Pointer to HBA context object.
3695 * @pring: Pointer to driver SLI ring object.
3696 * @mask: Host attention register mask for this ring.
3697 *
3698 * This function is called from the worker thread when there is a ring event
3699 * for non-fcp rings. The caller does not hold any lock. The function will
3700 * remove each response iocb in the response ring and call the handle
3701 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3702 **/
3703static void
3704lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3705 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 3706{
34b02dcd 3707 struct lpfc_pgp *pgp;
dea3101e 3708 IOCB_t *entry;
3709 IOCB_t *irsp = NULL;
3710 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 3711 uint32_t portRspPut, portRspMax;
dea3101e 3712 unsigned long iflag;
3772a991 3713 uint32_t status;
dea3101e 3714
34b02dcd 3715 pgp = &phba->port_gp[pring->ringno];
2e0fef85 3716 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3717 pring->stats.iocb_event++;
3718
dea3101e 3719 /*
3720 * The next available response entry should never exceed the maximum
3721 * entries. If it does, treat it as an adapter hardware error.
3722 */
7e56aa25 3723 portRspMax = pring->sli.sli3.numRiocb;
dea3101e 3724 portRspPut = le32_to_cpu(pgp->rspPutInx);
3725 if (portRspPut >= portRspMax) {
3726 /*
025dfdaf 3727 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e 3728 * rsp ring <portRspMax>
3729 */
ed957684 3730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 3731 "0303 Ring %d handler: portRspPut %d "
025dfdaf 3732 "is bigger than rsp ring %d\n",
e8b62011 3733 pring->ringno, portRspPut, portRspMax);
dea3101e 3734
2e0fef85
JS
3735 phba->link_state = LPFC_HBA_ERROR;
3736 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e 3737
3738 phba->work_hs = HS_FFER3;
3739 lpfc_handle_eratt(phba);
3740
3772a991 3741 return;
dea3101e 3742 }
3743
3744 rmb();
7e56aa25 3745 while (pring->sli.sli3.rspidx != portRspPut) {
dea3101e 3746 /*
3747 * Build a completion list and call the appropriate handler.
3748 * The process is to get the next available response iocb, get
3749 * a free iocb from the list, copy the response data into the
3750 * free iocb, insert it into the continuation list, and update the
3751 * next response index to slim. This process makes response
3752 * iocb's in the ring available to DMA as fast as possible but
3753 * pays a penalty for a copy operation. Since the iocb is
3754 * only 32 bytes, this penalty is considered small relative to
3755 * the PCI reads for register values and a slim write. When
3756 * the ulpLe field is set, the entire command has been
3757 * received.
3758 */
ed957684
JS
3759 entry = lpfc_resp_iocb(phba, pring);
3760
858c9f6c 3761 phba->last_completion_time = jiffies;
2e0fef85 3762 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 3763 if (rspiocbp == NULL) {
3764 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 3765 "completion.\n", __func__);
dea3101e 3766 break;
3767 }
3768
ed957684
JS
3769 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3770 phba->iocb_rsp_size);
dea3101e 3771 irsp = &rspiocbp->iocb;
3772
7e56aa25
JS
3773 if (++pring->sli.sli3.rspidx >= portRspMax)
3774 pring->sli.sli3.rspidx = 0;
dea3101e 3775
a58cbd52
JS
3776 if (pring->ringno == LPFC_ELS_RING) {
3777 lpfc_debugfs_slow_ring_trc(phba,
3778 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3779 *(((uint32_t *) irsp) + 4),
3780 *(((uint32_t *) irsp) + 6),
3781 *(((uint32_t *) irsp) + 7));
3782 }
3783
7e56aa25
JS
3784 writel(pring->sli.sli3.rspidx,
3785 &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 3786
3772a991
JS
3787 spin_unlock_irqrestore(&phba->hbalock, iflag);
3788 /* Handle the response IOCB */
3789 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3790 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e 3791
3792 /*
3793 * If the port response put pointer has not been updated, sync
3794 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3795 * response put pointer.
3796 */
7e56aa25 3797 if (pring->sli.sli3.rspidx == portRspPut) {
dea3101e 3798 portRspPut = le32_to_cpu(pgp->rspPutInx);
3799 }
7e56aa25 3800 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea3101e 3801
92d7f7b0 3802 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e 3803 /* At least one response entry has been freed */
3804 pring->stats.iocb_rsp_full++;
3805 /* SET RxRE_RSP in Chip Att register */
3806 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3807 writel(status, phba->CAregaddr);
3808 readl(phba->CAregaddr); /* flush */
3809 }
3810 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3811 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3812 pring->stats.iocb_cmd_empty++;
3813
3814 /* Force update of the local copy of cmdGetInx */
7e56aa25 3815 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea3101e 3816 lpfc_sli_resume_iocb(phba, pring);
3817
3818 if ((pring->lpfc_sli_cmd_available))
3819 (pring->lpfc_sli_cmd_available) (phba, pring);
3820
3821 }
3822
2e0fef85 3823 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 3824 return;
dea3101e 3825}
3826
4f774513
JS
3827/**
3828 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3829 * @phba: Pointer to HBA context object.
3830 * @pring: Pointer to driver SLI ring object.
3831 * @mask: Host attention register mask for this ring.
3832 *
3833 * This function is called from the worker thread when there is a pending
3834 * ELS response iocb on the driver internal slow-path response iocb worker
3835 * queue. The caller does not hold any lock. The function will remove each
3836 * response iocb from the response worker queue and call the handle
3837 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3838 **/
3839static void
3840lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3841 struct lpfc_sli_ring *pring, uint32_t mask)
3842{
3843 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
3844 struct hbq_dmabuf *dmabuf;
3845 struct lpfc_cq_event *cq_event;
4f774513 3846 unsigned long iflag;
0ef01a2d 3847 int count = 0;
4f774513 3848
45ed1190
JS
3849 spin_lock_irqsave(&phba->hbalock, iflag);
3850 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3851 spin_unlock_irqrestore(&phba->hbalock, iflag);
3852 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
3853 /* Get the response iocb from the head of work queue */
3854 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 3855 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 3856 cq_event, struct lpfc_cq_event, list);
4f774513 3857 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
3858
3859 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3860 case CQE_CODE_COMPL_WQE:
3861 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3862 cq_event);
45ed1190
JS
3863 /* Translate ELS WCQE to response IOCBQ */
3864 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3865 irspiocbq);
3866 if (irspiocbq)
3867 lpfc_sli_sp_handle_rspiocb(phba, pring,
3868 irspiocbq);
0ef01a2d 3869 count++;
4d9ab994
JS
3870 break;
3871 case CQE_CODE_RECEIVE:
7851fe2c 3872 case CQE_CODE_RECEIVE_V1:
4d9ab994
JS
3873 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3874 cq_event);
3875 lpfc_sli4_handle_received_buffer(phba, dmabuf);
0ef01a2d 3876 count++;
4d9ab994
JS
3877 break;
3878 default:
3879 break;
3880 }
0ef01a2d
JS
3881
3882 /* Limit the number of events to 64 to avoid soft lockups */
3883 if (count == 64)
3884 break;
4f774513
JS
3885 }
3886}
3887
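/*
 * Illustrative sketch, not part of the driver: the s4 handler above drains
 * a shared event list but caps one invocation at 64 events, so a flood of
 * completions cannot monopolize the worker thread (soft lockup). Leftover
 * events simply wait for the next invocation. ex_* names are made up, and
 * the real code pops each entry under a lock.
 */
struct ex_ev {
	struct ex_ev *next;
};

static struct ex_ev *ex_drain(struct ex_ev **list,
			      void (*handle)(struct ex_ev *))
{
	int budget = 64;

	while (*list && budget--) {
		struct ex_ev *ev = *list;

		*list = ev->next;	/* pop head (under the real lock) */
		handle(ev);
	}
	return *list;			/* non-NULL: more work remains */
}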
e59058c4 3888/**
3621a710 3889 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
3890 * @phba: Pointer to HBA context object.
3891 * @pring: Pointer to driver SLI ring object.
3892 *
3893 * This function aborts all iocbs in the given ring and frees all the iocb
3894 * objects in txq. This function issues an abort iocb for all the iocb commands
3895 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3896 * the return of this function. The caller is not required to hold any locks.
3897 **/
2e0fef85 3898void
dea3101e 3899lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3900{
2534ba75 3901 LIST_HEAD(completions);
dea3101e 3902 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 3903
92d7f7b0
JS
3904 if (pring->ringno == LPFC_ELS_RING) {
3905 lpfc_fabric_abort_hba(phba);
3906 }
3907
dea3101e 3908 /* Error everything on txq and txcmplq
3909 * First do the txq.
3910 */
db55fba8
JS
3911 if (phba->sli_rev >= LPFC_SLI_REV4) {
3912 spin_lock_irq(&pring->ring_lock);
3913 list_splice_init(&pring->txq, &completions);
3914 pring->txq_cnt = 0;
3915 spin_unlock_irq(&pring->ring_lock);
dea3101e 3916
db55fba8
JS
3917 spin_lock_irq(&phba->hbalock);
3918 /* Next issue ABTS for everything on the txcmplq */
3919 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3920 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3921 spin_unlock_irq(&phba->hbalock);
3922 } else {
3923 spin_lock_irq(&phba->hbalock);
3924 list_splice_init(&pring->txq, &completions);
3925 pring->txq_cnt = 0;
dea3101e 3926
db55fba8
JS
3927 /* Next issue ABTS for everything on the txcmplq */
3928 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3929 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3930 spin_unlock_irq(&phba->hbalock);
3931 }
dea3101e 3932
a257bf90
JS
3933 /* Cancel all the IOCBs from the completions list */
3934 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3935 IOERR_SLI_ABORTED);
dea3101e 3936}
3937
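/*
 * Illustrative sketch, not part of the driver: the abort path above uses a
 * common lock-shortening idiom. Pending commands are spliced onto a private
 * list while the lock is held, then failed one by one after it is dropped,
 * so completion callbacks never run under the ring lock. Hypothetical ex_*
 * names:
 */
#include <stddef.h>

struct ex_node {
	struct ex_node *next;
};

/* Called with the ring lock held: detach the whole queue in O(1). */
static struct ex_node *ex_splice(struct ex_node **txq)
{
	struct ex_node *head = *txq;

	*txq = NULL;
	return head;
}

/* Called with no locks held: fail everything that was detached. */
static void ex_cancel_all(struct ex_node *head,
			  void (*fail)(struct ex_node *))
{
	while (head) {
		struct ex_node *n = head;

		head = head->next;
		fail(n);
	}
}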
895427bd
JS
3938/**
3939 * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3940 * @phba: Pointer to HBA context object.
3941 * @pring: Pointer to driver SLI ring object.
3942 *
3943 * This function issues an abort wqe for each outstanding command on the
3944 * given ring's txcmplq. The commands on the txcmplq are not guaranteed
3945 * to complete before the return of this function. The caller is not
3946 * required to hold any locks.
3947 **/
3948void
3949lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3950{
3951 LIST_HEAD(completions);
3952 struct lpfc_iocbq *iocb, *next_iocb;
3953
3954 if (pring->ringno == LPFC_ELS_RING)
3955 lpfc_fabric_abort_hba(phba);
3956
3957 spin_lock_irq(&phba->hbalock);
3958 /* Next issue ABTS for everything on the txcmplq */
3959 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3960 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3961 spin_unlock_irq(&phba->hbalock);
3962}
3963
3964
db55fba8
JS
3965/**
3966 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3967 * @phba: Pointer to HBA context object.
3969 *
3970 * This function aborts all iocbs in FCP rings and frees all the iocb
3971 * objects in txq. This function issues an abort iocb for all the iocb commands
3972 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3973 * the return of this function. The caller is not required to hold any locks.
3974 **/
3975void
3976lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3977{
3978 struct lpfc_sli *psli = &phba->sli;
3979 struct lpfc_sli_ring *pring;
3980 uint32_t i;
3981
3982 /* Look on all the FCP Rings for the iotag */
3983 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec
JS
3984 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3985 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
db55fba8
JS
3986 lpfc_sli_abort_iocb_ring(phba, pring);
3987 }
3988 } else {
895427bd 3989 pring = &psli->sli3_ring[LPFC_FCP_RING];
db55fba8
JS
3990 lpfc_sli_abort_iocb_ring(phba, pring);
3991 }
3992}
3993
895427bd
JS
3994/**
3995 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3996 * @phba: Pointer to HBA context object.
3997 *
3998 * This function aborts all wqes in NVME rings. This function issues an
3999 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
4000 * the txcmplq are not guaranteed to complete before the return of this
4001 * function. The caller is not required to hold any locks.
4002 **/
4003void
4004lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
4005{
4006 struct lpfc_sli_ring *pring;
4007 uint32_t i;
4008
cdb42bec
JS
4009 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4010 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
895427bd
JS
4011 return;
4012
4013 /* Abort all IO on each NVME ring. */
cdb42bec
JS
4014 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4015 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
895427bd
JS
4016 lpfc_sli_abort_wqe_ring(phba, pring);
4017 }
4018}
4019
db55fba8 4020
a8e497d5 4021/**
3621a710 4022 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
4023 * @phba: Pointer to HBA context object.
4024 *
4025 * This function flushes all iocbs in the fcp ring and frees all the iocb
4026 * objects in txq and txcmplq. This function will not issue abort iocbs
4027 * for all the iocb commands in txcmplq; they will just be returned with
4028 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4029 * slot has been permanently disabled.
4030 **/
4031void
4032lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4033{
4034 LIST_HEAD(txq);
4035 LIST_HEAD(txcmplq);
a8e497d5
JS
4036 struct lpfc_sli *psli = &phba->sli;
4037 struct lpfc_sli_ring *pring;
db55fba8 4038 uint32_t i;
c1dd9111 4039 struct lpfc_iocbq *piocb, *next_iocb;
a8e497d5
JS
4040
4041 spin_lock_irq(&phba->hbalock);
4f2e66c6
JS
4042 /* Indicate the I/O queues are flushed */
4043 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
a8e497d5
JS
4044 spin_unlock_irq(&phba->hbalock);
4045
db55fba8
JS
4046 /* Look on all the FCP Rings for the iotag */
4047 if (phba->sli_rev >= LPFC_SLI_REV4) {
cdb42bec
JS
4048 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4049 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
db55fba8
JS
4050
4051 spin_lock_irq(&pring->ring_lock);
4052 /* Retrieve everything on txq */
4053 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4054 list_for_each_entry_safe(piocb, next_iocb,
4055 &pring->txcmplq, list)
4056 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4057 /* Retrieve everything on the txcmplq */
4058 list_splice_init(&pring->txcmplq, &txcmplq);
4059 pring->txq_cnt = 0;
4060 pring->txcmplq_cnt = 0;
4061 spin_unlock_irq(&pring->ring_lock);
4062
4063 /* Flush the txq */
4064 lpfc_sli_cancel_iocbs(phba, &txq,
4065 IOSTAT_LOCAL_REJECT,
4066 IOERR_SLI_DOWN);
4067 /* Flush the txcmplq */
4068 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4069 IOSTAT_LOCAL_REJECT,
4070 IOERR_SLI_DOWN);
4071 }
4072 } else {
895427bd 4073 pring = &psli->sli3_ring[LPFC_FCP_RING];
a8e497d5 4074
db55fba8
JS
4075 spin_lock_irq(&phba->hbalock);
4076 /* Retrieve everything on txq */
4077 list_splice_init(&pring->txq, &txq);
c1dd9111
JS
4078 list_for_each_entry_safe(piocb, next_iocb,
4079 &pring->txcmplq, list)
4080 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
db55fba8
JS
4081 /* Retrieve everything on the txcmplq */
4082 list_splice_init(&pring->txcmplq, &txcmplq);
4083 pring->txq_cnt = 0;
4084 pring->txcmplq_cnt = 0;
4085 spin_unlock_irq(&phba->hbalock);
4086
4087 /* Flush the txq */
4088 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4089 IOERR_SLI_DOWN);
4090 /* Flush the txcmplq */
4091 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4092 IOERR_SLI_DOWN);
4093 }
a8e497d5
JS
4094}
4095
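/*
 * Illustrative sketch, not part of the driver: before draining the queues,
 * the flush routine above raises a flag under the lock so concurrent
 * submitters can see a flush is in progress and fail fast instead of
 * queueing into rings that are being emptied. ex_* names are hypothetical.
 */
#include <stdbool.h>

struct ex_port {
	bool flushing;		/* protected by the port lock */
};

/* Submission path: refuse new I/O once a flush has begun. */
static int ex_submit_ok(const struct ex_port *p)
{
	return !p->flushing;	/* caller holds the port lock */
}

static void ex_begin_flush(struct ex_port *p)
{
	p->flushing = true;	/* caller holds the port lock */
}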
895427bd
JS
4096/**
4097 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4098 * @phba: Pointer to HBA context object.
4099 *
4100 * This function flushes all wqes in the nvme rings and frees all resources
4101 * in the txcmplq. This function does not issue abort wqes for the IO
4102 * commands in txcmplq; they will just be returned with
4103 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4104 * slot has been permanently disabled.
4105 **/
4106void
4107lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4108{
4109 LIST_HEAD(txcmplq);
4110 struct lpfc_sli_ring *pring;
4111 uint32_t i;
c1dd9111 4112 struct lpfc_iocbq *piocb, *next_iocb;
895427bd 4113
cdb42bec
JS
4114 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4115 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
895427bd
JS
4116 return;
4117
4118 /* Hint to other driver operations that a flush is in progress. */
4119 spin_lock_irq(&phba->hbalock);
4120 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4121 spin_unlock_irq(&phba->hbalock);
4122
4123 /* Cycle through all NVME rings and complete each IO with
4124 * a local driver reason code. This is a flush, so no abort
4125 * exchange is sent to the FW.
4126 */
cdb42bec
JS
4127 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4128 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
895427bd 4129
895427bd 4130 spin_lock_irq(&pring->ring_lock);
c1dd9111
JS
4131 list_for_each_entry_safe(piocb, next_iocb,
4132 &pring->txcmplq, list)
4133 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4134 /* Retrieve everything on the txcmplq */
895427bd
JS
4135 list_splice_init(&pring->txcmplq, &txcmplq);
4136 pring->txcmplq_cnt = 0;
4137 spin_unlock_irq(&pring->ring_lock);
4138
4139 /* Flush the txcmplq */
4140 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4141 IOSTAT_LOCAL_REJECT,
4142 IOERR_SLI_DOWN);
4143 }
4144}
4145
e59058c4 4146/**
3772a991 4147 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
4148 * @phba: Pointer to HBA context object.
4149 * @mask: Bit mask to be checked.
4150 *
4151 * This function reads the host status register and compares
4152 * with the provided bit mask to check if HBA completed
4153 * the restart. This function will wait in a loop for the
4154 * HBA to complete restart. If the HBA does not restart within
4155 * 15 iterations, the function will reset the HBA again. The
4156 * function returns 1 when the HBA fails to restart, otherwise returns
4157 * zero.
4158 **/
3772a991
JS
4159static int
4160lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 4161{
41415862
JW
4162 uint32_t status;
4163 int i = 0;
4164 int retval = 0;
dea3101e 4165
41415862 4166 /* Read the HBA Host Status Register */
9940b97b
JS
4167 if (lpfc_readl(phba->HSregaddr, &status))
4168 return 1;
dea3101e 4169
41415862
JW
4170 /*
4171 * Check status register every 100ms for 5 retries, then every
4172 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4173 * every 2.5 sec for 4.
4174 * Break out of the loop if errors occurred during init.
4175 */
4176 while (((status & mask) != mask) &&
4177 !(status & HS_FFERM) &&
4178 i++ < 20) {
dea3101e 4179
41415862
JW
4180 if (i <= 5)
4181 msleep(10);
4182 else if (i <= 10)
4183 msleep(500);
4184 else
4185 msleep(2500);
dea3101e 4186
41415862 4187 if (i == 15) {
2e0fef85 4188 /* Do post */
92d7f7b0 4189 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
4190 lpfc_sli_brdrestart(phba);
4191 }
4192 /* Read the HBA Host Status Register */
9940b97b
JS
4193 if (lpfc_readl(phba->HSregaddr, &status)) {
4194 retval = 1;
4195 break;
4196 }
41415862 4197 }
dea3101e 4198
41415862
JW
4199 /* Check to see if any errors occurred during init */
4200 if ((status & HS_FFERM) || (i >= 20)) {
e40a02c1
JS
4201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4202 "2751 Adapter failed to restart, "
4203 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4204 status,
4205 readl(phba->MBslimaddr + 0xa8),
4206 readl(phba->MBslimaddr + 0xac));
2e0fef85 4207 phba->link_state = LPFC_HBA_ERROR;
41415862 4208 retval = 1;
dea3101e 4209 }
dea3101e 4210
41415862
JW
4211 return retval;
4212}
dea3101e 4213
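/*
 * Illustrative sketch, not part of the driver: the readiness check above
 * polls with escalating delays (10ms for the first probes, then 500ms,
 * then 2.5s), performs one mid-course board restart, and gives up after
 * 20 probes. A compact model of that schedule, with hypothetical ex_*
 * callbacks standing in for register reads, reset, and msleep:
 */
static int ex_wait_ready(int (*ready)(void), void (*reset)(void),
			 void (*sleep_ms)(unsigned int))
{
	int i;

	for (i = 1; i <= 20; i++) {
		if (ready())
			return 0;
		if (i == 15)
			reset();	/* one mid-course board reset */
		sleep_ms(i <= 5 ? 10 : (i <= 10 ? 500 : 2500));
	}
	return -1;			/* still not ready: error out */
}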
da0436e9
JS
4214/**
4215 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4216 * @phba: Pointer to HBA context object.
4217 * @mask: Bit mask to be checked.
4218 *
4219 * This function checks the host status register to check if HBA is
4220 * ready. This function will wait in a loop for the HBA to be ready.
4221 * If the HBA is not ready, the function will reset the HBA PCI
4222 * function again. The function returns 1 when the HBA fails to be
4223 * ready, otherwise it returns zero.
4224 **/
4225static int
4226lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4227{
4228 uint32_t status;
4229 int retval = 0;
4230
4231 /* Read the HBA Host Status Register */
4232 status = lpfc_sli4_post_status_check(phba);
4233
4234 if (status) {
4235 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4236 lpfc_sli_brdrestart(phba);
4237 status = lpfc_sli4_post_status_check(phba);
4238 }
4239
4240 /* Check to see if any errors occurred during init */
4241 if (status) {
4242 phba->link_state = LPFC_HBA_ERROR;
4243 retval = 1;
4244 } else
4245 phba->sli4_hba.intr_enable = 0;
4246
4247 return retval;
4248}
4249
4250/**
4251 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4252 * @phba: Pointer to HBA context object.
4253 * @mask: Bit mask to be checked.
4254 *
4255 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4256 * from the API jump table function pointer from the lpfc_hba struct.
4257 **/
4258int
4259lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4260{
4261 return phba->lpfc_sli_brdready(phba, mask);
4262}
4263
9290831f
JS
4264#define BARRIER_TEST_PATTERN (0xdeadbeef)
4265
e59058c4 4266/**
3621a710 4267 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
4268 * @phba: Pointer to HBA context object.
4269 *
1b51197d
JS
4270 * This function is called before resetting an HBA. This function is called
4271 * with hbalock held and requests HBA to quiesce DMAs before a reset.
e59058c4 4272 **/
2e0fef85 4273void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 4274{
65a29c16
JS
4275 uint32_t __iomem *resp_buf;
4276 uint32_t __iomem *mbox_buf;
9290831f 4277 volatile uint32_t mbox;
9940b97b 4278 uint32_t hc_copy, ha_copy, resp_data;
9290831f
JS
4279 int i;
4280 uint8_t hdrtype;
4281
1c2ba475
JT
4282 lockdep_assert_held(&phba->hbalock);
4283
9290831f
JS
4284 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4285 if (hdrtype != 0x80 ||
4286 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4287 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4288 return;
4289
4290 /*
4291 * Tell the other part of the chip to suspend temporarily all
4292 * its DMA activity.
4293 */
65a29c16 4294 resp_buf = phba->MBslimaddr;
9290831f
JS
4295
4296 /* Disable the error attention */
9940b97b
JS
4297 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4298 return;
9290831f
JS
4299 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4300 readl(phba->HCregaddr); /* flush */
2e0fef85 4301 phba->link_flag |= LS_IGNORE_ERATT;
9290831f 4302
9940b97b
JS
4303 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4304 return;
4305 if (ha_copy & HA_ERATT) {
9290831f
JS
4306 /* Clear Chip error bit */
4307 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4308 phba->pport->stopped = 1;
9290831f
JS
4309 }
4310
4311 mbox = 0;
4312 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4313 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4314
4315 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 4316 mbox_buf = phba->MBslimaddr;
9290831f
JS
4317 writel(mbox, mbox_buf);
4318
9940b97b
JS
4319 for (i = 0; i < 50; i++) {
4320 if (lpfc_readl((resp_buf + 1), &resp_data))
4321 return;
4322 if (resp_data != ~(BARRIER_TEST_PATTERN))
4323 mdelay(1);
4324 else
4325 break;
4326 }
4327 resp_data = 0;
4328 if (lpfc_readl((resp_buf + 1), &resp_data))
4329 return;
4330 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 4331 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 4332 phba->pport->stopped)
9290831f
JS
4333 goto restore_hc;
4334 else
4335 goto clear_errat;
4336 }
4337
4338 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
9940b97b
JS
4339 resp_data = 0;
4340 for (i = 0; i < 500; i++) {
4341 if (lpfc_readl(resp_buf, &resp_data))
4342 return;
4343 if (resp_data != mbox)
4344 mdelay(1);
4345 else
4346 break;
4347 }
9290831f
JS
4348
4349clear_errat:
4350
9940b97b
JS
4351 while (++i < 500) {
4352 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4353 return;
4354 if (!(ha_copy & HA_ERATT))
4355 mdelay(1);
4356 else
4357 break;
4358 }
9290831f
JS
4359
4360 if (readl(phba->HAregaddr) & HA_ERATT) {
4361 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4362 phba->pport->stopped = 1;
9290831f
JS
4363 }
4364
4365restore_hc:
2e0fef85 4366 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
4367 writel(hc_copy, phba->HCregaddr);
4368 readl(phba->HCregaddr); /* flush */
4369}
4370
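/*
 * Illustrative sketch, not part of the driver: the barrier above is a
 * polled handshake. The host writes a known test pattern next to the
 * kill-board mailbox, then waits for the firmware to overwrite it with the
 * pattern's bitwise complement, bounding the wait at roughly 50ms. A
 * userspace model of that wait loop; ex_* names are hypothetical, and
 * read_slot models a volatile MMIO read.
 */
#include <stdint.h>

#define EX_PATTERN 0xdeadbeefu

static int ex_wait_ack(uint32_t (*read_slot)(void), void (*delay_1ms)(void))
{
	int i;

	for (i = 0; i < 50; i++) {
		if (read_slot() == ~EX_PATTERN)
			return 0;	/* firmware acknowledged */
		delay_1ms();
	}
	return -1;			/* no ack: fall back or bail out */
}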
e59058c4 4371/**
3621a710 4372 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
4373 * @phba: Pointer to HBA context object.
4374 *
4375 * This function issues a kill_board mailbox command and waits for
4376 * the error attention interrupt. This function is called for stopping
4377 * the firmware processing. The caller is not required to hold any
4378 * locks. This function calls lpfc_hba_down_post function to free
4379 * any pending commands after the kill. The function will return 1 when it
4380 * fails to kill the board else will return 0.
4381 **/
41415862 4382int
2e0fef85 4383lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
4384{
4385 struct lpfc_sli *psli;
4386 LPFC_MBOXQ_t *pmb;
4387 uint32_t status;
4388 uint32_t ha_copy;
4389 int retval;
4390 int i = 0;
dea3101e 4391
41415862 4392 psli = &phba->sli;
dea3101e 4393
41415862 4394 /* Kill HBA */
ed957684 4395 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
4396 "0329 Kill HBA Data: x%x x%x\n",
4397 phba->pport->port_state, psli->sli_flag);
41415862 4398
98c9ea5c
JS
4399 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4400 if (!pmb)
41415862 4401 return 1;
41415862
JW
4402
4403 /* Disable the error attention */
2e0fef85 4404 spin_lock_irq(&phba->hbalock);
9940b97b
JS
4405 if (lpfc_readl(phba->HCregaddr, &status)) {
4406 spin_unlock_irq(&phba->hbalock);
4407 mempool_free(pmb, phba->mbox_mem_pool);
4408 return 1;
4409 }
41415862
JW
4410 status &= ~HC_ERINT_ENA;
4411 writel(status, phba->HCregaddr);
4412 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
4413 phba->link_flag |= LS_IGNORE_ERATT;
4414 spin_unlock_irq(&phba->hbalock);
41415862
JW
4415
4416 lpfc_kill_board(phba, pmb);
4417 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4418 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4419
4420 if (retval != MBX_SUCCESS) {
4421 if (retval != MBX_BUSY)
4422 mempool_free(pmb, phba->mbox_mem_pool);
e40a02c1
JS
4423 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4424 "2752 KILL_BOARD command failed retval %d\n",
4425 retval);
2e0fef85
JS
4426 spin_lock_irq(&phba->hbalock);
4427 phba->link_flag &= ~LS_IGNORE_ERATT;
4428 spin_unlock_irq(&phba->hbalock);
41415862
JW
4429 return 1;
4430 }
4431
f4b4c68f
JS
4432 spin_lock_irq(&phba->hbalock);
4433 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4434 spin_unlock_irq(&phba->hbalock);
9290831f 4435
41415862
JW
4436 mempool_free(pmb, phba->mbox_mem_pool);
4437
4438 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4439 * attention every 100ms for 3 seconds. If we don't get ERATT after
4440 * 3 seconds we still set HBA_ERROR state because the status of the
4441 * board is now undefined.
4442 */
9940b97b
JS
4443 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4444 return 1;
41415862
JW
4445 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4446 mdelay(100);
9940b97b
JS
4447 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4448 return 1;
41415862
JW
4449 }
4450
4451 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
4452 if (ha_copy & HA_ERATT) {
4453 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 4454 phba->pport->stopped = 1;
9290831f 4455 }
2e0fef85 4456 spin_lock_irq(&phba->hbalock);
41415862 4457 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 4458 psli->mbox_active = NULL;
2e0fef85
JS
4459 phba->link_flag &= ~LS_IGNORE_ERATT;
4460 spin_unlock_irq(&phba->hbalock);
41415862 4461
41415862 4462 lpfc_hba_down_post(phba);
2e0fef85 4463 phba->link_state = LPFC_HBA_ERROR;
41415862 4464
2e0fef85 4465 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e 4466}
4467
e59058c4 4468/**
3772a991 4469 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
4470 * @phba: Pointer to HBA context object.
4471 *
4472 * This function resets the HBA by writing HC_INITFF to the control
4473 * register. After the HBA resets, this function resets all the iocb ring
4474 * indices. This function disables PCI layer parity checking during
4475 * the reset.
4476 * This function returns 0 always.
4477 * The caller is not required to hold any locks.
4478 **/
41415862 4479int
2e0fef85 4480lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 4481{
41415862 4482 struct lpfc_sli *psli;
dea3101e 4483 struct lpfc_sli_ring *pring;
41415862 4484 uint16_t cfg_value;
dea3101e 4485 int i;
dea3101e 4486
41415862 4487 psli = &phba->sli;
dea3101e 4488
41415862
JW
4489 /* Reset HBA */
4490 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4491 "0325 Reset HBA Data: x%x x%x\n",
4492b739
JS
4492 (phba->pport) ? phba->pport->port_state : 0,
4493 psli->sli_flag);
dea3101e 4494
4495 /* perform board reset */
4496 phba->fc_eventTag = 0;
4d9ab994 4497 phba->link_events = 0;
4492b739
JS
4498 if (phba->pport) {
4499 phba->pport->fc_myDID = 0;
4500 phba->pport->fc_prevDID = 0;
4501 }
dea3101e 4502
41415862
JW
4503 /* Turn off parity checking and serr during the physical reset */
4504 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4505 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4506 (cfg_value &
4507 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4508
3772a991
JS
4509 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4510
41415862
JW
4511 /* Now toggle INITFF bit in the Host Control Register */
4512 writel(HC_INITFF, phba->HCregaddr);
4513 mdelay(1);
4514 readl(phba->HCregaddr); /* flush */
4515 writel(0, phba->HCregaddr);
4516 readl(phba->HCregaddr); /* flush */
4517
4518 /* Restore PCI cmd register */
4519 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e 4520
4521 /* Initialize relevant SLI info */
41415862 4522 for (i = 0; i < psli->num_rings; i++) {
895427bd 4523 pring = &psli->sli3_ring[i];
dea3101e 4524 pring->flag = 0;
7e56aa25
JS
4525 pring->sli.sli3.rspidx = 0;
4526 pring->sli.sli3.next_cmdidx = 0;
4527 pring->sli.sli3.local_getidx = 0;
4528 pring->sli.sli3.cmdidx = 0;
dea3101e 4529 pring->missbufcnt = 0;
4530 }
dea3101e 4531
2e0fef85 4532 phba->link_state = LPFC_WARM_START;
41415862
JW
4533 return 0;
4534}
4535
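/*
 * Illustrative sketch, not part of the driver: around the INITFF toggle
 * above, parity and SERR reporting are masked in the PCI command register
 * with a read-modify-write, then the saved value is restored, so the bus
 * glitches a chip reset causes are not reported as PCI errors. The ex_*
 * accessors are hypothetical; the two mask values match the standard PCI
 * command-register bits.
 */
#include <stdint.h>

#define EX_PCI_PARITY 0x0040
#define EX_PCI_SERR   0x0100

static void ex_reset_quietly(uint16_t (*rd_cmd)(void),
			     void (*wr_cmd)(uint16_t),
			     void (*hard_reset)(void))
{
	uint16_t saved = rd_cmd();

	wr_cmd(saved & (uint16_t)~(EX_PCI_PARITY | EX_PCI_SERR));
	hard_reset();		/* bus noise here is now ignored */
	wr_cmd(saved);		/* restore original error reporting */
}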
e59058c4 4536/**
da0436e9
JS
4537 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4538 * @phba: Pointer to HBA context object.
4539 *
4540 * This function resets a SLI4 HBA. This function disables PCI layer parity
4541 * checking while it resets the device. The caller is not required to hold
4542 * any locks.
4543 *
4544 * This function returns 0 always.
4545 **/
4546int
4547lpfc_sli4_brdreset(struct lpfc_hba *phba)
4548{
4549 struct lpfc_sli *psli = &phba->sli;
4550 uint16_t cfg_value;
0293635e 4551 int rc = 0;
da0436e9
JS
4552
4553 /* Reset HBA */
4554 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0293635e
JS
4555 "0295 Reset HBA Data: x%x x%x x%x\n",
4556 phba->pport->port_state, psli->sli_flag,
4557 phba->hba_flag);
da0436e9
JS
4558
4559 /* perform board reset */
4560 phba->fc_eventTag = 0;
4d9ab994 4561 phba->link_events = 0;
da0436e9
JS
4562 phba->pport->fc_myDID = 0;
4563 phba->pport->fc_prevDID = 0;
4564
da0436e9
JS
4565 spin_lock_irq(&phba->hbalock);
4566 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4567 phba->fcf.fcf_flag = 0;
da0436e9
JS
4568 spin_unlock_irq(&phba->hbalock);
4569
0293635e
JS
4570 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4571 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4572 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4573 return rc;
4574 }
4575
da0436e9
JS
4576 /* Now physically reset the device */
4577 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4578 "0389 Performing PCI function reset!\n");
be858b65
JS
4579
4580 /* Turn off parity checking and serr during the physical reset */
4581 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4582 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4583 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4584
88318816 4585 /* Perform FCoE PCI function reset before freeing queue memory */
27b01b82 4586 rc = lpfc_pci_function_reset(phba);
da0436e9 4587
be858b65
JS
4588 /* Restore PCI cmd register */
4589 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4590
27b01b82 4591 return rc;
da0436e9
JS
4592}
4593
4594/**
4595 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
4596 * @phba: Pointer to HBA context object.
4597 *
4598 * This function is called in the SLI initialization code path to
4599 * restart the HBA. The caller is not required to hold any lock.
4600 * This function writes MBX_RESTART mailbox command to the SLIM and
4601 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4602 * function to free any pending commands. The function enables
4603 * POST only during the first initialization. The function returns zero.
4604 * The function does not guarantee completion of MBX_RESTART mailbox
4605 * command before the return of this function.
4606 **/
da0436e9
JS
4607static int
4608lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
4609{
4610 MAILBOX_t *mb;
4611 struct lpfc_sli *psli;
41415862
JW
4612 volatile uint32_t word0;
4613 void __iomem *to_slim;
0d878419 4614 uint32_t hba_aer_enabled;
41415862 4615
2e0fef85 4616 spin_lock_irq(&phba->hbalock);
41415862 4617
0d878419
JS
4618 /* Take PCIe device Advanced Error Reporting (AER) state */
4619 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4620
41415862
JW
4621 psli = &phba->sli;
4622
4623 /* Restart HBA */
4624 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 4625 "0337 Restart HBA Data: x%x x%x\n",
4492b739
JS
4626 (phba->pport) ? phba->pport->port_state : 0,
4627 psli->sli_flag);
41415862
JW
4628
4629 word0 = 0;
4630 mb = (MAILBOX_t *) &word0;
4631 mb->mbxCommand = MBX_RESTART;
4632 mb->mbxHc = 1;
4633
9290831f
JS
4634 lpfc_reset_barrier(phba);
4635
41415862
JW
4636 to_slim = phba->MBslimaddr;
4637 writel(*(uint32_t *) mb, to_slim);
4638 readl(to_slim); /* flush */
4639
4640 /* Only skip post after fc_ffinit is completed */
4492b739 4641 if (phba->pport && phba->pport->port_state)
41415862 4642 word0 = 1; /* This is really setting up word1 */
eaf15d5b 4643 else
41415862 4644 word0 = 0; /* This is really setting up word1 */
65a29c16 4645 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
4646 writel(*(uint32_t *) mb, to_slim);
4647 readl(to_slim); /* flush */
dea3101e 4648
41415862 4649 lpfc_sli_brdreset(phba);
4492b739
JS
4650 if (phba->pport)
4651 phba->pport->stopped = 0;
2e0fef85 4652 phba->link_state = LPFC_INIT_START;
da0436e9 4653 phba->hba_flag = 0;
2e0fef85 4654 spin_unlock_irq(&phba->hbalock);
41415862 4655
64ba8818 4656 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
c4d6204d 4657 psli->stats_start = ktime_get_seconds();
64ba8818 4658
eaf15d5b
JS
4659 /* Give the INITFF and Post time to settle. */
4660 mdelay(100);
41415862 4661
0d878419
JS
4662 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4663 if (hba_aer_enabled)
4664 pci_disable_pcie_error_reporting(phba->pcidev);
4665
41415862 4666 lpfc_hba_down_post(phba);
dea3101e 4667
4668 return 0;
4669}
4670

/**
 * lpfc_sli_brdrestart_s4 - Restart an SLI4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * an SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls the lpfc_hba_down_post function
 * to free any pending commands.
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);
	if (rc)
		return rc;

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}

/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
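
/*
 * Editor's sketch (illustrative, not driver code): the restart entry
 * point above dispatches through a per-revision jump table.  Assuming
 * the table is populated during API setup with the _s3/_s4 variants
 * shown earlier, the wiring is conceptually:
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:	// SLI-3 class device
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	// SLI-4 class device
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *		break;
 *	}
 *
 * Callers then invoke lpfc_sli_brdrestart(phba) without caring which
 * revision they run on.  (The LPFC_PCI_DEV_* names are assumptions
 * based on the driver's usual device-group convention.)
 */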

/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * the HS_FFRDY and HS_MBRDY bits. If the HBA has not become ready after
 * 150 polling iterations (~60 seconds), the function restarts the HBA
 * once more. The function returns zero if the HBA successfully
 * restarted, else it returns a negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 more retries.  The up-to-60-second wait
		 * for board ready is required for the Falcon FIPS
		 * zeroization to complete; any board reset in between would
		 * restart the zeroization and further delay board readiness.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
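
/*
 * Editor's note (worked arithmetic): the retry ladder above budgets
 * roughly 10 * 10ms + 90 * 100ms + 50 * 1000ms ~= 59.1 seconds before
 * the board is restarted at iteration 150, which is where the "~60
 * seconds" figure in the comment comes from; after the restart, up to
 * 50 more 1-second polls run before the 200-iteration timeout.
 */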

/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory required for all hbq
 * entries to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
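
/*
 * Editor's note (illustrative only): the three helpers above combine
 * into a single sizing rule,
 *
 *	total bytes = (sum of entry_count over lpfc_hbq_defs[])
 *		      * sizeof(struct lpfc_hbq_entry)
 *
 * e.g. with a hypothetical two-HBQ layout of 256 and 128 entries and a
 * 16-byte entry, lpfc_sli_hbq_size() would return (256 + 128) * 16 =
 * 6144 bytes.  The entry counts and entry size here are made-up
 * numbers; the real values come from lpfc_hbq_defs and
 * struct lpfc_hbq_entry.
 */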

/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if
 * successful else it will return a negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}

/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * the single ELS HBQ used by an SLI4 HBA and post receive buffers to
 * it. The caller is not required to hold any locks. This function
 * always returns zero.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[LPFC_ELS_HBQ].entry_count =
		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
	phba->hbq_count = 1;
	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	/* Initially populate or replenish the HBQs */
	return 0;
}

/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue the config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by the sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else it returns a negative
 * error code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}


/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. It is called
 * by the HBA initialization code, the HBA reset code and the HBA error
 * attention handler code. The caller is not required to hold any
 * locks. This function issues the config_port mailbox command to
 * configure the SLI and sets up the iocb rings and HBQ rings. In the
 * end the function calls the config_port_post function to issue the
 * init_link mailbox command and to start discovery. The function
 * returns zero if successful, else it returns a negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3, i;
	int longs;

	switch (phba->cfg_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override sli_mode "
				"parameter (%d) to auto (0).\n",
				phba->cfg_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized sli_mode parameter: %d.\n",
				phba->cfg_sli_mode);

		break;
	}
	phba->fcp_embed_io = 0;	/* SLI4 FC support only */

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && phba->cfg_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	else if (rc && mode == 2)
		rc = lpfc_sli_config_port(phba, 3);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kcalloc(longs,
						  sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
						sizeof(uint16_t),
						GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
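
/*
 * Editor's note (worked arithmetic): the VPI bitmask sizing above
 * rounds up to whole longs.  For example, with a hypothetical
 * max_vpi of 255 on a 64-bit kernel (BITS_PER_LONG == 64):
 *
 *	longs = (255 + 64) / 64 = 4
 *
 * i.e. four unsigned longs (256 bits) track VPIs 0..255, and the
 * vpi_ids array gets max_vpi + 1 = 256 slots.  max_vpi == 255 is a
 * made-up value for illustration.
 */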

/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region and populates the driver
 * data structures.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		   uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
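
/*
 * Editor's sketch (illustrative only): a hypothetical caller of
 * lpfc_sli4_read_rev().  The 2 KB vpd buffer size and the mailbox
 * allocation pattern are assumptions for illustration; on return,
 * vpd_size holds the number of valid bytes actually copied.
 *
 *	uint32_t vpd_size = 2048;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *	LPFC_MBOXQ_t *mboxq;
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (vpd && mboxq &&
 *	    !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		pr_info("read %u bytes of port vpd\n", vpd_size);
 */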

/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device physical port name this PCI
 * function is attached to.
 *
 * Return codes
 *	0 - successful
 *	otherwise - failed to retrieve physical port name
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	struct lpfc_mbx_get_port_name *get_port_name;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}

/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	struct lpfc_sli4_hdw_queue *qp;

	sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
	sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
					  LPFC_QUEUE_REARM);

	qp = sli4_hba->hdwq;
	if (sli4_hba->hdwq) {
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			sli4_hba->sli4_cq_release(qp[qidx].fcp_cq,
						  LPFC_QUEUE_REARM);
			sli4_hba->sli4_cq_release(qp[qidx].nvme_cq,
						  LPFC_QUEUE_REARM);
		}

		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++)
			sli4_hba->sli4_eq_release(qp[qidx].hba_eq,
						  LPFC_QUEUE_REARM);
	}

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_cq_release(
				sli4_hba->nvmet_cqset[qidx],
				LPFC_QUEUE_REARM);
		}
	}
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
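
/*
 * Editor's sketch (illustrative only): a hypothetical caller querying
 * the port for available XRI extents.  Error handling is elided and
 * the log text is invented; the call itself matches the signature
 * above.
 *
 *	uint16_t cnt = 0, size = 0;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		pr_info("port offers %u XRI extents of %u elements\n",
 *			cnt, size);
 */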

/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *    1: Extent count or size has changed.
 *    0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}
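
/*
 * Editor's sketch (illustrative only): how a post-reset path might use
 * the check above to decide whether extents need reprovisioning.  The
 * dealloc/alloc pairing shown is hypothetical shorthand, not a quote
 * of the driver's actual reset flow.
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI);
 *	if (rc < 0)
 *		return rc;	// mailbox failure
 *	if (rc == 1) {
 *		// layout changed: release and re-allocate XRI extents
 *		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
 *		lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
 *	}
 */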

/**
 * lpfc_sli4_cfg_post_extnts - Issue the extent allocation mailbox command
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
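
/*
 * Editor's note (worked arithmetic, made-up sizes): the embed decision
 * above compares the id payload against the room left in an embedded
 * mailbox.  Assuming, purely for illustration, sizeof(MAILBOX_t) == 256
 * and sizeof(struct mbox_header) == 8, then
 *
 *	emb_len = 256 - 8 - 4 = 244 bytes
 *
 * so up to 122 uint16_t extent ids (244 / 2) fit embedded; a request
 * for, say, 200 extents (400 bytes) would be reissued non-embedded
 * with the cfg_shdr and extent word added to req_len.  The structure
 * sizes are placeholders, not the driver's actual values.
 */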

/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number. Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.common_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word. */
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
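
/*
 * Editor's sketch (illustrative only): the id loop above unpacks two
 * 16-bit extent base ids from each 32-bit response word; (i % 2)
 * selects the half-word and k advances once per pair.  Assuming, for
 * illustration, that word4_0 is the low half and word4_1 the high
 * half, fake response words unpack like this:
 *
 *	uint32_t id_array[2] = { 0x00400000, 0x00c00080 };
 *	uint16_t i, k, base;
 *
 *	for (i = 0, k = 0; i < 4; i++) {
 *		base = (i % 2) == 0 ? (id_array[k] & 0xffff)
 *				    : (id_array[k] >> 16);
 *		// base: i=0 -> 0, i=1 -> 64, i=2 -> 128, i=3 -> 192
 *		if ((i % 2) == 1)
 *			k++;	// both halves consumed, next word
 *	}
 */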


/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends
	 * the resource type. All extents of this type are released by
	 * the port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

static void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
		  uint32_t feature)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_feature) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
			 LPFC_SLI4_MBX_EMBED);

	switch (feature) {
	case LPFC_SET_UE_RECOVERY:
		bf_set(lpfc_mbx_set_feature_UER,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_MDS_DIAGS:
		bf_set(lpfc_mbx_set_feature_mds,
		       &mbox->u.mqe.un.set_feature, 1);
		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	}

	return;
}

/**
 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	ras_fwlog->ras_active = false;

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
}

/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	ras_fwlog->ras_active = false;
}

/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data
 * (LWPD) and for the buffers posted to the adapter to receive the FW
 * log. The buffer count is derived from the module parameter
 * ras_fwlog_buffsize; each buffer posted to the FW is 64K in size.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   LPFC_RAS_MAX_ENTRY_SIZE,
						   &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
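
/*
 * Editor's note (worked arithmetic, hypothetical values): the caller
 * in lpfc_sli4_ras_fwlog_init() below sizes the buffer list as
 *
 *	fwlog_buffsize    = LPFC_RAS_MIN_BUFF_POST_SIZE *
 *			    phba->cfg_ras_fwlog_buffsize;
 *	fwlog_entry_count = fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE;
 *
 * e.g. if LPFC_RAS_MIN_BUFF_POST_SIZE were 256K and the module
 * parameter were set to 4, that is 1M of log space carved into
 * sixteen 64K buffers (LPFC_RAS_MAX_ENTRY_SIZE == 64K per the
 * kernel-doc above).  The 256K figure is a placeholder, not the
 * driver's actual constant.
 */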
6317
6318/**
6319 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6320 * @phba: pointer to lpfc hba data structure.
6321 * @pmboxq: pointer to the driver internal queue element for mailbox command.
6322 *
6323 * Completion handler for driver's RAS MBX command to the device.
6324 **/
6325static void
6326lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6327{
6328 MAILBOX_t *mb;
6329 union lpfc_sli4_cfg_shdr *shdr;
6330 uint32_t shdr_status, shdr_add_status;
6331 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6332
6333 mb = &pmb->u.mb;
6334
6335 shdr = (union lpfc_sli4_cfg_shdr *)
6336 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6337 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6338 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6339
6340 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
cb34990b 6341 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
d2cc9bcd
JS
6342 "6188 FW LOG mailbox "
6343 "completed with status x%x add_status x%x,"
6344 " mbx status x%x\n",
6345 shdr_status, shdr_add_status, mb->mbxStatus);
6346
6347 ras_fwlog->ras_hwsupport = false;
6348 goto disable_ras;
6349 }
6350
6351 ras_fwlog->ras_active = true;
6352 mempool_free(pmb, phba->mbox_mem_pool);
6353
6354 return;
6355
6356disable_ras:
6357 /* Free RAS DMA memory */
6358 lpfc_sli4_ras_dma_free(phba);
6359 mempool_free(pmb, phba->mbox_mem_pool);
6360}
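/*
 * Editor's note: in both the success and the disable_ras paths above, the
 * mailbox element is returned to phba->mbox_mem_pool inside this completion
 * handler, so the code that issued the SET_DIAG_LOG_OPTION command must not
 * free it a second time.
 */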
6361
6362/**
 6363 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6364 * @phba: pointer to lpfc hba data structure.
6365 * @fwlog_level: Logging verbosity level.
6366 * @fwlog_enable: Enable/Disable logging.
6367 *
6368 * Initialize memory and post mailbox command to enable FW logging in host
6369 * memory.
6370 **/
6371int
6372lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6373 uint32_t fwlog_level,
6374 uint32_t fwlog_enable)
6375{
6376 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6377 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6378 struct lpfc_dmabuf *dmabuf;
6379 LPFC_MBOXQ_t *mbox;
6380 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6381 int rc = 0;
6382
6383 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6384 phba->cfg_ras_fwlog_buffsize);
6385 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6386
6387 /*
 6388 * If re-enabling FW logging support, reuse the earlier allocated
 6389 * DMA buffers while posting the MBX command.
6390 **/
6391 if (!ras_fwlog->lwpd.virt) {
6392 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6393 if (rc) {
6394 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
cb34990b 6395 "6189 FW Log Memory Allocation Failed");
6396 return rc;
6397 }
6398 }
6399
6400 /* Setup Mailbox command */
6401 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6402 if (!mbox) {
cb34990b 6403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6404 "6190 RAS MBX Alloc Failed");
6405 rc = -ENOMEM;
6406 goto mem_free;
6407 }
6408
6409 ras_fwlog->fw_loglevel = fwlog_level;
6410 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6411 sizeof(struct lpfc_sli4_cfg_mhdr));
6412
6413 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6414 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6415 len, LPFC_SLI4_MBX_EMBED);
6416
6417 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6418 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6419 fwlog_enable);
6420 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6421 ras_fwlog->fw_loglevel);
6422 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6423 ras_fwlog->fw_buffcount);
6424 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6425 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6426
6427 /* Update DMA buffer address */
6428 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6429 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6430
6431 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6432 putPaddrLow(dmabuf->phys);
6433
6434 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6435 putPaddrHigh(dmabuf->phys);
6436 }
6437
 6438 	/* Update LWPD address */
6439 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6440 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6441
6442 mbox->vport = phba->pport;
6443 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6444
6445 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6446
6447 if (rc == MBX_NOT_FINISHED) {
6448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6449 "6191 FW-Log Mailbox failed. "
6450 "status %d mbxStatus : x%x", rc,
6451 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6452 mempool_free(mbox, phba->mbox_mem_pool);
6453 rc = -EIO;
6454 goto mem_free;
6455 } else
6456 rc = 0;
6457mem_free:
6458 if (rc)
6459 lpfc_sli4_ras_dma_free(phba);
6460
6461 return rc;
6462}
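/*
 * Editor's note, worked example of the sizing math above: assuming
 * LPFC_RAS_MIN_BUFF_POST_SIZE is 256K and LPFC_RAS_MAX_ENTRY_SIZE is 64K
 * (their definitions live outside this file), cfg_ras_fwlog_buffsize = 2
 * gives fwlog_buffsize = 512K and fwlog_entry_count = 512K / 64K = 8, so
 * eight 64K buffers are allocated and advertised to the firmware.
 */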
6463
6464/**
6465 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6466 * @phba: Pointer to HBA context object.
6467 *
6468 * Check if RAS is supported on the adapter and initialize it.
6469 **/
6470void
6471lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6472{
6473 /* Check RAS FW Log needs to be enabled or not */
6474 if (lpfc_check_fwlog_support(phba))
6475 return;
6476
6477 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6478 LPFC_RAS_ENABLE_LOGGING);
6479}
6480
6481/**
6482 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6483 * @phba: Pointer to HBA context object.
6484 *
6485 * This function allocates all SLI4 resource identifiers.
6486 **/
6487int
6488lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6489{
6490 int i, rc, error = 0;
6491 uint16_t count, base;
6492 unsigned long longs;
6493
6494 if (!phba->sli4_hba.rpi_hdrs_in_use)
6495 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6496 if (phba->sli4_hba.extents_in_use) {
6497 /*
6498 * The port supports resource extents. The XRI, VPI, VFI, RPI
6499 * resource extent count must be read and allocated before
6500 * provisioning the resource id arrays.
6501 */
6502 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6503 LPFC_IDX_RSRC_RDY) {
6504 /*
6505 * Extent-based resources are set - the driver could
6506 * be in a port reset. Figure out if any corrective
6507 * actions need to be taken.
6508 */
6509 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6510 LPFC_RSC_TYPE_FCOE_VFI);
6511 if (rc != 0)
6512 error++;
6513 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6514 LPFC_RSC_TYPE_FCOE_VPI);
6515 if (rc != 0)
6516 error++;
6517 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6518 LPFC_RSC_TYPE_FCOE_XRI);
6519 if (rc != 0)
6520 error++;
6521 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6522 LPFC_RSC_TYPE_FCOE_RPI);
6523 if (rc != 0)
6524 error++;
6525
6526 /*
6527 * It's possible that the number of resources
6528 * provided to this port instance changed between
6529 * resets. Detect this condition and reallocate
6530 * resources. Otherwise, there is no action.
6531 */
6532 if (error) {
6533 lpfc_printf_log(phba, KERN_INFO,
6534 LOG_MBOX | LOG_INIT,
6535 "2931 Detected extent resource "
6536 "change. Reallocating all "
6537 "extents.\n");
6538 rc = lpfc_sli4_dealloc_extent(phba,
6539 LPFC_RSC_TYPE_FCOE_VFI);
6540 rc = lpfc_sli4_dealloc_extent(phba,
6541 LPFC_RSC_TYPE_FCOE_VPI);
6542 rc = lpfc_sli4_dealloc_extent(phba,
6543 LPFC_RSC_TYPE_FCOE_XRI);
6544 rc = lpfc_sli4_dealloc_extent(phba,
6545 LPFC_RSC_TYPE_FCOE_RPI);
6546 } else
6547 return 0;
6548 }
6549
6550 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6551 if (unlikely(rc))
6552 goto err_exit;
6553
6554 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6555 if (unlikely(rc))
6556 goto err_exit;
6557
6558 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6559 if (unlikely(rc))
6560 goto err_exit;
6561
6562 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6563 if (unlikely(rc))
6564 goto err_exit;
6565 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6566 LPFC_IDX_RSRC_RDY);
6567 return rc;
6568 } else {
6569 /*
6570 * The port does not support resource extents. The XRI, VPI,
6571 * VFI, RPI resource ids were determined from READ_CONFIG.
6572 * Just allocate the bitmasks and provision the resource id
6573 * arrays. If a port reset is active, the resources don't
6574 * need any action - just exit.
6575 */
6576 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6577 LPFC_IDX_RSRC_RDY) {
6578 lpfc_sli4_dealloc_resource_identifiers(phba);
6579 lpfc_sli4_remove_rpis(phba);
6580 }
6581 /* RPIs. */
6582 count = phba->sli4_hba.max_cfg_param.max_rpi;
6583 if (count <= 0) {
6584 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6585 "3279 Invalid provisioning of "
6586 "rpi:%d\n", count);
6587 rc = -EINVAL;
6588 goto err_exit;
6589 }
6590 base = phba->sli4_hba.max_cfg_param.rpi_base;
6591 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6592 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6593 sizeof(unsigned long),
6594 GFP_KERNEL);
6595 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6596 rc = -ENOMEM;
6597 goto err_exit;
6598 }
6396bb22 6599 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6600 GFP_KERNEL);
6601 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6602 rc = -ENOMEM;
6603 goto free_rpi_bmask;
6604 }
6605
6606 for (i = 0; i < count; i++)
6607 phba->sli4_hba.rpi_ids[i] = base + i;
6608
6609 /* VPIs. */
6610 count = phba->sli4_hba.max_cfg_param.max_vpi;
6611 if (count <= 0) {
6612 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6613 "3280 Invalid provisioning of "
6614 "vpi:%d\n", count);
6615 rc = -EINVAL;
6616 goto free_rpi_ids;
6617 }
6618 base = phba->sli4_hba.max_cfg_param.vpi_base;
6619 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6620 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6621 GFP_KERNEL);
6622 if (unlikely(!phba->vpi_bmask)) {
6623 rc = -ENOMEM;
6624 goto free_rpi_ids;
6625 }
6396bb22 6626 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6627 GFP_KERNEL);
6628 if (unlikely(!phba->vpi_ids)) {
6629 rc = -ENOMEM;
6630 goto free_vpi_bmask;
6631 }
6632
6633 for (i = 0; i < count; i++)
6634 phba->vpi_ids[i] = base + i;
6635
6636 /* XRIs. */
6637 count = phba->sli4_hba.max_cfg_param.max_xri;
6638 if (count <= 0) {
6639 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6640 "3281 Invalid provisioning of "
6641 "xri:%d\n", count);
6642 rc = -EINVAL;
6643 goto free_vpi_ids;
6644 }
6645 base = phba->sli4_hba.max_cfg_param.xri_base;
6646 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6647 phba->sli4_hba.xri_bmask = kcalloc(longs,
6648 sizeof(unsigned long),
6649 GFP_KERNEL);
6650 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6651 rc = -ENOMEM;
6652 goto free_vpi_ids;
6653 }
41899be7 6654 phba->sli4_hba.max_cfg_param.xri_used = 0;
6396bb22 6655 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6656 GFP_KERNEL);
6657 if (unlikely(!phba->sli4_hba.xri_ids)) {
6658 rc = -ENOMEM;
6659 goto free_xri_bmask;
6660 }
6661
6662 for (i = 0; i < count; i++)
6663 phba->sli4_hba.xri_ids[i] = base + i;
6664
6665 /* VFIs. */
6666 count = phba->sli4_hba.max_cfg_param.max_vfi;
6667 if (count <= 0) {
6668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6669 "3282 Invalid provisioning of "
6670 "vfi:%d\n", count);
6671 rc = -EINVAL;
6672 goto free_xri_ids;
6673 }
6674 base = phba->sli4_hba.max_cfg_param.vfi_base;
6675 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6396bb22 6676 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6677 sizeof(unsigned long),
6678 GFP_KERNEL);
6679 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6680 rc = -ENOMEM;
6681 goto free_xri_ids;
6682 }
6396bb22 6683 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6684 GFP_KERNEL);
6685 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6686 rc = -ENOMEM;
6687 goto free_vfi_bmask;
6688 }
6689
6690 for (i = 0; i < count; i++)
6691 phba->sli4_hba.vfi_ids[i] = base + i;
6692
6693 /*
6694 * Mark all resources ready. An HBA reset doesn't need
6695 * to reset the initialization.
6696 */
6697 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6698 LPFC_IDX_RSRC_RDY);
6699 return 0;
6700 }
6701
6702 free_vfi_bmask:
6703 kfree(phba->sli4_hba.vfi_bmask);
cd60be49 6704 phba->sli4_hba.vfi_bmask = NULL;
6705 free_xri_ids:
6706 kfree(phba->sli4_hba.xri_ids);
cd60be49 6707 phba->sli4_hba.xri_ids = NULL;
6708 free_xri_bmask:
6709 kfree(phba->sli4_hba.xri_bmask);
cd60be49 6710 phba->sli4_hba.xri_bmask = NULL;
6711 free_vpi_ids:
6712 kfree(phba->vpi_ids);
cd60be49 6713 phba->vpi_ids = NULL;
6714 free_vpi_bmask:
6715 kfree(phba->vpi_bmask);
cd60be49 6716 phba->vpi_bmask = NULL;
6717 free_rpi_ids:
6718 kfree(phba->sli4_hba.rpi_ids);
cd60be49 6719 phba->sli4_hba.rpi_ids = NULL;
6720 free_rpi_bmask:
6721 kfree(phba->sli4_hba.rpi_bmask);
cd60be49 6722 phba->sli4_hba.rpi_bmask = NULL;
6723 err_exit:
6724 return rc;
6725}
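/*
 * Editor's note, worked example of the bitmask sizing used for every
 * resource type above: for max_rpi = 500 on a 64-bit host,
 * longs = (500 + 63) / 64 = 8, so kcalloc() reserves eight longs (512 bits)
 * of rpi_bmask, and rpi_ids[] maps logical id n to physical id rpi_base + n.
 */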
6726
6727/**
6728 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6729 * @phba: Pointer to HBA context object.
6730 *
 6731 * This function releases all SLI4 resource identifiers allocated earlier,
 6732 * either returning the extents to the port or freeing the id arrays.
6733 **/
6734int
6735lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6736{
6737 if (phba->sli4_hba.extents_in_use) {
6738 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6739 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6740 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6741 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6742 } else {
6743 kfree(phba->vpi_bmask);
16a3a208 6744 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6745 kfree(phba->vpi_ids);
6746 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6747 kfree(phba->sli4_hba.xri_bmask);
6748 kfree(phba->sli4_hba.xri_ids);
6749 kfree(phba->sli4_hba.vfi_bmask);
6750 kfree(phba->sli4_hba.vfi_ids);
6751 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6752 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6753 }
6754
6755 return 0;
6756}
6757
6758/**
6759 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6760 * @phba: Pointer to HBA context object.
6761 * @type: The resource extent type.
 6762 * @extnt_cnt: buffer to hold port extent count response
6763 * @extnt_size: buffer to hold port extent size response.
6764 *
6765 * This function calls the port to read the host allocated extents
6766 * for a particular type.
6767 **/
6768int
6769lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6770 uint16_t *extnt_cnt, uint16_t *extnt_size)
6771{
6772 bool emb;
6773 int rc = 0;
6774 uint16_t curr_blks = 0;
6775 uint32_t req_len, emb_len;
6776 uint32_t alloc_len, mbox_tmo;
6777 struct list_head *blk_list_head;
6778 struct lpfc_rsrc_blks *rsrc_blk;
6779 LPFC_MBOXQ_t *mbox;
6780 void *virtaddr = NULL;
6781 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6782 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6783 union lpfc_sli4_cfg_shdr *shdr;
6784
6785 switch (type) {
6786 case LPFC_RSC_TYPE_FCOE_VPI:
6787 blk_list_head = &phba->lpfc_vpi_blk_list;
6788 break;
6789 case LPFC_RSC_TYPE_FCOE_XRI:
6790 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6791 break;
6792 case LPFC_RSC_TYPE_FCOE_VFI:
6793 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6794 break;
6795 case LPFC_RSC_TYPE_FCOE_RPI:
6796 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6797 break;
6798 default:
6799 return -EIO;
6800 }
6801
 6802 	/* Count the number of extents currently allocated for this type. */
6803 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6804 if (curr_blks == 0) {
6805 /*
6806 * The GET_ALLOCATED mailbox does not return the size,
6807 * just the count. The size should be just the size
6808 * stored in the current allocated block and all sizes
6809 * for an extent type are the same so set the return
6810 * value now.
6811 */
6812 *extnt_size = rsrc_blk->rsrc_size;
6813 }
6814 curr_blks++;
6815 }
6816
6817 /*
6818 * Calculate the size of an embedded mailbox. The uint32_t
 6819 	 * accounts for the extents-specific word.
6820 */
6821 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6822 sizeof(uint32_t);
6823
6824 /*
6825 * Presume the allocation and response will fit into an embedded
6826 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6827 */
6828 emb = LPFC_SLI4_MBX_EMBED;
6829 req_len = emb_len;
6830 if (req_len > emb_len) {
6831 req_len = curr_blks * sizeof(uint16_t) +
6832 sizeof(union lpfc_sli4_cfg_shdr) +
6833 sizeof(uint32_t);
6834 emb = LPFC_SLI4_MBX_NEMBED;
6835 }
6836
6837 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6838 if (!mbox)
6839 return -ENOMEM;
6840 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6841
6842 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6843 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6844 req_len, emb);
6845 if (alloc_len < req_len) {
6846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6847 "2983 Allocated DMA memory size (x%x) is "
6848 "less than the requested DMA memory "
6849 "size (x%x)\n", alloc_len, req_len);
6850 rc = -ENOMEM;
6851 goto err_exit;
6852 }
6853 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6854 if (unlikely(rc)) {
6855 rc = -EIO;
6856 goto err_exit;
6857 }
6858
6859 if (!phba->sli4_hba.intr_enable)
6860 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6861 else {
a183a15f 6862 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6863 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6864 }
6865
6866 if (unlikely(rc)) {
6867 rc = -EIO;
6868 goto err_exit;
6869 }
6870
6871 /*
6872 * Figure out where the response is located. Then get local pointers
 6873 	 * to the response data. The port is not guaranteed to respond to all
 6874 	 * extent count requests, so update the local variable with the
6875 * allocated count from the port.
6876 */
6877 if (emb == LPFC_SLI4_MBX_EMBED) {
6878 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6879 shdr = &rsrc_ext->header.cfg_shdr;
6880 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6881 } else {
6882 virtaddr = mbox->sge_array->addr[0];
6883 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6884 shdr = &n_rsrc->cfg_shdr;
6885 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6886 }
6887
6888 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6889 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6890 "2984 Failed to read allocated resources "
6891 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6892 type,
6893 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6894 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6895 rc = -EIO;
6896 goto err_exit;
6897 }
6898 err_exit:
6899 lpfc_sli4_mbox_cmd_free(phba, mbox);
6900 return rc;
6901}
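/*
 * Usage sketch (editor's note, illustrative only):
 *
 *	uint16_t cnt, size;
 *	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size);
 *
 * On a zero return, cnt holds the number of XRI extents the port reports
 * as allocated and size the XRIs per extent, both refreshed from the
 * GET_ALLOC_RSRC_EXTENT mailbox response rather than cached driver state.
 */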
6902
8a9d2e80 6903/**
0ef69968 6904 * lpfc_sli4_repost_sgl_list - Repost the buffer sgls to the port as blocks
8a9d2e80 6905 * @phba: pointer to lpfc hba data structure.
 6907 * @sgl_list: linked list of sgl buffers to post
6908 * @cnt: number of linked list buffers
8a9d2e80 6909 *
895427bd 6910 * This routine walks the list of buffers that have been allocated and
 6911 * reposts them to the port by using SGL block post. This is needed after a
6912 * pci_function_reset/warm_start or start. It attempts to construct blocks
 6913 * of buffer sgls which contain contiguous xris and uses the non-embedded
6914 * SGL block post mailbox commands to post them to the port. For single
6915 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6916 * mailbox command for posting.
6917 *
6918 * Returns: 0 = success, non-zero failure.
6919 **/
6920static int
6921lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6922 struct list_head *sgl_list, int cnt)
6923{
6924 struct lpfc_sglq *sglq_entry = NULL;
6925 struct lpfc_sglq *sglq_entry_next = NULL;
6926 struct lpfc_sglq *sglq_entry_first = NULL;
6927 int status, total_cnt;
6928 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6929 int last_xritag = NO_XRI;
6930 LIST_HEAD(prep_sgl_list);
6931 LIST_HEAD(blck_sgl_list);
6932 LIST_HEAD(allc_sgl_list);
6933 LIST_HEAD(post_sgl_list);
6934 LIST_HEAD(free_sgl_list);
6935
38c20673 6936 spin_lock_irq(&phba->hbalock);
6937 spin_lock(&phba->sli4_hba.sgl_list_lock);
6938 list_splice_init(sgl_list, &allc_sgl_list);
6939 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 6940 spin_unlock_irq(&phba->hbalock);
8a9d2e80 6941
895427bd 6942 total_cnt = cnt;
6943 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6944 &allc_sgl_list, list) {
6945 list_del_init(&sglq_entry->list);
6946 block_cnt++;
6947 if ((last_xritag != NO_XRI) &&
6948 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6949 /* a hole in xri block, form a sgl posting block */
6950 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6951 post_cnt = block_cnt - 1;
6952 /* prepare list for next posting block */
6953 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6954 block_cnt = 1;
6955 } else {
6956 /* prepare list for next posting block */
6957 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6958 /* enough sgls for non-embed sgl mbox command */
6959 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6960 list_splice_init(&prep_sgl_list,
6961 &blck_sgl_list);
6962 post_cnt = block_cnt;
6963 block_cnt = 0;
6964 }
6965 }
6966 num_posted++;
6967
6968 /* keep track of last sgl's xritag */
6969 last_xritag = sglq_entry->sli4_xritag;
6970
6971 /* end of repost sgl list condition for buffers */
6972 if (num_posted == total_cnt) {
6973 if (post_cnt == 0) {
6974 list_splice_init(&prep_sgl_list,
6975 &blck_sgl_list);
6976 post_cnt = block_cnt;
6977 } else if (block_cnt == 1) {
6978 status = lpfc_sli4_post_sgl(phba,
6979 sglq_entry->phys, 0,
6980 sglq_entry->sli4_xritag);
6981 if (!status) {
6982 /* successful, put sgl to posted list */
6983 list_add_tail(&sglq_entry->list,
6984 &post_sgl_list);
6985 } else {
6986 /* Failure, put sgl to free list */
6987 lpfc_printf_log(phba, KERN_WARNING,
6988 LOG_SLI,
895427bd 6989 "3159 Failed to post "
6990 "sgl, xritag:x%x\n",
6991 sglq_entry->sli4_xritag);
6992 list_add_tail(&sglq_entry->list,
6993 &free_sgl_list);
711ea882 6994 total_cnt--;
6995 }
6996 }
6997 }
6998
6999 /* continue until a nembed page worth of sgls */
7000 if (post_cnt == 0)
7001 continue;
7002
7003 /* post the buffer list sgls as a block */
7004 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7005 post_cnt);
7006
7007 if (!status) {
7008 /* success, put sgl list to posted sgl list */
7009 list_splice_init(&blck_sgl_list, &post_sgl_list);
7010 } else {
7011 /* Failure, put sgl list to free sgl list */
7012 sglq_entry_first = list_first_entry(&blck_sgl_list,
7013 struct lpfc_sglq,
7014 list);
7015 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
895427bd 7016 "3160 Failed to post sgl-list, "
7017 "xritag:x%x-x%x\n",
7018 sglq_entry_first->sli4_xritag,
7019 (sglq_entry_first->sli4_xritag +
7020 post_cnt - 1));
7021 list_splice_init(&blck_sgl_list, &free_sgl_list);
711ea882 7022 total_cnt -= post_cnt;
7023 }
7024
 7025 		/* don't reset xritag due to hole in xri block */
7026 if (block_cnt == 0)
7027 last_xritag = NO_XRI;
7028
895427bd 7029 /* reset sgl post count for next round of posting */
7030 post_cnt = 0;
7031 }
7032
895427bd 7033 /* free the sgls failed to post */
7034 lpfc_free_sgl_list(phba, &free_sgl_list);
7035
895427bd 7036 /* push sgls posted to the available list */
8a9d2e80 7037 if (!list_empty(&post_sgl_list)) {
38c20673 7038 spin_lock_irq(&phba->hbalock);
7039 spin_lock(&phba->sli4_hba.sgl_list_lock);
7040 list_splice_init(&post_sgl_list, sgl_list);
7041 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 7042 spin_unlock_irq(&phba->hbalock);
7043 } else {
7044 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 7045 "3161 Failure to post sgl to port.\n");
7046 return -EIO;
7047 }
7048
7049 /* return the number of XRIs actually posted */
7050 return total_cnt;
7051}
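/*
 * Editor's note: the grouping above can be pictured with XRIs 100-113
 * followed by 120 on the spliced list. Entries 100-113 accumulate on
 * prep_sgl_list; the jump to 120 is a hole, so 100-113 are flushed as one
 * non-embedded block post, while 120 either seeds the next block or, as
 * the final entry, is posted singly through lpfc_sli4_post_sgl().
 */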
7052
7053/**
7054 * lpfc_sli4_repost_common_sgl_list - Repost all the allocated nvme buffer sgls
7055 * @phba: pointer to lpfc hba data structure.
7056 *
7057 * This routine walks the list of nvme buffers that have been allocated and
 7058 * reposts them to the port by using SGL block post. This is needed after a
7059 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7060 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7061 * to the lpfc_common_buf_list. If the repost fails, reject all nvme buffers.
7062 *
7063 * Returns: 0 = success, non-zero failure.
7064 **/
7065int
7066lpfc_sli4_repost_common_sgl_list(struct lpfc_hba *phba)
7067{
7068 LIST_HEAD(post_nblist);
7069 int num_posted, rc = 0;
7070
7071 /* get all NVME buffers need to repost to a local list */
7072 spin_lock_irq(&phba->common_buf_list_get_lock);
7073 spin_lock(&phba->common_buf_list_put_lock);
7074 list_splice_init(&phba->lpfc_common_buf_list_get, &post_nblist);
7075 list_splice(&phba->lpfc_common_buf_list_put, &post_nblist);
7076 phba->get_common_bufs = 0;
7077 phba->put_common_bufs = 0;
7078 spin_unlock(&phba->common_buf_list_put_lock);
7079 spin_unlock_irq(&phba->common_buf_list_get_lock);
7080
7081 /* post the list of nvme buffer sgls to port if available */
7082 if (!list_empty(&post_nblist)) {
7083 num_posted = lpfc_sli4_post_common_sgl_list(
7084 phba, &post_nblist, phba->sli4_hba.common_xri_cnt);
7085 /* failed to post any nvme buffer, return error */
7086 if (num_posted == 0)
7087 rc = -EIO;
7088 }
7089 return rc;
7090}
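/*
 * Editor's note: both lists are drained above because the driver keeps a
 * two-list buffer pool; consumers allocate from lpfc_common_buf_list_get
 * while frees land on lpfc_common_buf_list_put, so a repost after reset
 * must splice the two together to hand every buffer to the block-post path.
 */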
7091
7092void
7093lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7094{
7095 uint32_t len;
7096
7097 len = sizeof(struct lpfc_mbx_set_host_data) -
7098 sizeof(struct lpfc_sli4_cfg_mhdr);
7099 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7100 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7101 LPFC_SLI4_MBX_EMBED);
7102
7103 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7104 mbox->u.mqe.un.set_host_data.param_len =
7105 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7106 snprintf(mbox->u.mqe.un.set_host_data.data,
7107 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7108 "Linux %s v"LPFC_DRIVER_VERSION,
7109 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7110}
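/*
 * Editor's note: the snprintf above produces a host data string such as
 * "Linux FC v12.2.0.0" (version shown for illustration only), telling the
 * firmware which OS driver flavor, FC or FCoE, registered with the port.
 */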
7111
a8cf5dfe 7112int
6c621a22 7113lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
a8cf5dfe 7114 struct lpfc_queue *drq, int count, int idx)
7115{
7116 int rc, i;
7117 struct lpfc_rqe hrqe;
7118 struct lpfc_rqe drqe;
7119 struct lpfc_rqb *rqbp;
411de511 7120 unsigned long flags;
7121 struct rqb_dmabuf *rqb_buffer;
7122 LIST_HEAD(rqb_buf_list);
7123
411de511 7124 spin_lock_irqsave(&phba->hbalock, flags);
7125 rqbp = hrq->rqbp;
7126 for (i = 0; i < count; i++) {
7127 /* IF RQ is already full, don't bother */
7128 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7129 break;
7130 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7131 if (!rqb_buffer)
7132 break;
7133 rqb_buffer->hrq = hrq;
7134 rqb_buffer->drq = drq;
a8cf5dfe 7135 rqb_buffer->idx = idx;
7136 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7137 }
7138 while (!list_empty(&rqb_buf_list)) {
7139 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7140 hbuf.list);
7141
7142 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7143 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7144 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7145 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7146 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7147 if (rc < 0) {
7148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7149 "6421 Cannot post to HRQ %d: %x %x %x "
7150 "DRQ %x %x\n",
7151 hrq->queue_id,
7152 hrq->host_index,
7153 hrq->hba_index,
7154 hrq->entry_count,
7155 drq->host_index,
7156 drq->hba_index);
7157 rqbp->rqb_free_buffer(phba, rqb_buffer);
7158 } else {
7159 list_add_tail(&rqb_buffer->hbuf.list,
7160 &rqbp->rqb_buffer_list);
7161 rqbp->buffer_count++;
7162 }
7163 }
411de511 7164 spin_unlock_irqrestore(&phba->hbalock, flags);
7165 return 1;
7166}
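/*
 * Editor's note: each staged rqb_dmabuf above carries a header/data buffer
 * pair, and lpfc_sli4_rq_put() writes the paired RQEs so the header RQ and
 * data RQ advance in lockstep. A failed put frees the staged buffer; a
 * successful one parks it on rqb_buffer_list until the port consumes it.
 */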
7167
da0436e9 7168/**
183b8021 7169 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7170 * @phba: Pointer to HBA context object.
7171 *
7172 * This function is the main SLI4 device initialization PCI function. This
7173 * function is called by the HBA initialization code, HBA reset code and
7174 * HBA error attention handler code. Caller is not required to hold any
7175 * locks.
7176 **/
7177int
7178lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7179{
6c621a22 7180 int rc, i, cnt;
7181 LPFC_MBOXQ_t *mboxq;
7182 struct lpfc_mqe *mqe;
7183 uint8_t *vpd;
7184 uint32_t vpd_size;
7185 uint32_t ftr_rsp = 0;
7186 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7187 struct lpfc_vport *vport = phba->pport;
7188 struct lpfc_dmabuf *mp;
2d7dbc4c 7189 struct lpfc_rqb *rqbp;
7190
7191 /* Perform a PCI function reset to start from clean */
7192 rc = lpfc_pci_function_reset(phba);
7193 if (unlikely(rc))
7194 return -ENODEV;
7195
 7196 	/* Check the HBA Host Status Register for readiness */
7197 rc = lpfc_sli4_post_status_check(phba);
7198 if (unlikely(rc))
7199 return -ENODEV;
7200 else {
7201 spin_lock_irq(&phba->hbalock);
7202 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7203 spin_unlock_irq(&phba->hbalock);
7204 }
7205
7206 /*
7207 * Allocate a single mailbox container for initializing the
7208 * port.
7209 */
7210 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7211 if (!mboxq)
7212 return -ENOMEM;
7213
da0436e9 7214 /* Issue READ_REV to collect vpd and FW information. */
49198b37 7215 vpd_size = SLI4_PAGE_SIZE;
7216 vpd = kzalloc(vpd_size, GFP_KERNEL);
7217 if (!vpd) {
7218 rc = -ENOMEM;
7219 goto out_free_mbox;
7220 }
7221
7222 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7223 if (unlikely(rc)) {
7224 kfree(vpd);
7225 goto out_free_mbox;
7226 }
572709e2 7227
da0436e9 7228 mqe = &mboxq->u.mqe;
f1126688 7229 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
b5c53958 7230 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
76a95d75 7231 phba->hba_flag |= HBA_FCOE_MODE;
7232 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7233 } else {
76a95d75 7234 phba->hba_flag &= ~HBA_FCOE_MODE;
b5c53958 7235 }
7236
7237 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7238 LPFC_DCBX_CEE_MODE)
7239 phba->hba_flag |= HBA_FIP_SUPPORT;
7240 else
7241 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7242
7243 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7244
c31098ce 7245 if (phba->sli_rev != LPFC_SLI_REV4) {
7246 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7247 "0376 READ_REV Error. SLI Level %d "
7248 "FCoE enabled %d\n",
76a95d75 7249 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
da0436e9 7250 rc = -EIO;
7251 kfree(vpd);
7252 goto out_free_mbox;
da0436e9 7253 }
cd1c8301 7254
7255 /*
7256 * Continue initialization with default values even if driver failed
7257 * to read FCoE param config regions, only read parameters if the
7258 * board is FCoE
7259 */
7260 if (phba->hba_flag & HBA_FCOE_MODE &&
7261 lpfc_sli4_read_fcoe_params(phba))
7262 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7263 "2570 Failed to read FCoE parameters\n");
7264
7265 /*
7266 * Retrieve sli4 device physical port name, failure of doing it
7267 * is considered as non-fatal.
7268 */
7269 rc = lpfc_sli4_retrieve_pport_name(phba);
7270 if (!rc)
7271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7272 "3080 Successful retrieving SLI4 device "
7273 "physical port name: %s.\n", phba->Port);
7274
7275 /*
7276 * Evaluate the read rev and vpd data. Populate the driver
7277 * state with the results. If this routine fails, the failure
7278 * is not fatal as the driver will use generic values.
7279 */
7280 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7281 if (unlikely(!rc)) {
7282 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7283 "0377 Error %d parsing vpd. "
7284 "Using defaults.\n", rc);
7285 rc = 0;
7286 }
76a95d75 7287 kfree(vpd);
da0436e9 7288
7289 /* Save information as VPD data */
7290 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7291 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7292
7293 /*
7294 * This is because first G7 ASIC doesn't support the standard
7295 * 0x5a NVME cmd descriptor type/subtype
7296 */
7297 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7298 LPFC_SLI_INTF_IF_TYPE_6) &&
7299 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7300 (phba->vpd.rev.smRev == 0) &&
7301 (phba->cfg_nvme_embed_cmd == 1))
7302 phba->cfg_nvme_embed_cmd = 0;
7303
7304 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7305 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7306 &mqe->un.read_rev);
7307 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7308 &mqe->un.read_rev);
7309 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7310 &mqe->un.read_rev);
7311 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7312 &mqe->un.read_rev);
7313 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7314 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7315 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7316 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7317 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7318 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7319 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7320 "(%d):0380 READ_REV Status x%x "
7321 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7322 mboxq->vport ? mboxq->vport->vpi : 0,
7323 bf_get(lpfc_mqe_status, mqe),
7324 phba->vpd.rev.opFwName,
7325 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7326 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9 7327
7328 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7329 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7330 if (phba->pport->cfg_lun_queue_depth > rc) {
7331 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7332 "3362 LUN queue depth changed from %d to %d\n",
7333 phba->pport->cfg_lun_queue_depth, rc);
7334 phba->pport->cfg_lun_queue_depth = rc;
7335 }
7336
65791f1f 7337 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7338 LPFC_SLI_INTF_IF_TYPE_0) {
7339 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7340 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7341 if (rc == MBX_SUCCESS) {
7342 phba->hba_flag |= HBA_RECOVERABLE_UE;
7343 /* Set 1Sec interval to detect UE */
7344 phba->eratt_poll_interval = 1;
7345 phba->sli4_hba.ue_to_sr = bf_get(
7346 lpfc_mbx_set_feature_UESR,
7347 &mboxq->u.mqe.un.set_feature);
7348 phba->sli4_hba.ue_to_rp = bf_get(
7349 lpfc_mbx_set_feature_UERP,
7350 &mboxq->u.mqe.un.set_feature);
7351 }
7352 }
7353
7354 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7355 /* Enable MDS Diagnostics only if the SLI Port supports it */
7356 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7357 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7358 if (rc != MBX_SUCCESS)
7359 phba->mds_diags_support = 0;
7360 }
572709e2 7361
7362 /*
7363 * Discover the port's supported feature set and match it against the
 7364 	 * host's requests.
7365 */
7366 lpfc_request_features(phba, mboxq);
7367 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7368 if (unlikely(rc)) {
7369 rc = -EIO;
76a95d75 7370 goto out_free_mbox;
7371 }
7372
7373 /*
7374 * The port must support FCP initiator mode as this is the
7375 * only mode running in the host.
7376 */
7377 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7378 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7379 "0378 No support for fcpi mode.\n");
7380 ftr_rsp++;
7381 }
7382
7383 /* Performance Hints are ONLY for FCoE */
7384 if (phba->hba_flag & HBA_FCOE_MODE) {
7385 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7386 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7387 else
7388 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7389 }
7390
7391 /*
7392 * If the port cannot support the host's requested features
7393 * then turn off the global config parameters to disable the
7394 * feature in the driver. This is not a fatal error.
7395 */
7396 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7397 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7398 phba->cfg_enable_bg = 0;
7399 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
bf08611b 7400 ftr_rsp++;
f44ac12f 7401 }
bf08611b 7402 }
7403
7404 if (phba->max_vpi && phba->cfg_enable_npiv &&
7405 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7406 ftr_rsp++;
7407
7408 if (ftr_rsp) {
7409 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7410 "0379 Feature Mismatch Data: x%08x %08x "
7411 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7412 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7413 phba->cfg_enable_npiv, phba->max_vpi);
7414 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7415 phba->cfg_enable_bg = 0;
7416 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7417 phba->cfg_enable_npiv = 0;
7418 }
7419
7420 /* These SLI3 features are assumed in SLI4 */
7421 spin_lock_irq(&phba->hbalock);
7422 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7423 spin_unlock_irq(&phba->hbalock);
7424
7425 /*
7426 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
 7427 	 * calls depend on these resources to complete port setup.
7428 */
7429 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7430 if (rc) {
7431 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7432 "2920 Failed to alloc Resource IDs "
7433 "rc = x%x\n", rc);
7434 goto out_free_mbox;
7435 }
7436
7437 lpfc_set_host_data(phba, mboxq);
7438
7439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7440 if (rc) {
7441 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7442 "2134 Failed to set host os driver version %x",
7443 rc);
7444 }
7445
da0436e9 7446 /* Read the port's service parameters. */
7447 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7448 if (rc) {
7449 phba->link_state = LPFC_HBA_ERROR;
7450 rc = -ENOMEM;
76a95d75 7451 goto out_free_mbox;
7452 }
7453
7454 mboxq->vport = vport;
7455 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3e1f0718 7456 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7457 if (rc == MBX_SUCCESS) {
7458 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7459 rc = 0;
7460 }
7461
7462 /*
7463 * This memory was allocated by the lpfc_read_sparam routine. Release
7464 * it to the mbuf pool.
7465 */
7466 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7467 kfree(mp);
3e1f0718 7468 mboxq->ctx_buf = NULL;
7469 if (unlikely(rc)) {
7470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7471 "0382 READ_SPARAM command failed "
7472 "status %d, mbxStatus x%x\n",
7473 rc, bf_get(lpfc_mqe_status, mqe));
7474 phba->link_state = LPFC_HBA_ERROR;
7475 rc = -EIO;
76a95d75 7476 goto out_free_mbox;
7477 }
7478
0558056c 7479 lpfc_update_vport_wwn(vport);
7480
7481 /* Update the fc_host data structures with new wwn. */
7482 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7483 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7484
7485 /* Create all the SLI4 queues */
7486 rc = lpfc_sli4_queue_create(phba);
7487 if (rc) {
7488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7489 "3089 Failed to allocate queues\n");
7490 rc = -ENODEV;
7491 goto out_free_mbox;
7492 }
7493 /* Set up all the queues to the device */
7494 rc = lpfc_sli4_queue_setup(phba);
7495 if (unlikely(rc)) {
7496 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7497 "0381 Error %d during queue setup.\n ", rc);
7498 goto out_stop_timers;
7499 }
7500 /* Initialize the driver internal SLI layer lists. */
7501 lpfc_sli4_setup(phba);
7502 lpfc_sli4_queue_init(phba);
7503
7504 /* update host els xri-sgl sizes and mappings */
7505 rc = lpfc_sli4_els_sgl_update(phba);
7506 if (unlikely(rc)) {
7507 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7508 "1400 Failed to update xri-sgl size and "
7509 "mapping: %d\n", rc);
895427bd 7510 goto out_destroy_queue;
7511 }
7512
8a9d2e80 7513 /* register the els sgl pool to the port */
7514 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7515 phba->sli4_hba.els_xri_cnt);
7516 if (unlikely(rc < 0)) {
7517 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7518 "0582 Error %d during els sgl post "
7519 "operation\n", rc);
7520 rc = -ENODEV;
895427bd 7521 goto out_destroy_queue;
8a9d2e80 7522 }
895427bd 7523 phba->sli4_hba.els_xri_cnt = rc;
8a9d2e80 7524
7525 if (phba->nvmet_support) {
7526 /* update host nvmet xri-sgl sizes and mappings */
7527 rc = lpfc_sli4_nvmet_sgl_update(phba);
7528 if (unlikely(rc)) {
7529 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7530 "6308 Failed to update nvmet-sgl size "
7531 "and mapping: %d\n", rc);
7532 goto out_destroy_queue;
7533 }
7534
7535 /* register the nvmet sgl pool to the port */
7536 rc = lpfc_sli4_repost_sgl_list(
7537 phba,
7538 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7539 phba->sli4_hba.nvmet_xri_cnt);
7540 if (unlikely(rc < 0)) {
7541 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7542 "3117 Error %d during nvmet "
7543 "sgl post\n", rc);
7544 rc = -ENODEV;
7545 goto out_destroy_queue;
7546 }
7547 phba->sli4_hba.nvmet_xri_cnt = rc;
7548
7549 cnt = phba->cfg_iocb_cnt * 1024;
7550 /* We need 1 iocbq for every SGL, for IO processing */
7551 cnt += phba->sli4_hba.nvmet_xri_cnt;
f358dd0c 7552 } else {
7553 /* update host common xri-sgl sizes and mappings */
7554 rc = lpfc_sli4_common_sgl_update(phba);
7555 if (unlikely(rc)) {
7556 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
0794d601 7557 "6082 Failed to update nvme-sgl size "
7558 "and mapping: %d\n", rc);
7559 goto out_destroy_queue;
7560 }
7561
7562 /* register the allocated common sgl pool to the port */
7563 rc = lpfc_sli4_repost_common_sgl_list(phba);
7564 if (unlikely(rc)) {
7565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7566 "6116 Error %d during nvme sgl post "
7567 "operation\n", rc);
7568 /* Some NVME buffers were moved to abort nvme list */
7569 /* A pci function reset will repost them */
7570 rc = -ENODEV;
7571 goto out_destroy_queue;
7572 }
6c621a22 7573 cnt = phba->cfg_iocb_cnt * 1024;
7574 }
7575
7576 if (!phba->sli.iocbq_lookup) {
7577 /* Initialize and populate the iocb list per host */
7578 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11e644e2 7579 "2821 initialize iocb list %d total %d\n",
7580 phba->cfg_iocb_cnt, cnt);
7581 rc = lpfc_init_iocb_list(phba, cnt);
7582 if (rc) {
7583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11e644e2 7584 "1413 Failed to init iocb list.\n");
7585 goto out_destroy_queue;
7586 }
7587 }
7588
7589 if (phba->nvmet_support)
7590 lpfc_nvmet_create_targetport(phba);
7591
2d7dbc4c 7592 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7593 /* Post initial buffers to all RQs created */
7594 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7595 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7596 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7597 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7598 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
61f3d4bf 7599 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7600 rqbp->buffer_count = 0;
7601
7602 lpfc_post_rq_buffer(
7603 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7604 phba->sli4_hba.nvmet_mrq_data[i],
2448e484 7605 phba->cfg_nvmet_mrq_post, i);
7606 }
7607 }
7608
7609 /* Post the rpi header region to the device. */
7610 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7611 if (unlikely(rc)) {
7612 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7613 "0393 Error %d during rpi post operation\n",
7614 rc);
7615 rc = -ENODEV;
895427bd 7616 goto out_destroy_queue;
da0436e9 7617 }
97f2ecf1 7618 lpfc_sli4_node_prep(phba);
da0436e9 7619
895427bd 7620 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2d7dbc4c 7621 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7622 /*
7623 * The FC Port needs to register FCFI (index 0)
7624 */
7625 lpfc_reg_fcfi(phba, mboxq);
7626 mboxq->vport = phba->pport;
7627 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7628 if (rc != MBX_SUCCESS)
7629 goto out_unset_queue;
7630 rc = 0;
7631 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7632 &mboxq->u.mqe.un.reg_fcfi);
7633 } else {
 7634 			/* We are in NVME target mode with MRQ > 1 */
7635
7636 /* First register the FCFI */
7637 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7638 mboxq->vport = phba->pport;
7639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7640 if (rc != MBX_SUCCESS)
7641 goto out_unset_queue;
7642 rc = 0;
7643 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7644 &mboxq->u.mqe.un.reg_fcfi_mrq);
7645
7646 /* Next register the MRQs */
7647 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7648 mboxq->vport = phba->pport;
7649 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7650 if (rc != MBX_SUCCESS)
7651 goto out_unset_queue;
7652 rc = 0;
7653 }
7654 /* Check if the port is configured to be disabled */
7655 lpfc_sli_read_link_ste(phba);
7656 }
7657
7658 /* Arm the CQs and then EQs on device */
7659 lpfc_sli4_arm_cqeq_intr(phba);
7660
7661 /* Indicate device interrupt mode */
7662 phba->sli4_hba.intr_enable = 1;
7663
7664 /* Allow asynchronous mailbox command to go through */
7665 spin_lock_irq(&phba->hbalock);
7666 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7667 spin_unlock_irq(&phba->hbalock);
7668
7669 /* Post receive buffers to the device */
7670 lpfc_sli4_rb_setup(phba);
7671
7672 /* Reset HBA FCF states after HBA reset */
7673 phba->fcf.fcf_flag = 0;
7674 phba->fcf.current_rec.flag = 0;
7675
da0436e9 7676 /* Start the ELS watchdog timer */
8fa38513 7677 mod_timer(&vport->els_tmofunc,
256ec0d0 7678 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7679
7680 /* Start heart beat timer */
7681 mod_timer(&phba->hb_tmofunc,
256ec0d0 7682 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7683 phba->hb_outstanding = 0;
7684 phba->last_completion_time = jiffies;
7685
7686 /* Start error attention (ERATT) polling timer */
256ec0d0 7687 mod_timer(&phba->eratt_poll,
65791f1f 7688 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
da0436e9 7689
7690 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7691 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7692 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7693 if (!rc) {
7694 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7695 "2829 This device supports "
7696 "Advanced Error Reporting (AER)\n");
7697 spin_lock_irq(&phba->hbalock);
7698 phba->hba_flag |= HBA_AER_ENABLED;
7699 spin_unlock_irq(&phba->hbalock);
7700 } else {
7701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7702 "2830 This device does not support "
7703 "Advanced Error Reporting (AER)\n");
7704 phba->cfg_aer_support = 0;
7705 }
0a96e975 7706 rc = 0;
7707 }
7708
7709 /*
7710 * The port is ready, set the host's link state to LINK_DOWN
7711 * in preparation for link interrupts.
7712 */
7713 spin_lock_irq(&phba->hbalock);
7714 phba->link_state = LPFC_LINK_DOWN;
7715
7716 /* Check if physical ports are trunked */
7717 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7718 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7719 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7720 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7721 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7722 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7723 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7724 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
da0436e9 7725 spin_unlock_irq(&phba->hbalock);
1dc5ec24 7726
7727 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7728 (phba->hba_flag & LINK_DISABLED)) {
7729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7730 "3103 Adapter Link is disabled.\n");
7731 lpfc_down_link(phba, mboxq);
7732 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7733 if (rc != MBX_SUCCESS) {
7734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7735 "3104 Adapter failed to issue "
7736 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7737 goto out_unset_queue;
7738 }
7739 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7740 /* don't perform init_link on SLI4 FC port loopback test */
7741 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7742 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7743 if (rc)
7744 goto out_unset_queue;
7745 }
7746 }
7747 mempool_free(mboxq, phba->mbox_mem_pool);
7748 return rc;
76a95d75 7749out_unset_queue:
da0436e9 7750 /* Unset all the queues set up in this routine when error out */
7751 lpfc_sli4_queue_unset(phba);
7752out_destroy_queue:
6c621a22 7753 lpfc_free_iocb_list(phba);
5350d872 7754 lpfc_sli4_queue_destroy(phba);
da0436e9 7755out_stop_timers:
5350d872 7756 lpfc_stop_hba_timers(phba);
7757out_free_mbox:
7758 mempool_free(mboxq, phba->mbox_mem_pool);
7759 return rc;
7760}
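/*
 * Editor's note, the bring-up order implemented above: PCI function reset;
 * READ_REV and VPD parsing; optional UE-recovery and MDS-diagnostics
 * features; REQUEST_FEATURES negotiation; resource id allocation;
 * SET_HOST_DATA; READ_SPARAM; queue create/setup; ELS, NVMET or common
 * sgl repost; iocb list init; RQ buffer posting; rpi header post; FCFI
 * (or MRQ) registration; CQ/EQ arming; then timers and optional AER.
 */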
7761
7762/**
7763 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 7764 * @t: pointer to the mailbox timer embedded in the hba structure.
 7765 *
 7766 * This is the callback function for the mailbox timer. The mailbox
 7767 * timer is armed when a new mailbox command is issued and the timer
 7768 * is deleted when the mailbox completes. The function is called by
7769 * the kernel timer code when a mailbox does not complete within
7770 * expected time. This function wakes up the worker thread to
7771 * process the mailbox timeout and returns. All the processing is
7772 * done by the worker thread function lpfc_mbox_timeout_handler.
7773 **/
7774void
f22eb4d3 7775lpfc_mbox_timeout(struct timer_list *t)
da0436e9 7776{
f22eb4d3 7777 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7778 unsigned long iflag;
7779 uint32_t tmo_posted;
7780
7781 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7782 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7783 if (!tmo_posted)
7784 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7785 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7786
7787 if (!tmo_posted)
7788 lpfc_worker_wake_up(phba);
7789 return;
7790}
7791
7792/**
7793 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7794 * are pending
7795 * @phba: Pointer to HBA context object.
7796 *
7797 * This function checks if any mailbox completions are present on the mailbox
7798 * completion queue.
7799 **/
3bb11fc5 7800static bool
7801lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7802{
7803
7804 uint32_t idx;
7805 struct lpfc_queue *mcq;
7806 struct lpfc_mcqe *mcqe;
7807 bool pending_completions = false;
7365f6fd 7808 uint8_t qe_valid;
7809
7810 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7811 return false;
7812
7813 /* Check for completions on mailbox completion queue */
7814
7815 mcq = phba->sli4_hba.mbx_cq;
7816 idx = mcq->hba_index;
7817 qe_valid = mcq->qe_valid;
7818 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
7819 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7820 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7821 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7822 pending_completions = true;
7823 break;
7824 }
7825 idx = (idx + 1) % mcq->entry_count;
7826 if (mcq->hba_index == idx)
7827 break;
7828
7829 /* if the index wrapped around, toggle the valid bit */
7830 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7831 qe_valid = (qe_valid) ? 0 : 1;
7832 }
7833 return pending_completions;
7834
7835}
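/*
 * Editor's note: the qe_valid handling above is a phase-bit scheme. When
 * the port supports autovalid CQs (pc_sli4_params.cqav), the consumer does
 * not rely on entries being rewritten after use; it tracks an expected
 * valid-bit value and flips it each time the index wraps, so entries left
 * over from the previous lap of the ring never read as new completions.
 */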
7836
7837/**
7838 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7839 * that were missed.
7840 * @phba: Pointer to HBA context object.
7841 *
 7842 * For sli4, it is possible to miss an interrupt. As such, mbox completions
 7843 * may be missed, causing erroneous mailbox timeouts. This function
7844 * checks to see if mbox completions are on the mailbox completion queue
7845 * and will process all the completions associated with the eq for the
7846 * mailbox completion queue.
7847 **/
7848bool
7849lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7850{
b71413dd 7851 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7852 uint32_t eqidx;
7853 struct lpfc_queue *fpeq = NULL;
7854 struct lpfc_eqe *eqe;
7855 bool mbox_pending;
7856
7857 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7858 return false;
7859
7860 /* Find the eq associated with the mcq */
7861
7862 if (sli4_hba->hdwq)
7863 for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++)
7864 if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
b71413dd 7865 sli4_hba->mbx_cq->assoc_qid) {
cdb42bec 7866 fpeq = sli4_hba->hdwq[eqidx].hba_eq;
7867 break;
7868 }
7869 if (!fpeq)
7870 return false;
7871
7872 /* Turn off interrupts from this EQ */
7873
b71413dd 7874 sli4_hba->sli4_eq_clr_intr(fpeq);
7875
7876 /* Check to see if a mbox completion is pending */
7877
7878 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7879
7880 /*
7881 * If a mbox completion is pending, process all the events on EQ
7882 * associated with the mbox completion queue (this could include
7883 * mailbox commands, async events, els commands, receive queue data
7884 * and fcp commands)
7885 */
7886
7887 if (mbox_pending)
7888 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7889 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7890 fpeq->EQ_processed++;
7891 }
7892
7893 /* Always clear and re-arm the EQ */
7894
b71413dd 7895 sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7896
7897 return mbox_pending;
7898
7899}
7900
7901/**
7902 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7903 * @phba: Pointer to HBA context object.
7904 *
7905 * This function is called from worker thread when a mailbox command times out.
7906 * The caller is not required to hold any locks. This function will reset the
7907 * HBA and recover all the pending commands.
7908 **/
7909void
7910lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7911{
7912 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7913 MAILBOX_t *mb = NULL;
7914
da0436e9 7915 struct lpfc_sli *psli = &phba->sli;
da0436e9 7916
7917 /* If the mailbox completed, process the completion and return */
7918 if (lpfc_sli4_process_missed_mbox_completions(phba))
7919 return;
7920
7921 if (pmbox != NULL)
7922 mb = &pmbox->u.mb;
7923 /* Check the pmbox pointer first. There is a race condition
7924 * between the mbox timeout handler getting executed in the
7925 * worklist and the mailbox actually completing. When this
7926 * race condition occurs, the mbox_active will be NULL.
7927 */
7928 spin_lock_irq(&phba->hbalock);
7929 if (pmbox == NULL) {
7930 lpfc_printf_log(phba, KERN_WARNING,
7931 LOG_MBOX | LOG_SLI,
7932 "0353 Active Mailbox cleared - mailbox timeout "
7933 "exiting\n");
7934 spin_unlock_irq(&phba->hbalock);
7935 return;
7936 }
7937
7938 /* Mbox cmd <mbxCommand> timeout */
7939 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7940 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7941 mb->mbxCommand,
7942 phba->pport->port_state,
7943 phba->sli.sli_flag,
7944 phba->sli.mbox_active);
7945 spin_unlock_irq(&phba->hbalock);
7946
7947 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7948 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
25985edc 7949 * it to fail all outstanding SCSI IO.
7950 */
7951 spin_lock_irq(&phba->pport->work_port_lock);
7952 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7953 spin_unlock_irq(&phba->pport->work_port_lock);
7954 spin_lock_irq(&phba->hbalock);
7955 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 7956 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7957 spin_unlock_irq(&phba->hbalock);
7958
db55fba8 7959 lpfc_sli_abort_fcp_rings(phba);
7960
7961 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7962 "0345 Resetting board due to mailbox timeout\n");
7963
7964 /* Reset the HBA device */
7965 lpfc_reset_hba(phba);
7966}
7967
7968/**
7969 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7970 * @phba: Pointer to HBA context object.
7971 * @pmbox: Pointer to mailbox object.
7972 * @flag: Flag indicating how the mailbox need to be processed.
7973 *
7974 * This function is called by discovery code and HBA management code
7975 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7976 * function gets the hbalock to protect the data structures.
7977 * The mailbox command can be submitted in polling mode, in which case
7978 * this function will wait in a polling loop for the completion of the
7979 * mailbox.
 7980 * If the mailbox is submitted in no_wait mode (not polling) the
 7981 * function will submit the command and return immediately without waiting
 7982 * for the mailbox completion. The no_wait mode is supported only when the
 7983 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
7984 * The SLI interface allows only one mailbox pending at a time. If the
7985 * mailbox is issued in polling mode and there is already a mailbox
7986 * pending, then the function will return an error. If the mailbox is issued
7987 * in NO_WAIT mode and there is a mailbox pending already, the function
7988 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7989 * The sli layer owns the mailbox object until the completion of mailbox
7990 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
7991 * return codes the caller owns the mailbox command after the return of
7992 * the function.
e59058c4 7993 **/
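/*
 * Illustrative caller sketch for the polling path described above; this
 * is an assumed example, not code from this file. lpfc_read_rev() stands
 * in for any mailbox setup helper, and the mbox_mem_pool allocation
 * mirrors common driver usage. On MBX_BUSY the SLI layer still owns the
 * mailbox, so it must not be freed; for other return codes the caller
 * owns it again once the poll completes:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc == MBX_SUCCESS && pmb->u.mb.mbxStatus == 0)
 *		... command completed, results are in pmb->u.mb ...
 *	if (rc != MBX_BUSY)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */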
3772a991
JS
7994static int
7995lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7996 uint32_t flag)
dea3101e 7997{
bf07bdea 7998 MAILBOX_t *mbx;
2e0fef85 7999 struct lpfc_sli *psli = &phba->sli;
dea3101e 8000 uint32_t status, evtctr;
9940b97b 8001 uint32_t ha_copy, hc_copy;
dea3101e 8002 int i;
09372820 8003 unsigned long timeout;
dea3101e 8004 unsigned long drvr_flag = 0;
34b02dcd 8005 uint32_t word0, ldata;
dea3101e 8006 void __iomem *to_slim;
58da1ffb
JS
8007 int processing_queue = 0;
8008
8009 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8010 if (!pmbox) {
8568a4d2 8011 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 8012 /* processing mbox queue from intr_handler */
3772a991
JS
8013 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8014 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8015 return MBX_SUCCESS;
8016 }
58da1ffb 8017 processing_queue = 1;
58da1ffb
JS
8018 pmbox = lpfc_mbox_get(phba);
8019 if (!pmbox) {
8020 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8021 return MBX_SUCCESS;
8022 }
8023 }
dea3101e 8024
ed957684 8025 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 8026 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 8027 if(!pmbox->vport) {
58da1ffb 8028 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 8029 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 8030 LOG_MBOX | LOG_VPORT,
e8b62011 8031 "1806 Mbox x%x failed. No vport\n",
3772a991 8032 pmbox->u.mb.mbxCommand);
ed957684 8033 dump_stack();
58da1ffb 8034 goto out_not_finished;
ed957684
JS
8035 }
8036 }
8037
8d63f375 8038 /* If the PCI channel is in offline state, do not post mbox. */
58da1ffb
JS
8039 if (unlikely(pci_channel_offline(phba->pcidev))) {
8040 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8041 goto out_not_finished;
8042 }
8d63f375 8043
a257bf90
JS
8044 /* If HBA has a deferred error attention, fail the iocb. */
8045 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8046 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8047 goto out_not_finished;
8048 }
8049
dea3101e 8050 psli = &phba->sli;
92d7f7b0 8051
bf07bdea 8052 mbx = &pmbox->u.mb;
dea3101e 8053 status = MBX_SUCCESS;
8054
2e0fef85
JS
8055 if (phba->link_state == LPFC_HBA_ERROR) {
8056 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
8057
8058 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8059 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8060 "(%d):0311 Mailbox command x%x cannot "
8061 "issue Data: x%x x%x\n",
8062 pmbox->vport ? pmbox->vport->vpi : 0,
8063 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 8064 goto out_not_finished;
41415862
JW
8065 }
8066
bf07bdea 8067 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9940b97b
JS
8068 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8069 !(hc_copy & HC_MBINT_ENA)) {
8070 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8071 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3772a991
JS
8072 "(%d):2528 Mailbox command x%x cannot "
8073 "issue Data: x%x x%x\n",
8074 pmbox->vport ? pmbox->vport->vpi : 0,
8075 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9940b97b
JS
8076 goto out_not_finished;
8077 }
9290831f
JS
8078 }
8079
dea3101e 8080 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8081 /* Polling for a mbox command when another one is already active
8082 * is not allowed in SLI. Also, the driver must have established
8083 * SLI2 mode to queue and process multiple mbox commands.
8084 */
8085
8086 if (flag & MBX_POLL) {
2e0fef85 8087 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8088
8089 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8090 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8091 "(%d):2529 Mailbox command x%x "
8092 "cannot issue Data: x%x x%x\n",
8093 pmbox->vport ? pmbox->vport->vpi : 0,
8094 pmbox->u.mb.mbxCommand,
8095 psli->sli_flag, flag);
58da1ffb 8096 goto out_not_finished;
dea3101e 8097 }
8098
3772a991 8099 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 8100 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8101 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8102 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8103 "(%d):2530 Mailbox command x%x "
8104 "cannot issue Data: x%x x%x\n",
8105 pmbox->vport ? pmbox->vport->vpi : 0,
8106 pmbox->u.mb.mbxCommand,
8107 psli->sli_flag, flag);
58da1ffb 8108 goto out_not_finished;
dea3101e 8109 }
8110
dea3101e 8111 /* Another mailbox command is still being processed, queue this
8112 * command to be processed later.
8113 */
8114 lpfc_mbox_put(phba, pmbox);
8115
8116 /* Mbox cmd issue - BUSY */
ed957684 8117 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8118 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 8119 "x%x x%x x%x x%x\n",
92d7f7b0 8120 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
e92974f6
JS
8121 mbx->mbxCommand,
8122 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8123 psli->sli_flag, flag);
dea3101e 8124
8125 psli->slistat.mbox_busy++;
2e0fef85 8126 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8127
858c9f6c
JS
8128 if (pmbox->vport) {
8129 lpfc_debugfs_disc_trc(pmbox->vport,
8130 LPFC_DISC_TRC_MBOX_VPORT,
8131 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8132 (uint32_t)mbx->mbxCommand,
8133 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8134 }
8135 else {
8136 lpfc_debugfs_disc_trc(phba->pport,
8137 LPFC_DISC_TRC_MBOX,
8138 "MBOX Bsy: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8139 (uint32_t)mbx->mbxCommand,
8140 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8141 }
8142
2e0fef85 8143 return MBX_BUSY;
dea3101e 8144 }
8145
dea3101e 8146 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8147
8148 /* If we are not polling, we MUST be in SLI2 mode */
8149 if (flag != MBX_POLL) {
3772a991 8150 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
bf07bdea 8151 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 8152 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8153 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 8154 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
8155 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8156 "(%d):2531 Mailbox command x%x "
8157 "cannot issue Data: x%x x%x\n",
8158 pmbox->vport ? pmbox->vport->vpi : 0,
8159 pmbox->u.mb.mbxCommand,
8160 psli->sli_flag, flag);
58da1ffb 8161 goto out_not_finished;
dea3101e 8162 }
8163 /* timeout active mbox command */
256ec0d0
JS
8164 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8165 1000);
8166 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea3101e 8167 }
8168
8169 /* Mailbox cmd <cmd> issue */
ed957684 8170 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 8171 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 8172 "x%x\n",
e8b62011 8173 pmbox->vport ? pmbox->vport->vpi : 0,
e92974f6
JS
8174 mbx->mbxCommand,
8175 phba->pport ? phba->pport->port_state : 0xff,
92d7f7b0 8176 psli->sli_flag, flag);
dea3101e 8177
bf07bdea 8178 if (mbx->mbxCommand != MBX_HEARTBEAT) {
858c9f6c
JS
8179 if (pmbox->vport) {
8180 lpfc_debugfs_disc_trc(pmbox->vport,
8181 LPFC_DISC_TRC_MBOX_VPORT,
8182 "MBOX Send vport: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8183 (uint32_t)mbx->mbxCommand,
8184 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8185 }
8186 else {
8187 lpfc_debugfs_disc_trc(phba->pport,
8188 LPFC_DISC_TRC_MBOX,
8189 "MBOX Send: cmd:x%x mb:x%x x%x",
bf07bdea
RD
8190 (uint32_t)mbx->mbxCommand,
8191 mbx->un.varWords[0], mbx->un.varWords[1]);
858c9f6c
JS
8192 }
8193 }
8194
dea3101e 8195 psli->slistat.mbox_cmd++;
8196 evtctr = psli->slistat.mbox_event;
8197
8198 /* next set own bit for the adapter and copy over command word */
bf07bdea 8199 mbx->mbxOwner = OWN_CHIP;
dea3101e 8200
3772a991 8201 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7a470277
JS
8202 /* Populate mbox extension offset word. */
8203 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
bf07bdea 8204 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8205 = (uint8_t *)phba->mbox_ext
8206 - (uint8_t *)phba->mbox;
8207 }
8208
8209 /* Copy the mailbox extension data */
3e1f0718
JS
8210 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8211 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8212 (uint8_t *)phba->mbox_ext,
8213 pmbox->in_ext_byte_len);
7a470277
JS
8214 }
8215 /* Copy command data to host SLIM area */
bf07bdea 8216 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 8217 } else {
7a470277
JS
8218 /* Populate mbox extension offset word. */
8219 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
bf07bdea 8220 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7a470277
JS
8221 = MAILBOX_HBA_EXT_OFFSET;
8222
8223 /* Copy the mailbox extension data */
3e1f0718 8224 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
7a470277
JS
8225 lpfc_memcpy_to_slim(phba->MBslimaddr +
8226 MAILBOX_HBA_EXT_OFFSET,
3e1f0718 8227 pmbox->ctx_buf, pmbox->in_ext_byte_len);
7a470277 8228
895427bd 8229 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8230 /* copy command data into host mbox for cmpl */
895427bd
JS
8231 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8232 MAILBOX_CMD_SIZE);
dea3101e 8233
8234 /* First copy mbox command data to HBA SLIM, skip past first
8235 word */
8236 to_slim = phba->MBslimaddr + sizeof (uint32_t);
bf07bdea 8237 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea3101e 8238 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8239
8240 /* Next copy over first word, with mbxOwner set */
bf07bdea 8241 ldata = *((uint32_t *)mbx);
dea3101e 8242 to_slim = phba->MBslimaddr;
8243 writel(ldata, to_slim);
8244 readl(to_slim); /* flush */
8245
895427bd 8246 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea3101e 8247 /* switch over to host mailbox */
3772a991 8248 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e 8249 }
8250
8251 wmb();
dea3101e 8252
8253 switch (flag) {
8254 case MBX_NOWAIT:
09372820 8255 /* Set up reference to mailbox command */
dea3101e 8256 psli->mbox_active = pmbox;
09372820
JS
8257 /* Interrupt board to do it */
8258 writel(CA_MBATT, phba->CAregaddr);
8259 readl(phba->CAregaddr); /* flush */
8260 /* Don't wait for it to finish, just return */
dea3101e 8261 break;
8262
8263 case MBX_POLL:
09372820 8264 /* Set up null reference to mailbox command */
dea3101e 8265 psli->mbox_active = NULL;
09372820
JS
8266 /* Interrupt board to do it */
8267 writel(CA_MBATT, phba->CAregaddr);
8268 readl(phba->CAregaddr); /* flush */
8269
3772a991 8270 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8271 /* First read mbox status word */
34b02dcd 8272 word0 = *((uint32_t *)phba->mbox);
dea3101e 8273 word0 = le32_to_cpu(word0);
8274 } else {
8275 /* First read mbox status word */
9940b97b
JS
8276 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8277 spin_unlock_irqrestore(&phba->hbalock,
8278 drvr_flag);
8279 goto out_not_finished;
8280 }
dea3101e 8281 }
8282
8283 /* Read the HBA Host Attention Register */
9940b97b
JS
8284 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8285 spin_unlock_irqrestore(&phba->hbalock,
8286 drvr_flag);
8287 goto out_not_finished;
8288 }
a183a15f
JS
8289 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8290 1000) + jiffies;
09372820 8291 i = 0;
dea3101e 8292 /* Wait for command to complete */
41415862
JW
8293 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8294 (!(ha_copy & HA_MBATT) &&
2e0fef85 8295 (phba->link_state > LPFC_WARM_START))) {
09372820 8296 if (time_after(jiffies, timeout)) {
dea3101e 8297 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 8298 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 8299 drvr_flag);
58da1ffb 8300 goto out_not_finished;
dea3101e 8301 }
8302
8303 /* Check if we took a mbox interrupt while we were
8304 polling */
8305 if (((word0 & OWN_CHIP) != OWN_CHIP)
8306 && (evtctr != psli->slistat.mbox_event))
8307 break;
8308
09372820
JS
8309 if (i++ > 10) {
8310 spin_unlock_irqrestore(&phba->hbalock,
8311 drvr_flag);
8312 msleep(1);
8313 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8314 }
dea3101e 8315
3772a991 8316 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8317 /* First copy command data */
34b02dcd 8318 word0 = *((uint32_t *)phba->mbox);
dea3101e 8319 word0 = le32_to_cpu(word0);
bf07bdea 8320 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 8321 MAILBOX_t *slimmb;
34b02dcd 8322 uint32_t slimword0;
dea3101e 8323 /* Check real SLIM for any errors */
8324 slimword0 = readl(phba->MBslimaddr);
8325 slimmb = (MAILBOX_t *) & slimword0;
8326 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8327 && slimmb->mbxStatus) {
8328 psli->sli_flag &=
3772a991 8329 ~LPFC_SLI_ACTIVE;
dea3101e 8330 word0 = slimword0;
8331 }
8332 }
8333 } else {
8334 /* First copy command data */
8335 word0 = readl(phba->MBslimaddr);
8336 }
8337 /* Read the HBA Host Attention Register */
9940b97b
JS
8338 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8339 spin_unlock_irqrestore(&phba->hbalock,
8340 drvr_flag);
8341 goto out_not_finished;
8342 }
dea3101e 8343 }
8344
3772a991 8345 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 8346 /* copy results back to user */
2ea259ee
JS
8347 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8348 MAILBOX_CMD_SIZE);
7a470277 8349 /* Copy the mailbox extension data */
3e1f0718 8350 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
7a470277 8351 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
3e1f0718 8352 pmbox->ctx_buf,
7a470277
JS
8353 pmbox->out_ext_byte_len);
8354 }
dea3101e 8355 } else {
8356 /* First copy command data */
bf07bdea 8357 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
2ea259ee 8358 MAILBOX_CMD_SIZE);
7a470277 8359 /* Copy the mailbox extension data */
3e1f0718
JS
8360 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8361 lpfc_memcpy_from_slim(
8362 pmbox->ctx_buf,
7a470277
JS
8363 phba->MBslimaddr +
8364 MAILBOX_HBA_EXT_OFFSET,
8365 pmbox->out_ext_byte_len);
dea3101e 8366 }
8367 }
8368
8369 writel(HA_MBATT, phba->HAregaddr);
8370 readl(phba->HAregaddr); /* flush */
8371
8372 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
bf07bdea 8373 status = mbx->mbxStatus;
dea3101e 8374 }
8375
2e0fef85
JS
8376 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8377 return status;
58da1ffb
JS
8378
8379out_not_finished:
8380 if (processing_queue) {
da0436e9 8381 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
58da1ffb
JS
8382 lpfc_mbox_cmpl_put(phba, pmbox);
8383 }
8384 return MBX_NOT_FINISHED;
dea3101e 8385}
8386
f1126688
JS
8387/**
8388 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8389 * @phba: Pointer to HBA context object.
8390 *
8391 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8392 * the driver internal pending mailbox queue. It will then try to wait out the
 8393 * possible outstanding mailbox command before returning.
8394 *
8395 * Returns:
8396 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8397 * the outstanding mailbox command timed out.
8398 **/
8399static int
8400lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8401{
8402 struct lpfc_sli *psli = &phba->sli;
f1126688 8403 int rc = 0;
a183a15f 8404 unsigned long timeout = 0;
f1126688
JS
8405
8406 /* Mark the asynchronous mailbox command posting as blocked */
8407 spin_lock_irq(&phba->hbalock);
8408 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
f1126688
JS
8409 /* Determine how long we might wait for the active mailbox
8410 * command to be gracefully completed by firmware.
8411 */
a183a15f
JS
8412 if (phba->sli.mbox_active)
8413 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8414 phba->sli.mbox_active) *
8415 1000) + jiffies;
8416 spin_unlock_irq(&phba->hbalock);
8417
e8d3c3b1
JS
8418 /* Make sure the mailbox is really active */
8419 if (timeout)
8420 lpfc_sli4_process_missed_mbox_completions(phba);
8421
f1126688
JS
 8422 /* Wait for the outstanding mailbox command to complete */
8423 while (phba->sli.mbox_active) {
8424 /* Check active mailbox complete status every 2ms */
8425 msleep(2);
8426 if (time_after(jiffies, timeout)) {
8427 /* Timeout, marked the outstanding cmd not complete */
8428 rc = 1;
8429 break;
8430 }
8431 }
8432
 8433 /* Could not cleanly block async mailbox command, fail it */
8434 if (rc) {
8435 spin_lock_irq(&phba->hbalock);
8436 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8437 spin_unlock_irq(&phba->hbalock);
8438 }
8439 return rc;
8440}
8441
8442/**
 8443 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8444 * @phba: Pointer to HBA context object.
8445 *
 8446 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 8447 * commands from the driver internal pending mailbox queue. It makes sure
 8448 * that there is no outstanding mailbox command before resuming posting
 8449 * asynchronous mailbox commands. If, for any reason, there is an
 8450 * outstanding mailbox command, it will try to wait it out before resuming
 8451 * asynchronous mailbox command posting.
8452 **/
8453static void
8454lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8455{
8456 struct lpfc_sli *psli = &phba->sli;
8457
8458 spin_lock_irq(&phba->hbalock);
8459 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8460 /* Asynchronous mailbox posting is not blocked, do nothing */
8461 spin_unlock_irq(&phba->hbalock);
8462 return;
8463 }
8464
 8465 /* The outstanding synchronous mailbox command is guaranteed to be
 8466 * done, either successfully or by timeout. After a timeout, the
 8467 * outstanding command is always removed, so just unblock posting
 8468 * of async mailbox commands and resume
8469 */
8470 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8471 spin_unlock_irq(&phba->hbalock);
8472
 8473 /* wake up worker thread to post asynchronous mailbox command */
8474 lpfc_worker_wake_up(phba);
8475}
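/*
 * The block/unblock pair above is meant to bracket a synchronous
 * bootstrap-mailbox post; a sketch of the intended pairing, mirroring
 * lpfc_sli_issue_mbox_s4() further below:
 *
 *	if (lpfc_sli4_async_mbox_block(phba) == 0) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */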
8476
2d843edc
JS
8477/**
8478 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8479 * @phba: Pointer to HBA context object.
8480 * @mboxq: Pointer to mailbox object.
8481 *
 8482 * The function waits for the bootstrap mailbox register ready bit from
 8483 * the port for twice the regular mailbox command timeout value.
8484 *
8485 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8486 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8487 **/
8488static int
8489lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8490{
8491 uint32_t db_ready;
8492 unsigned long timeout;
8493 struct lpfc_register bmbx_reg;
8494
8495 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8496 * 1000) + jiffies;
8497
8498 do {
8499 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8500 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8501 if (!db_ready)
8502 msleep(2);
8503
8504 if (time_after(jiffies, timeout))
8505 return MBXERR_ERROR;
8506 } while (!db_ready);
8507
8508 return 0;
8509}
8510
da0436e9
JS
8511/**
8512 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8513 * @phba: Pointer to HBA context object.
8514 * @mboxq: Pointer to mailbox object.
8515 *
8516 * The function posts a mailbox to the port. The mailbox is expected
 8517 * to be completely filled in and ready for the port to operate on it.
8518 * This routine executes a synchronous completion operation on the
8519 * mailbox by polling for its completion.
8520 *
8521 * The caller must not be holding any locks when calling this routine.
8522 *
8523 * Returns:
8524 * MBX_SUCCESS - mailbox posted successfully
8525 * Any of the MBX error values.
8526 **/
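/*
 * Summary of the bootstrap mailbox protocol implemented below:
 *   1. take the MBOX_ACTIVE token so only one command uses the region;
 *   2. wait for the bootstrap register ready bit;
 *   3. copy the MQE into the bmbx region, write the high DMA address to
 *      BMBXregaddr and wait for ready again;
 *   4. write the low DMA address and wait for ready a third time;
 *   5. copy the MQE and MCQE back, folding any MCQE error into the
 *      mailbox status before releasing the token.
 */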
8527static int
8528lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8529{
8530 int rc = MBX_SUCCESS;
8531 unsigned long iflag;
da0436e9
JS
8532 uint32_t mcqe_status;
8533 uint32_t mbx_cmnd;
da0436e9
JS
8534 struct lpfc_sli *psli = &phba->sli;
8535 struct lpfc_mqe *mb = &mboxq->u.mqe;
8536 struct lpfc_bmbx_create *mbox_rgn;
8537 struct dma_address *dma_address;
da0436e9
JS
8538
8539 /*
8540 * Only one mailbox can be active to the bootstrap mailbox region
8541 * at a time and there is no queueing provided.
8542 */
8543 spin_lock_irqsave(&phba->hbalock, iflag);
8544 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8545 spin_unlock_irqrestore(&phba->hbalock, iflag);
8546 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8547 "(%d):2532 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8548 "cannot issue Data: x%x x%x\n",
8549 mboxq->vport ? mboxq->vport->vpi : 0,
8550 mboxq->u.mb.mbxCommand,
a183a15f
JS
8551 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8552 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8553 psli->sli_flag, MBX_POLL);
8554 return MBXERR_ERROR;
8555 }
8556 /* The server grabs the token and owns it until release */
8557 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8558 phba->sli.mbox_active = mboxq;
8559 spin_unlock_irqrestore(&phba->hbalock, iflag);
8560
2d843edc
JS
 8561 /* wait for bootstrap mbox register readiness */
8562 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8563 if (rc)
8564 goto exit;
8565
da0436e9
JS
8566 /*
8567 * Initialize the bootstrap memory region to avoid stale data areas
8568 * in the mailbox post. Then copy the caller's mailbox contents to
8569 * the bmbx mailbox region.
8570 */
8571 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8572 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
48f8fdb4
JS
8573 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8574 sizeof(struct lpfc_mqe));
da0436e9
JS
8575
8576 /* Post the high mailbox dma address to the port and wait for ready. */
8577 dma_address = &phba->sli4_hba.bmbx.dma_address;
8578 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8579
2d843edc
JS
8580 /* wait for bootstrap mbox register for hi-address write done */
8581 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8582 if (rc)
8583 goto exit;
da0436e9
JS
8584
8585 /* Post the low mailbox dma address to the port. */
8586 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
da0436e9 8587
2d843edc
JS
8588 /* wait for bootstrap mbox register for low address write done */
8589 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8590 if (rc)
8591 goto exit;
da0436e9
JS
8592
8593 /*
8594 * Read the CQ to ensure the mailbox has completed.
8595 * If so, update the mailbox status so that the upper layers
8596 * can complete the request normally.
8597 */
48f8fdb4
JS
8598 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8599 sizeof(struct lpfc_mqe));
da0436e9 8600 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
48f8fdb4
JS
8601 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8602 sizeof(struct lpfc_mcqe));
da0436e9 8603 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
0558056c
JS
8604 /*
8605 * When the CQE status indicates a failure and the mailbox status
8606 * indicates success then copy the CQE status into the mailbox status
8607 * (and prefix it with x4000).
8608 */
da0436e9 8609 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
0558056c
JS
8610 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8611 bf_set(lpfc_mqe_status, mb,
8612 (LPFC_MBX_ERROR_RANGE | mcqe_status));
da0436e9 8613 rc = MBXERR_ERROR;
d7c47992
JS
8614 } else
8615 lpfc_sli4_swap_str(phba, mboxq);
da0436e9
JS
8616
8617 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8618 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
da0436e9
JS
8619 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8620 " x%x x%x CQ: x%x x%x x%x x%x\n",
a183a15f
JS
8621 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8622 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8623 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8624 bf_get(lpfc_mqe_status, mb),
8625 mb->un.mb_words[0], mb->un.mb_words[1],
8626 mb->un.mb_words[2], mb->un.mb_words[3],
8627 mb->un.mb_words[4], mb->un.mb_words[5],
8628 mb->un.mb_words[6], mb->un.mb_words[7],
8629 mb->un.mb_words[8], mb->un.mb_words[9],
8630 mb->un.mb_words[10], mb->un.mb_words[11],
8631 mb->un.mb_words[12], mboxq->mcqe.word0,
8632 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8633 mboxq->mcqe.trailer);
8634exit:
 8635 /* We are holding the token, no lock needed to release it */
8636 spin_lock_irqsave(&phba->hbalock, iflag);
8637 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8638 phba->sli.mbox_active = NULL;
8639 spin_unlock_irqrestore(&phba->hbalock, iflag);
8640 return rc;
8641}
8642
8643/**
8644 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8645 * @phba: Pointer to HBA context object.
 8646 * @mboxq: Pointer to mailbox object.
 8647 * @flag: Flag indicating how the mailbox needs to be processed.
8648 *
8649 * This function is called by discovery code and HBA management code to submit
8650 * a mailbox command to firmware with SLI-4 interface spec.
8651 *
 8652 * For all return codes, the caller owns the mailbox command after the
 8653 * return of the function.
8654 **/
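/*
 * Illustrative caller sketch (an assumed example, not code from this
 * file): an interrupt-mode asynchronous issue with a completion handler.
 * lpfc_sli_def_mbox_cmpl is the driver's default completion referenced
 * earlier in this file:
 *
 *	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */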
8655static int
8656lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8657 uint32_t flag)
8658{
8659 struct lpfc_sli *psli = &phba->sli;
8660 unsigned long iflags;
8661 int rc;
8662
b76f2dc9
JS
8663 /* dump from issue mailbox command if setup */
8664 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8665
8fa38513
JS
8666 rc = lpfc_mbox_dev_check(phba);
8667 if (unlikely(rc)) {
8668 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8669 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8fa38513
JS
8670 "cannot issue Data: x%x x%x\n",
8671 mboxq->vport ? mboxq->vport->vpi : 0,
8672 mboxq->u.mb.mbxCommand,
a183a15f
JS
8673 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8674 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8fa38513
JS
8675 psli->sli_flag, flag);
8676 goto out_not_finished;
8677 }
8678
da0436e9
JS
8679 /* Detect polling mode and jump to a handler */
8680 if (!phba->sli4_hba.intr_enable) {
8681 if (flag == MBX_POLL)
8682 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8683 else
8684 rc = -EIO;
8685 if (rc != MBX_SUCCESS)
0558056c 8686 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
da0436e9 8687 "(%d):2541 Mailbox command x%x "
cc459f19
JS
8688 "(x%x/x%x) failure: "
8689 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8690 "Data: x%x x%x\n,",
da0436e9
JS
8691 mboxq->vport ? mboxq->vport->vpi : 0,
8692 mboxq->u.mb.mbxCommand,
a183a15f
JS
8693 lpfc_sli_config_mbox_subsys_get(phba,
8694 mboxq),
8695 lpfc_sli_config_mbox_opcode_get(phba,
8696 mboxq),
cc459f19
JS
8697 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8698 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8699 bf_get(lpfc_mcqe_ext_status,
8700 &mboxq->mcqe),
da0436e9
JS
8701 psli->sli_flag, flag);
8702 return rc;
8703 } else if (flag == MBX_POLL) {
f1126688
JS
8704 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8705 "(%d):2542 Try to issue mailbox command "
7365f6fd 8706 "x%x (x%x/x%x) synchronously ahead of async "
f1126688 8707 "mailbox command queue: x%x x%x\n",
da0436e9
JS
8708 mboxq->vport ? mboxq->vport->vpi : 0,
8709 mboxq->u.mb.mbxCommand,
a183a15f
JS
8710 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8711 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9 8712 psli->sli_flag, flag);
f1126688
JS
8713 /* Try to block the asynchronous mailbox posting */
8714 rc = lpfc_sli4_async_mbox_block(phba);
8715 if (!rc) {
8716 /* Successfully blocked, now issue sync mbox cmd */
8717 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8718 if (rc != MBX_SUCCESS)
cc459f19 8719 lpfc_printf_log(phba, KERN_WARNING,
a183a15f 8720 LOG_MBOX | LOG_SLI,
cc459f19
JS
8721 "(%d):2597 Sync Mailbox command "
8722 "x%x (x%x/x%x) failure: "
8723 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8724 "Data: x%x x%x\n,",
8725 mboxq->vport ? mboxq->vport->vpi : 0,
a183a15f
JS
8726 mboxq->u.mb.mbxCommand,
8727 lpfc_sli_config_mbox_subsys_get(phba,
8728 mboxq),
8729 lpfc_sli_config_mbox_opcode_get(phba,
8730 mboxq),
cc459f19
JS
8731 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8732 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8733 bf_get(lpfc_mcqe_ext_status,
8734 &mboxq->mcqe),
a183a15f 8735 psli->sli_flag, flag);
f1126688
JS
8736 /* Unblock the async mailbox posting afterward */
8737 lpfc_sli4_async_mbox_unblock(phba);
8738 }
8739 return rc;
da0436e9
JS
8740 }
8741
 8742 /* Now, interrupt mode asynchronous mailbox command */
8743 rc = lpfc_mbox_cmd_check(phba, mboxq);
8744 if (rc) {
8745 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8746 "(%d):2543 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8747 "cannot issue Data: x%x x%x\n",
8748 mboxq->vport ? mboxq->vport->vpi : 0,
8749 mboxq->u.mb.mbxCommand,
a183a15f
JS
8750 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8751 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8752 psli->sli_flag, flag);
8753 goto out_not_finished;
8754 }
da0436e9
JS
8755
 8756 /* Put the mailbox command into the driver internal FIFO */
8757 psli->slistat.mbox_busy++;
8758 spin_lock_irqsave(&phba->hbalock, iflags);
8759 lpfc_mbox_put(phba, mboxq);
8760 spin_unlock_irqrestore(&phba->hbalock, iflags);
8761 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8762 "(%d):0354 Mbox cmd issue - Enqueue Data: "
a183a15f 8763 "x%x (x%x/x%x) x%x x%x x%x\n",
da0436e9
JS
8764 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8765 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
a183a15f
JS
8766 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8767 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8768 phba->pport->port_state,
8769 psli->sli_flag, MBX_NOWAIT);
8770 /* Wake up worker thread to transport mailbox command from head */
8771 lpfc_worker_wake_up(phba);
8772
8773 return MBX_BUSY;
8774
8775out_not_finished:
8776 return MBX_NOT_FINISHED;
8777}
8778
8779/**
8780 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8781 * @phba: Pointer to HBA context object.
8782 *
8783 * This function is called by worker thread to send a mailbox command to
8784 * SLI4 HBA firmware.
8785 *
8786 **/
8787int
8788lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8789{
8790 struct lpfc_sli *psli = &phba->sli;
8791 LPFC_MBOXQ_t *mboxq;
8792 int rc = MBX_SUCCESS;
8793 unsigned long iflags;
8794 struct lpfc_mqe *mqe;
8795 uint32_t mbx_cmnd;
8796
 8797 /* Check interrupt mode before posting async mailbox command */
8798 if (unlikely(!phba->sli4_hba.intr_enable))
8799 return MBX_NOT_FINISHED;
8800
8801 /* Check for mailbox command service token */
8802 spin_lock_irqsave(&phba->hbalock, iflags);
8803 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8804 spin_unlock_irqrestore(&phba->hbalock, iflags);
8805 return MBX_NOT_FINISHED;
8806 }
8807 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8808 spin_unlock_irqrestore(&phba->hbalock, iflags);
8809 return MBX_NOT_FINISHED;
8810 }
8811 if (unlikely(phba->sli.mbox_active)) {
8812 spin_unlock_irqrestore(&phba->hbalock, iflags);
8813 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8814 "0384 There is pending active mailbox cmd\n");
8815 return MBX_NOT_FINISHED;
8816 }
8817 /* Take the mailbox command service token */
8818 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8819
8820 /* Get the next mailbox command from head of queue */
8821 mboxq = lpfc_mbox_get(phba);
8822
8823 /* If no more mailbox command waiting for post, we're done */
8824 if (!mboxq) {
8825 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8826 spin_unlock_irqrestore(&phba->hbalock, iflags);
8827 return MBX_SUCCESS;
8828 }
8829 phba->sli.mbox_active = mboxq;
8830 spin_unlock_irqrestore(&phba->hbalock, iflags);
8831
8832 /* Check device readiness for posting mailbox command */
8833 rc = lpfc_mbox_dev_check(phba);
8834 if (unlikely(rc))
8835 /* Driver clean routine will clean up pending mailbox */
8836 goto out_not_finished;
8837
8838 /* Prepare the mbox command to be posted */
8839 mqe = &mboxq->u.mqe;
8840 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8841
8842 /* Start timer for the mbox_tmo and log some mailbox post messages */
8843 mod_timer(&psli->mbox_tmo, (jiffies +
256ec0d0 8844 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
da0436e9
JS
8845
8846 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
a183a15f 8847 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
da0436e9
JS
8848 "x%x x%x\n",
8849 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
a183a15f
JS
8850 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8851 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8852 phba->pport->port_state, psli->sli_flag);
8853
8854 if (mbx_cmnd != MBX_HEARTBEAT) {
8855 if (mboxq->vport) {
8856 lpfc_debugfs_disc_trc(mboxq->vport,
8857 LPFC_DISC_TRC_MBOX_VPORT,
8858 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8859 mbx_cmnd, mqe->un.mb_words[0],
8860 mqe->un.mb_words[1]);
8861 } else {
8862 lpfc_debugfs_disc_trc(phba->pport,
8863 LPFC_DISC_TRC_MBOX,
8864 "MBOX Send: cmd:x%x mb:x%x x%x",
8865 mbx_cmnd, mqe->un.mb_words[0],
8866 mqe->un.mb_words[1]);
8867 }
8868 }
8869 psli->slistat.mbox_cmd++;
8870
8871 /* Post the mailbox command to the port */
8872 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8873 if (rc != MBX_SUCCESS) {
8874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
a183a15f 8875 "(%d):2533 Mailbox command x%x (x%x/x%x) "
da0436e9
JS
8876 "cannot issue Data: x%x x%x\n",
8877 mboxq->vport ? mboxq->vport->vpi : 0,
8878 mboxq->u.mb.mbxCommand,
a183a15f
JS
8879 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8880 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
da0436e9
JS
8881 psli->sli_flag, MBX_NOWAIT);
8882 goto out_not_finished;
8883 }
8884
8885 return rc;
8886
8887out_not_finished:
8888 spin_lock_irqsave(&phba->hbalock, iflags);
d7069f09
JS
8889 if (phba->sli.mbox_active) {
8890 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8891 __lpfc_mbox_cmpl_put(phba, mboxq);
8892 /* Release the token */
8893 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8894 phba->sli.mbox_active = NULL;
8895 }
da0436e9
JS
8896 spin_unlock_irqrestore(&phba->hbalock, iflags);
8897
8898 return MBX_NOT_FINISHED;
8899}
8900
8901/**
8902 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8903 * @phba: Pointer to HBA context object.
8904 * @pmbox: Pointer to mailbox object.
 8905 * @flag: Flag indicating how the mailbox needs to be processed.
8906 *
8907 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8908 * the API jump table function pointer from the lpfc_hba struct.
8909 *
 8910 * For all return codes, the caller owns the mailbox command after the
 8911 * return of the function.
8912 **/
8913int
8914lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8915{
8916 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8917}
8918
8919/**
25985edc 8920 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
da0436e9
JS
8921 * @phba: The hba struct for which this call is being executed.
8922 * @dev_grp: The HBA PCI-Device group number.
8923 *
8924 * This routine sets up the mbox interface API function jump table in @phba
8925 * struct.
8926 * Returns: 0 - success, -ENODEV - failure.
8927 **/
8928int
8929lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8930{
8931
8932 switch (dev_grp) {
8933 case LPFC_PCI_DEV_LP:
8934 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8935 phba->lpfc_sli_handle_slow_ring_event =
8936 lpfc_sli_handle_slow_ring_event_s3;
8937 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8938 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8939 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8940 break;
8941 case LPFC_PCI_DEV_OC:
8942 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8943 phba->lpfc_sli_handle_slow_ring_event =
8944 lpfc_sli_handle_slow_ring_event_s4;
8945 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8946 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8947 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8948 break;
8949 default:
8950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8951 "1420 Invalid HBA PCI-device group: 0x%x\n",
8952 dev_grp);
8953 return -ENODEV;
8954 break;
8955 }
8956 return 0;
8957}
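/*
 * Usage sketch: this setup runs once at probe time, e.g.
 * lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC) for an SLI-4 port;
 * thereafter lpfc_sli_issue_mbox() above dispatches through the jump
 * table to the _s3 or _s4 routine without the callers caring which.
 */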
8958
e59058c4 8959/**
3621a710 8960 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
8961 * @phba: Pointer to HBA context object.
8962 * @pring: Pointer to driver SLI ring object.
8963 * @piocb: Pointer to address of newly added command iocb.
8964 *
8965 * This function is called with hbalock held to add a command
8966 * iocb to the txq when SLI layer cannot submit the command iocb
8967 * to the ring.
8968 **/
2a9bf3d0 8969void
92d7f7b0 8970__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 8971 struct lpfc_iocbq *piocb)
dea3101e 8972{
1c2ba475 8973 lockdep_assert_held(&phba->hbalock);
dea3101e 8974 /* Insert the caller's iocb in the txq tail for later processing. */
8975 list_add_tail(&piocb->list, &pring->txq);
dea3101e 8976}
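/*
 * Locking sketch for the lockdep assertion above; callers are expected
 * to hold the hbalock across the call, for example:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */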
8977
e59058c4 8978/**
3621a710 8979 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
8980 * @phba: Pointer to HBA context object.
8981 * @pring: Pointer to driver SLI ring object.
8982 * @piocb: Pointer to address of newly added command iocb.
8983 *
8984 * This function is called with hbalock held before a new
 8985 * iocb is submitted to the firmware. This function first flushes
 8986 * any iocbs already waiting on the txq to the firmware before
 8987 * submitting a new iocb to the firmware.
8988 * If there are iocbs in the txq which need to be submitted
8989 * to firmware, lpfc_sli_next_iocb returns the first element
8990 * of the txq after dequeuing it from txq.
 8991 * If there is no iocb in the txq then the function returns the
 8992 * original *piocb and sets *piocb to NULL. The caller must check
 8993 * *piocb to see whether more commands remain in the txq.
8994 **/
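/*
 * The dequeue-or-passthrough contract above yields the drain idiom used
 * by __lpfc_sli_issue_iocb_s3() later in this file: keep submitting
 * while ring slots remain, stopping once *piocb has been consumed:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 */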
dea3101e 8995static struct lpfc_iocbq *
8996lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 8997 struct lpfc_iocbq **piocb)
dea3101e 8998{
8999 struct lpfc_iocbq * nextiocb;
9000
1c2ba475
JT
9001 lockdep_assert_held(&phba->hbalock);
9002
dea3101e 9003 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9004 if (!nextiocb) {
9005 nextiocb = *piocb;
9006 *piocb = NULL;
9007 }
9008
9009 return nextiocb;
9010}
9011
e59058c4 9012/**
3772a991 9013 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 9014 * @phba: Pointer to HBA context object.
3772a991 9015 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
9016 * @piocb: Pointer to command iocb.
9017 * @flag: Flag indicating if this command can be put into txq.
9018 *
3772a991
JS
9019 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9020 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9021 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9022 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9023 * this function allows only iocbs for posting buffers. This function finds
9024 * next available slot in the command ring and posts the command to the
9025 * available slot and writes the port attention register to request HBA start
9026 * processing new iocb. If there is no slot available in the ring and
9027 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9028 * the function returns IOCB_BUSY.
e59058c4 9029 *
3772a991
JS
9030 * This function is called with hbalock held. The function will return success
 9031 * after it successfully submits the iocb to the firmware or after
 9032 * adding it to the txq.
e59058c4 9033 **/
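/*
 * Behavior sketch for the SLI_IOCB_RET_IOCB flag described above (an
 * assumed caller, not code from this file): without the flag, an iocb
 * that cannot be submitted is parked on the txq and IOCB_SUCCESS is
 * returned; with the flag, the caller keeps ownership:
 *
 *	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		handle_ring_full(piocb);	(hypothetical recovery)
 */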
98c9ea5c 9034static int
3772a991 9035__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e 9036 struct lpfc_iocbq *piocb, uint32_t flag)
9037{
9038 struct lpfc_iocbq *nextiocb;
9039 IOCB_t *iocb;
895427bd 9040 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea3101e 9041
1c2ba475
JT
9042 lockdep_assert_held(&phba->hbalock);
9043
92d7f7b0
JS
9044 if (piocb->iocb_cmpl && (!piocb->vport) &&
9045 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9046 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9047 lpfc_printf_log(phba, KERN_ERR,
9048 LOG_SLI | LOG_VPORT,
e8b62011 9049 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
9050 piocb->iocb.ulpCommand);
9051 dump_stack();
9052 return IOCB_ERROR;
9053 }
9054
9055
8d63f375
LV
9056 /* If the PCI channel is in offline state, do not post iocbs. */
9057 if (unlikely(pci_channel_offline(phba->pcidev)))
9058 return IOCB_ERROR;
9059
a257bf90
JS
9060 /* If HBA has a deferred error attention, fail the iocb. */
9061 if (unlikely(phba->hba_flag & DEFER_ERATT))
9062 return IOCB_ERROR;
9063
dea3101e 9064 /*
9065 * We should never get an IOCB if we are in a < LINK_DOWN state
9066 */
2e0fef85 9067 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e 9068 return IOCB_ERROR;
9069
9070 /*
 9071 * Check to see if we are blocking IOCB processing because of an
0b727fea 9072 * outstanding event.
dea3101e 9073 */
0b727fea 9074 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e 9075 goto iocb_busy;
9076
2e0fef85 9077 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 9078 /*
2680eeaa 9079 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e 9080 * can be issued if the link is not up.
9081 */
9082 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
9083 case CMD_GEN_REQUEST64_CR:
9084 case CMD_GEN_REQUEST64_CX:
9085 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9086 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 9087 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
9088 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9089 MENLO_TRANSPORT_TYPE))
9090
9091 goto iocb_busy;
9092 break;
dea3101e 9093 case CMD_QUE_RING_BUF_CN:
9094 case CMD_QUE_RING_BUF64_CN:
dea3101e 9095 /*
9096 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9097 * completion, iocb_cmpl MUST be 0.
9098 */
9099 if (piocb->iocb_cmpl)
9100 piocb->iocb_cmpl = NULL;
9101 /*FALLTHROUGH*/
9102 case CMD_CREATE_XRI_CR:
2680eeaa
JS
9103 case CMD_CLOSE_XRI_CN:
9104 case CMD_CLOSE_XRI_CX:
dea3101e 9105 break;
9106 default:
9107 goto iocb_busy;
9108 }
9109
9110 /*
9111 * For FCP commands, we must be in a state where we can process link
9112 * attention events.
9113 */
895427bd 9114 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
92d7f7b0 9115 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 9116 goto iocb_busy;
92d7f7b0 9117 }
dea3101e 9118
dea3101e 9119 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9120 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9121 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9122
9123 if (iocb)
9124 lpfc_sli_update_ring(phba, pring);
9125 else
9126 lpfc_sli_update_full_ring(phba, pring);
9127
9128 if (!piocb)
9129 return IOCB_SUCCESS;
9130
9131 goto out_busy;
9132
9133 iocb_busy:
9134 pring->stats.iocb_cmd_delay++;
9135
9136 out_busy:
9137
9138 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 9139 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e 9140 return IOCB_SUCCESS;
9141 }
9142
9143 return IOCB_BUSY;
9144}
9145
3772a991 9146/**
4f774513
JS
9147 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9148 * @phba: Pointer to HBA context object.
 9149 * @piocbq: Pointer to command iocb.
9150 * @sglq: Pointer to the scatter gather queue object.
9151 *
9152 * This routine converts the bpl or bde that is in the IOCB
9153 * to a sgl list for the sli4 hardware. The physical address
9154 * of the bpl/bde is converted back to a virtual address.
 9155 * If the IOCB contains a BPL then the list of BDE's is
 9156 * converted to sli4_sge's. If the IOCB contains a single
 9157 * BDE then it is converted to a single sli4_sge.
 9158 * The IOCB is still in CPU endianness so the contents of
 9159 * the bpl can be used without byte swapping.
9160 *
9161 * Returns valid XRI = Success, NO_XRI = Failure.
9162**/
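/*
 * Worked single-BDE case of the conversion described above, matching
 * the code below: the address and length words are swapped to little
 * endian here because this path receives them in CPU byte order:
 *
 *	sgl->addr_hi = cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
 *	sgl->addr_lo = cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 *	sgl->sge_len = cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
 */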
9163static uint16_t
9164lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9165 struct lpfc_sglq *sglq)
3772a991 9166{
4f774513
JS
9167 uint16_t xritag = NO_XRI;
9168 struct ulp_bde64 *bpl = NULL;
9169 struct ulp_bde64 bde;
9170 struct sli4_sge *sgl = NULL;
1b51197d 9171 struct lpfc_dmabuf *dmabuf;
4f774513
JS
9172 IOCB_t *icmd;
9173 int numBdes = 0;
9174 int i = 0;
63e801ce
JS
9175 uint32_t offset = 0; /* accumulated offset in the sg request list */
9176 int inbound = 0; /* number of sg reply entries inbound from firmware */
3772a991 9177
4f774513
JS
9178 if (!piocbq || !sglq)
9179 return xritag;
9180
9181 sgl = (struct sli4_sge *)sglq->sgl;
9182 icmd = &piocbq->iocb;
6b5151fd
JS
9183 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9184 return sglq->sli4_xritag;
4f774513
JS
9185 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9186 numBdes = icmd->un.genreq64.bdl.bdeSize /
9187 sizeof(struct ulp_bde64);
9188 /* The addrHigh and addrLow fields within the IOCB
9189 * have not been byteswapped yet so there is no
9190 * need to swap them back.
9191 */
1b51197d
JS
9192 if (piocbq->context3)
9193 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9194 else
9195 return xritag;
4f774513 9196
1b51197d 9197 bpl = (struct ulp_bde64 *)dmabuf->virt;
4f774513
JS
9198 if (!bpl)
9199 return xritag;
9200
9201 for (i = 0; i < numBdes; i++) {
9202 /* Should already be byte swapped. */
28baac74
JS
9203 sgl->addr_hi = bpl->addrHigh;
9204 sgl->addr_lo = bpl->addrLow;
9205
0558056c 9206 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9207 if ((i+1) == numBdes)
9208 bf_set(lpfc_sli4_sge_last, sgl, 1);
9209 else
9210 bf_set(lpfc_sli4_sge_last, sgl, 0);
28baac74
JS
9211 /* swap the size field back to the cpu so we
9212 * can assign it to the sgl.
9213 */
9214 bde.tus.w = le32_to_cpu(bpl->tus.w);
9215 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
63e801ce
JS
9216 /* The offsets in the sgl need to be accumulated
9217 * separately for the request and reply lists.
9218 * The request is always first, the reply follows.
9219 */
9220 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9221 /* add up the reply sg entries */
9222 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9223 inbound++;
9224 /* first inbound? reset the offset */
9225 if (inbound == 1)
9226 offset = 0;
9227 bf_set(lpfc_sli4_sge_offset, sgl, offset);
f9bb2da1
JS
9228 bf_set(lpfc_sli4_sge_type, sgl,
9229 LPFC_SGE_TYPE_DATA);
63e801ce
JS
9230 offset += bde.tus.f.bdeSize;
9231 }
546fc854 9232 sgl->word2 = cpu_to_le32(sgl->word2);
4f774513
JS
9233 bpl++;
9234 sgl++;
9235 }
9236 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9237 /* The addrHigh and addrLow fields of the BDE have not
9238 * been byteswapped yet so they need to be swapped
9239 * before putting them in the sgl.
9240 */
9241 sgl->addr_hi =
9242 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9243 sgl->addr_lo =
9244 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
0558056c 9245 sgl->word2 = le32_to_cpu(sgl->word2);
4f774513
JS
9246 bf_set(lpfc_sli4_sge_last, sgl, 1);
9247 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74
JS
9248 sgl->sge_len =
9249 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
4f774513
JS
9250 }
9251 return sglq->sli4_xritag;
3772a991 9252}
92d7f7b0 9253
e59058c4 9254/**
4f774513 9255 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 9256 * @phba: Pointer to HBA context object.
4f774513
JS
 9257 * @iocbq: Pointer to command iocb.
9258 * @wqe: Pointer to the work queue entry.
e59058c4 9259 *
4f774513
JS
9260 * This routine converts the iocb command to its Work Queue Entry
9261 * equivalent. The wqe pointer should not have any fields set when
9262 * this routine is called because it will memcpy over them.
9263 * This routine does not set the CQ_ID or the WQEC bits in the
9264 * wqe.
e59058c4 9265 *
4f774513 9266 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 9267 **/
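/*
 * Minimal calling sketch (assumed caller shape, not from this file):
 * the routine fills a caller-provided WQE, and the submit path remains
 * responsible for the CQ_ID and WQEC bits noted above:
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, iocbq, &wqe) == IOCB_ERROR)
 *		return IOCB_ERROR;
 *	(the WQ submit code then sets the CQ_ID and rings the doorbell)
 */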
cf5bf97e 9268static int
4f774513 9269lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
205e8240 9270 union lpfc_wqe128 *wqe)
cf5bf97e 9271{
5ffc266e 9272 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
9273 uint8_t ct = 0;
9274 uint32_t fip;
9275 uint32_t abort_tag;
9276 uint8_t command_type = ELS_COMMAND_NON_FIP;
9277 uint8_t cmnd;
9278 uint16_t xritag;
dcf2a4e0
JS
9279 uint16_t abrt_iotag;
9280 struct lpfc_iocbq *abrtiocbq;
4f774513 9281 struct ulp_bde64 *bpl = NULL;
f0d9bccc 9282 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5ffc266e
JS
9283 int numBdes, i;
9284 struct ulp_bde64 bde;
c31098ce 9285 struct lpfc_nodelist *ndlp;
ff78d8f9 9286 uint32_t *pcmd;
1b51197d 9287 uint32_t if_type;
4f774513 9288
45ed1190 9289 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 9290 /* The fcp commands will set command type */
0c287589 9291 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 9292 command_type = FCP_COMMAND;
c868595d 9293 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
9294 command_type = ELS_COMMAND_FIP;
9295 else
9296 command_type = ELS_COMMAND_NON_FIP;
9297
b5c53958
JS
9298 if (phba->fcp_embed_io)
9299 memset(wqe, 0, sizeof(union lpfc_wqe128));
4f774513
JS
9300 /* Some of the fields are in the right position already */
9301 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
ae9e28f3
JS
9302 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9303 /* The ct field has moved so reset */
9304 wqe->generic.wqe_com.word7 = 0;
9305 wqe->generic.wqe_com.word10 = 0;
9306 }
b5c53958
JS
9307
9308 abort_tag = (uint32_t) iocbq->iotag;
9309 xritag = iocbq->sli4_xritag;
4f774513
JS
9310 /* words0-2 bpl convert bde */
9311 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
9312 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9313 sizeof(struct ulp_bde64);
4f774513
JS
9314 bpl = (struct ulp_bde64 *)
9315 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9316 if (!bpl)
9317 return IOCB_ERROR;
cf5bf97e 9318
4f774513
JS
9319 /* Should already be byte swapped. */
9320 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9321 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9322 /* swap the size field back to the cpu so we
9323 * can assign it to the sgl.
9324 */
9325 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
9326 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9327 total_len = 0;
9328 for (i = 0; i < numBdes; i++) {
9329 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9330 total_len += bde.tus.f.bdeSize;
9331 }
4f774513 9332 } else
5ffc266e 9333 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 9334
4f774513
JS
9335 iocbq->iocb.ulpIoTag = iocbq->iotag;
9336 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 9337
4f774513
JS
9338 switch (iocbq->iocb.ulpCommand) {
9339 case CMD_ELS_REQUEST64_CR:
93d1379e
JS
9340 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9341 ndlp = iocbq->context_un.ndlp;
9342 else
9343 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513
JS
9344 if (!iocbq->iocb.ulpLe) {
9345 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9346 "2007 Only Limited Edition cmd Format"
9347 " supported 0x%x\n",
9348 iocbq->iocb.ulpCommand);
9349 return IOCB_ERROR;
9350 }
ff78d8f9 9351
5ffc266e 9352 wqe->els_req.payload_len = xmit_len;
4f774513
JS
 9353 /* Els_request64 has a TMO */
9354 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9355 iocbq->iocb.ulpTimeout);
 9356 /* Need a VF for word 4; set the vf bit */
9357 bf_set(els_req64_vf, &wqe->els_req, 0);
9358 /* And a VFID for word 12 */
9359 bf_set(els_req64_vfid, &wqe->els_req, 0);
4f774513 9360 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
f0d9bccc
JS
9361 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9362 iocbq->iocb.ulpContext);
9363 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9364 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
4f774513 9365 /* CCP CCPE PV PRI in word10 were set in the memcpy */
ff78d8f9 9366 if (command_type == ELS_COMMAND_FIP)
c868595d
JS
9367 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9368 >> LPFC_FIP_ELS_ID_SHIFT);
ff78d8f9
JS
9369 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9370 iocbq->context2)->virt);
1b51197d
JS
9371 if_type = bf_get(lpfc_sli_intf_if_type,
9372 &phba->sli4_hba.sli_intf);
27d6ac0a 9373 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
ff78d8f9 9374 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
cb69f7de 9375 *pcmd == ELS_CMD_SCR ||
6b5151fd 9376 *pcmd == ELS_CMD_FDISC ||
bdcd2b92 9377 *pcmd == ELS_CMD_LOGO ||
ff78d8f9
JS
9378 *pcmd == ELS_CMD_PLOGI)) {
9379 bf_set(els_req64_sp, &wqe->els_req, 1);
9380 bf_set(els_req64_sid, &wqe->els_req,
9381 iocbq->vport->fc_myDID);
939723a4
JS
9382 if ((*pcmd == ELS_CMD_FLOGI) &&
9383 !(phba->fc_topology ==
9384 LPFC_TOPOLOGY_LOOP))
9385 bf_set(els_req64_sid, &wqe->els_req, 0);
ff78d8f9
JS
9386 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9387 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
a7dd9c0f 9388 phba->vpi_ids[iocbq->vport->vpi]);
3ef6d24c 9389 } else if (pcmd && iocbq->context1) {
ff78d8f9
JS
9390 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9391 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9392 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9393 }
c868595d 9394 }
6d368e53
JS
9395 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9396 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
f0d9bccc
JS
9397 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9398 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9399 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9400 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9401 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9402 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
af22741c 9403 wqe->els_req.max_response_payload_len = total_len - xmit_len;
7851fe2c 9404 break;
5ffc266e 9405 case CMD_XMIT_SEQUENCE64_CX:
f0d9bccc
JS
9406 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9407 iocbq->iocb.un.ulpWord[3]);
9408 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7851fe2c 9409 iocbq->iocb.unsli3.rcvsli3.ox_id);
5ffc266e
JS
9410 /* The entire sequence is transmitted for this IOCB */
9411 xmit_len = total_len;
9412 cmnd = CMD_XMIT_SEQUENCE64_CR;
1b51197d
JS
9413 if (phba->link_flag & LS_LOOPBACK_MODE)
9414 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
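		/* fall through - the CX variant is completed as a CMD_XMIT_SEQUENCE64_CR */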
4f774513 9415 case CMD_XMIT_SEQUENCE64_CR:
f0d9bccc
JS
9416 /* word3 iocb=io_tag32 wqe=reserved */
9417 wqe->xmit_sequence.rsvd3 = 0;
4f774513
JS
9418 /* word4 relative_offset memcpy */
9419 /* word5 r_ctl/df_ctl memcpy */
f0d9bccc
JS
9420 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9421 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9422 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9423 LPFC_WQE_IOD_WRITE);
9424 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9425 LPFC_WQE_LENLOC_WORD12);
9426 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
5ffc266e
JS
9427 wqe->xmit_sequence.xmit_len = xmit_len;
9428 command_type = OTHER_COMMAND;
7851fe2c 9429 break;
4f774513 9430 case CMD_XMIT_BCAST64_CN:
f0d9bccc
JS
9431 /* word3 iocb=iotag32 wqe=seq_payload_len */
9432 wqe->xmit_bcast64.seq_payload_len = xmit_len;
4f774513
JS
9433 /* word4 iocb=rsvd wqe=rsvd */
9434 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9435 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
f0d9bccc 9436 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
4f774513 9437 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
f0d9bccc
JS
9438 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9439 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9440 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9441 LPFC_WQE_LENLOC_WORD3);
9442 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7851fe2c 9443 break;
4f774513
JS
9444 case CMD_FCP_IWRITE64_CR:
9445 command_type = FCP_COMMAND_DATA_OUT;
f0d9bccc
JS
9446 /* word3 iocb=iotag wqe=payload_offset_len */
9447 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9448 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9449 xmit_len + sizeof(struct fcp_rsp));
9450 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9451 0);
f0d9bccc
JS
9452 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9453 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9454 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9455 iocbq->iocb.ulpFCP2Rcvy);
9456 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9457 /* Always open the exchange */
f0d9bccc
JS
9458 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9459 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9460 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9461 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9462 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
1ba981fd
JS
9463 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9464 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
c92c841c
JS
9465 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9466 if (iocbq->priority) {
9467 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9468 (iocbq->priority << 1));
9469 } else {
1ba981fd
JS
9470 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9471 (phba->cfg_XLanePriority << 1));
9472 }
9473 }
b5c53958
JS
9474 /* Note, word 10 is already initialized to 0 */
9475
414abe0a
JS
9476 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9477 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9478 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9479 else
9480 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9481
b5c53958
JS
9482 if (phba->fcp_embed_io) {
9483 struct lpfc_scsi_buf *lpfc_cmd;
9484 struct sli4_sge *sgl;
b5c53958
JS
9485 struct fcp_cmnd *fcp_cmnd;
9486 uint32_t *ptr;
9487
9488 /* 128 byte wqe support here */
b5c53958
JS
9489
9490 lpfc_cmd = iocbq->context1;
0794d601 9491 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9492 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9493
9494 /* Word 0-2 - FCP_CMND */
205e8240 9495 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9496 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9497 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9498 wqe->generic.bde.addrHigh = 0;
9499 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9500
205e8240
JS
9501 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9502 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
b5c53958
JS
9503
9504 /* Word 22-29 FCP CMND Payload */
205e8240 9505 ptr = &wqe->words[22];
b5c53958
JS
9506 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9507 }
7851fe2c 9508 break;
4f774513 9509 case CMD_FCP_IREAD64_CR:
f0d9bccc
JS
9510 /* word3 iocb=iotag wqe=payload_offset_len */
9511 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
0ba4b219
JS
9512 bf_set(payload_offset_len, &wqe->fcp_iread,
9513 xmit_len + sizeof(struct fcp_rsp));
9514 bf_set(cmd_buff_len, &wqe->fcp_iread,
9515 0);
f0d9bccc
JS
9516 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9517 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9518 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9519 iocbq->iocb.ulpFCP2Rcvy);
9520 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
f1126688 9521 /* Always open the exchange */
f0d9bccc
JS
9522 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9523 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9524 LPFC_WQE_LENLOC_WORD4);
f0d9bccc 9525 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
acd6859b 9526 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
1ba981fd
JS
9527 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9528 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
c92c841c
JS
9529 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9530 if (iocbq->priority) {
9531 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9532 (iocbq->priority << 1));
9533 } else {
1ba981fd
JS
9534 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9535 (phba->cfg_XLanePriority << 1));
9536 }
9537 }
b5c53958
JS
9538 /* Note, word 10 is already initialized to 0 */
9539
414abe0a
JS
9540 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9541 if (phba->cfg_enable_pbde)
0bc2b7c5
JS
9542 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9543 else
9544 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9545
b5c53958
JS
9546 if (phba->fcp_embed_io) {
9547 struct lpfc_scsi_buf *lpfc_cmd;
9548 struct sli4_sge *sgl;
b5c53958
JS
9549 struct fcp_cmnd *fcp_cmnd;
9550 uint32_t *ptr;
9551
9552 /* 128 byte wqe support here */
b5c53958
JS
9553
9554 lpfc_cmd = iocbq->context1;
0794d601 9555 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9556 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9557
9558 /* Word 0-2 - FCP_CMND */
205e8240 9559 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9560 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9561 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9562 wqe->generic.bde.addrHigh = 0;
9563 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9564
205e8240
JS
9565 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9566 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
b5c53958
JS
9567
9568 /* Word 22-29 FCP CMND Payload */
205e8240 9569 ptr = &wqe->words[22];
b5c53958
JS
9570 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9571 }
7851fe2c 9572 break;
4f774513 9573 case CMD_FCP_ICMND64_CR:
0ba4b219
JS
9574 /* word3 iocb=iotag wqe=payload_offset_len */
9575 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9576 bf_set(payload_offset_len, &wqe->fcp_icmd,
9577 xmit_len + sizeof(struct fcp_rsp));
9578 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9579 0);
f0d9bccc 9580 /* word3 iocb=IO_TAG wqe=reserved */
f0d9bccc 9581 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
4f774513 9582 /* Always open the exchange */
f0d9bccc
JS
9583 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9584 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9585 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9586 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9587 LPFC_WQE_LENLOC_NONE);
2a94aea4
JS
9588 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9589 iocbq->iocb.ulpFCP2Rcvy);
1ba981fd
JS
9590 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9591 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
c92c841c
JS
9592 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9593 if (iocbq->priority) {
9594 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9595 (iocbq->priority << 1));
9596 } else {
1ba981fd
JS
9597 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9598 (phba->cfg_XLanePriority << 1));
9599 }
9600 }
b5c53958
JS
9601 /* Note, word 10 is already initialized to 0 */
9602
9603 if (phba->fcp_embed_io) {
9604 struct lpfc_scsi_buf *lpfc_cmd;
9605 struct sli4_sge *sgl;
b5c53958
JS
9606 struct fcp_cmnd *fcp_cmnd;
9607 uint32_t *ptr;
9608
9609 /* 128 byte wqe support here */
b5c53958
JS
9610
9611 lpfc_cmd = iocbq->context1;
0794d601 9612 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
b5c53958
JS
9613 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9614
9615 /* Word 0-2 - FCP_CMND */
205e8240 9616 wqe->generic.bde.tus.f.bdeFlags =
b5c53958 9617 BUFF_TYPE_BDE_IMMED;
205e8240
JS
9618 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9619 wqe->generic.bde.addrHigh = 0;
9620 wqe->generic.bde.addrLow = 88; /* Word 22 */
b5c53958 9621
205e8240
JS
9622 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9623 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
b5c53958
JS
9624
9625 /* Word 22-29 FCP CMND Payload */
205e8240 9626 ptr = &wqe->words[22];
b5c53958
JS
9627 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9628 }
7851fe2c 9629 break;
4f774513 9630 case CMD_GEN_REQUEST64_CR:
63e801ce
JS
9631 /* For this command calculate the xmit length of the
9632 * request bde.
9633 */
9634 xmit_len = 0;
9635 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9636 sizeof(struct ulp_bde64);
9637 for (i = 0; i < numBdes; i++) {
63e801ce 9638 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
546fc854
JS
9639 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9640 break;
63e801ce
JS
9641 xmit_len += bde.tus.f.bdeSize;
9642 }
f0d9bccc
JS
9643 /* word3 iocb=IO_TAG wqe=request_payload_len */
9644 wqe->gen_req.request_payload_len = xmit_len;
9645 /* word4 iocb=parameter wqe=relative_offset memcpy */
9646 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
4f774513
JS
9647 /* word6 context tag copied in memcpy */
9648 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9649 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9651 "2015 Invalid CT %x command 0x%x\n",
9652 ct, iocbq->iocb.ulpCommand);
9653 return IOCB_ERROR;
9654 }
f0d9bccc
JS
9655 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9656 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9657 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9658 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9659 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9660 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9661 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9662 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
af22741c 9663 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
4f774513 9664 command_type = OTHER_COMMAND;
7851fe2c 9665 break;
4f774513 9666 case CMD_XMIT_ELS_RSP64_CX:
c31098ce 9667 ndlp = (struct lpfc_nodelist *)iocbq->context1;
4f774513 9668 /* words0-2 BDE memcpy */
f0d9bccc
JS
9669 /* word3 iocb=iotag32 wqe=response_payload_len */
9670 wqe->xmit_els_rsp.response_payload_len = xmit_len;
939723a4
JS
9671 /* word4 */
9672 wqe->xmit_els_rsp.word4 = 0;
4f774513
JS
9673 /* word5 iocb=rsvd wqe=did */
9674 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
939723a4
JS
9675 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9676
9677 if_type = bf_get(lpfc_sli_intf_if_type,
9678 &phba->sli4_hba.sli_intf);
27d6ac0a 9679 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
939723a4
JS
9680 if (iocbq->vport->fc_flag & FC_PT2PT) {
9681 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9682 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9683 iocbq->vport->fc_myDID);
9684 if (iocbq->vport->fc_myDID == Fabric_DID) {
9685 bf_set(wqe_els_did,
9686 &wqe->xmit_els_rsp.wqe_dest, 0);
9687 }
9688 }
9689 }
f0d9bccc
JS
9690 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9691 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9692 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9693 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7851fe2c 9694 iocbq->iocb.unsli3.rcvsli3.ox_id);
4f774513 9695 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
f0d9bccc 9696 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6d368e53 9697 phba->vpi_ids[iocbq->vport->vpi]);
f0d9bccc
JS
9698 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9699 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9700 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9701 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9702 LPFC_WQE_LENLOC_WORD3);
9703 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6d368e53
JS
9704 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9705 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
ff78d8f9
JS
9706 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9707 iocbq->context2)->virt);
9708 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
939723a4
JS
9709 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9710 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
ff78d8f9 9711 iocbq->vport->fc_myDID);
939723a4
JS
9712 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9713 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
ff78d8f9
JS
9714 phba->vpi_ids[phba->pport->vpi]);
9715 }
4f774513 9716 command_type = OTHER_COMMAND;
7851fe2c 9717 break;
4f774513
JS
9718 case CMD_CLOSE_XRI_CN:
9719 case CMD_ABORT_XRI_CN:
9720 case CMD_ABORT_XRI_CX:
9721 /* words 0-2 are reserved; the memcpy should leave them 0 */
9722 /* port will send abts */
dcf2a4e0
JS
9723 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9724 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9725 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9726 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9727 } else
9728 fip = 0;
9729
9730 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
4f774513 9731 /*
dcf2a4e0
JS
9732 * The link is down, or the command was ELS_FIP
9733 * so the fw does not need to send abts
4f774513
JS
9734 * on the wire.
9735 */
9736 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9737 else
9738 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9739 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
f0d9bccc
JS
9740 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9741 wqe->abort_cmd.rsrvd5 = 0;
9742 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
4f774513
JS
9743 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9744 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
4f774513
JS
9745 /*
9746 * The abort handler will send us CMD_ABORT_XRI_CN or
9747 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9748 */
f0d9bccc
JS
9749 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9750 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9751 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9752 LPFC_WQE_LENLOC_NONE);
4f774513
JS
9753 cmnd = CMD_ABORT_XRI_CX;
9754 command_type = OTHER_COMMAND;
9755 xritag = 0;
7851fe2c 9756 break;
6669f9bb 9757 case CMD_XMIT_BLS_RSP64_CX:
6b5151fd 9758 ndlp = (struct lpfc_nodelist *)iocbq->context1;
546fc854 9759 /* As BLS ABTS RSP WQE is very different from other WQEs,
6669f9bb
JS
9760 * we re-construct this WQE here based on information in
9761 * iocbq from scratch.
9762 */
9763 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 9764 /* OX_ID is invariable to who sent ABTS to CT exchange */
6669f9bb 9765 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
546fc854
JS
9766 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9767 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
5ffc266e
JS
9768 LPFC_ABTS_UNSOL_INT) {
9769 /* ABTS sent by initiator to CT exchange, the
9770 * RX_ID field will be filled with the newly
9771 * allocated responder XRI.
9772 */
9773 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9774 iocbq->sli4_xritag);
9775 } else {
9776 /* ABTS sent by responder to CT exchange, the
9777 * RX_ID field will be filled with the responder
9778 * RX_ID from ABTS.
9779 */
9780 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
546fc854 9781 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
5ffc266e 9782 }
6669f9bb
JS
9783 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9784 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6b5151fd
JS
9785
9786 /* Use CT=VPI */
9787 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9788 ndlp->nlp_DID);
9789 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9790 iocbq->iocb.ulpContext);
9791 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
6669f9bb 9792 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6b5151fd 9793 phba->vpi_ids[phba->pport->vpi]);
f0d9bccc
JS
9794 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9795 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9796 LPFC_WQE_LENLOC_NONE);
6669f9bb
JS
9798 /* Overwrite the pre-set command type with OTHER_COMMAND */
9798 command_type = OTHER_COMMAND;
546fc854
JS
9799 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9800 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9801 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9802 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9803 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9804 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9805 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9806 }
9807
7851fe2c 9808 break;
ae9e28f3
JS
9809 case CMD_SEND_FRAME:
9810 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9811 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9812 return 0;
4f774513
JS
9813 case CMD_XRI_ABORTED_CX:
9814 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
4f774513
JS
9815 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9816 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9817 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9818 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9819 default:
9820 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9821 "2014 Invalid command 0x%x\n",
9822 iocbq->iocb.ulpCommand);
9823 return IOCB_ERROR;
7851fe2c 9824 break;
4f774513 9825 }
6d368e53 9826
8012cc38
JS
9827 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9828 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9829 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9830 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9831 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9832 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9833 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9834 LPFC_IO_DIF_INSERT);
f0d9bccc
JS
9835 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9836 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9837 wqe->generic.wqe_com.abort_tag = abort_tag;
9838 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9839 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9840 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9841 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
4f774513
JS
9842 return 0;
9843}
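
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * mask/shift bit-field accessor pattern that bf_set()/bf_get() apply to
 * the WQE words above. The struct, field name, shift and mask here are
 * hypothetical stand-ins for the generated lpfc definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_SHIFT 8
#define DEMO_FIELD_MASK  0x000000ff

struct demo_wqe_word {
	uint32_t word7;
};

/* Pack 'val' into the field's bit range without disturbing other bits */
static void demo_bf_set(struct demo_wqe_word *w, uint32_t val)
{
	w->word7 = (w->word7 & ~(DEMO_FIELD_MASK << DEMO_FIELD_SHIFT)) |
		   ((val & DEMO_FIELD_MASK) << DEMO_FIELD_SHIFT);
}

/* Extract the field back out of the packed word */
static uint32_t demo_bf_get(const struct demo_wqe_word *w)
{
	return (w->word7 >> DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK;
}

int main(void)
{
	struct demo_wqe_word w = { .word7 = 0xdeadbeef };

	demo_bf_set(&w, 0x5a);
	printf("word7=0x%08x field=0x%02x\n", w.word7, demo_bf_get(&w));
	return 0;
}
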
9844
9845/**
9846 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9847 * @phba: Pointer to HBA context object.
9848 * @ring_number: SLI ring number to issue iocb on.
9849 * @piocb: Pointer to command iocb.
9850 * @flag: Flag indicating if this command can be put into txq.
9851 *
9852 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9853 * an iocb command to an HBA with SLI-4 interface spec.
9854 *
9855 * This function is called with hbalock held. The function will return success
9856 * after it successfully submits the iocb to firmware or after adding it to the
9857 * txq.
9858 **/
9859static int
9860__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9861 struct lpfc_iocbq *piocb, uint32_t flag)
9862{
9863 struct lpfc_sglq *sglq;
205e8240 9864 union lpfc_wqe128 wqe;
1ba981fd 9865 struct lpfc_queue *wq;
895427bd 9866 struct lpfc_sli_ring *pring;
4f774513 9867
895427bd
JS
9868 /* Get the WQ */
9869 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9870 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
cdb42bec 9871 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
895427bd
JS
9872 } else {
9873 wq = phba->sli4_hba.els_wq;
9874 }
9875
9876 /* Get corresponding ring */
9877 pring = wq->pring;
1c2ba475 9878
b5c53958
JS
9879 /*
9880 * The WQE can be either 64 or 128 bytes.
b5c53958 9881 */
b5c53958 9882
895427bd
JS
9883 lockdep_assert_held(&phba->hbalock);
9884
4f774513
JS
9885 if (piocb->sli4_xritag == NO_XRI) {
9886 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6b5151fd 9887 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
9888 sglq = NULL;
9889 else {
0e9bb8d7 9890 if (!list_empty(&pring->txq)) {
2a9bf3d0
JS
9891 if (!(flag & SLI_IOCB_RET_IOCB)) {
9892 __lpfc_sli_ringtx_put(phba,
9893 pring, piocb);
9894 return IOCB_SUCCESS;
9895 } else {
9896 return IOCB_BUSY;
9897 }
9898 } else {
895427bd 9899 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
2a9bf3d0
JS
9900 if (!sglq) {
9901 if (!(flag & SLI_IOCB_RET_IOCB)) {
9902 __lpfc_sli_ringtx_put(phba,
9903 pring,
9904 piocb);
9905 return IOCB_SUCCESS;
9906 } else
9907 return IOCB_BUSY;
9908 }
9909 }
4f774513 9910 }
2ea259ee 9911 } else if (piocb->iocb_flag & LPFC_IO_FCP)
6d368e53
JS
9912 /* These IOs already have an XRI and a mapped sgl. */
9913 sglq = NULL;
2ea259ee 9914 else {
6d368e53
JS
9915 /*
9916 * This is a continuation of a command (CX), so this
4f774513
JS
9917 * sglq is on the active list
9918 */
edccdc17 9919 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
4f774513
JS
9920 if (!sglq)
9921 return IOCB_ERROR;
9922 }
9923
9924 if (sglq) {
6d368e53 9925 piocb->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0 9926 piocb->sli4_xritag = sglq->sli4_xritag;
2a9bf3d0 9927 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
4f774513
JS
9928 return IOCB_ERROR;
9929 }
9930
205e8240 9931 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
4f774513
JS
9932 return IOCB_ERROR;
9933
205e8240 9934 if (lpfc_sli4_wq_put(wq, &wqe))
895427bd 9935 return IOCB_ERROR;
4f774513
JS
9936 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9937
9938 return 0;
9939}
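
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * submit-or-defer policy in __lpfc_sli_issue_iocb_s4() above: post to
 * the work queue when there is room; when there is not, either park the
 * request on the software txq (the default) or report BUSY when the
 * caller passed the "return busy" flag. All names are invented.
 */
#include <stdio.h>

#define DEMO_WQ_DEPTH  4
#define DEMO_RET_BUSY  0x1	/* analogous in spirit to SLI_IOCB_RET_IOCB */

enum { DEMO_SUCCESS, DEMO_BUSY };

static int wq_fill;		/* entries currently in the "hardware" WQ */
static int txq_len;		/* software backlog */

static int demo_issue(int flag)
{
	if (wq_fill < DEMO_WQ_DEPTH) {
		wq_fill++;		/* room in the WQ: post directly */
		return DEMO_SUCCESS;
	}
	if (flag & DEMO_RET_BUSY)	/* caller wants an immediate answer */
		return DEMO_BUSY;
	txq_len++;			/* otherwise park it on the txq */
	return DEMO_SUCCESS;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("io %d -> %s\n", i,
		       demo_issue(i == 5 ? DEMO_RET_BUSY : 0) == DEMO_BUSY ?
		       "BUSY" : "queued");
	printf("wq=%d txq=%d\n", wq_fill, txq_len);
	return 0;
}
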
9940
9941/**
9942 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9943 *
9944 * This routine wraps the actual lockless version for issuing an IOCB,
9945 * dispatching through the function pointer in the lpfc_hba struct.
9946 *
9947 * Return codes:
b5c53958
JS
9948 * IOCB_ERROR - Error
9949 * IOCB_SUCCESS - Success
9950 * IOCB_BUSY - Busy
4f774513 9951 **/
2a9bf3d0 9952int
4f774513
JS
9953__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9954 struct lpfc_iocbq *piocb, uint32_t flag)
9955{
9956 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9957}
9958
9959/**
25985edc 9960 * lpfc_sli_api_table_setup - Set up sli api function jump table
4f774513
JS
9961 * @phba: The hba struct for which this call is being executed.
9962 * @dev_grp: The HBA PCI-Device group number.
9963 *
9964 * This routine sets up the SLI interface API function jump table in @phba
9965 * struct.
9966 * Returns: 0 - success, -ENODEV - failure.
9967 **/
9968int
9969lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9970{
9971
9972 switch (dev_grp) {
9973 case LPFC_PCI_DEV_LP:
9974 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9975 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9976 break;
9977 case LPFC_PCI_DEV_OC:
9978 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9979 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9980 break;
9981 default:
9982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9983 "1419 Invalid HBA PCI-device group: 0x%x\n",
9984 dev_grp);
9985 return -ENODEV;
9986 break;
9987 }
9988 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9989 return 0;
9990}
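
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * per-device jump-table pattern set up above: pick the SLI-3 or SLI-4
 * handler once at attach time, then dispatch through the stored
 * function pointer. The types and device-group values are invented.
 */
#include <stdio.h>

struct demo_hba;
typedef int (*demo_issue_fn)(struct demo_hba *);

struct demo_hba {
	demo_issue_fn issue_iocb;	/* filled in by table setup */
};

static int demo_issue_s3(struct demo_hba *hba) { (void)hba; puts("SLI-3 path"); return 0; }
static int demo_issue_s4(struct demo_hba *hba) { (void)hba; puts("SLI-4 path"); return 0; }

static int demo_api_table_setup(struct demo_hba *hba, int dev_grp)
{
	switch (dev_grp) {
	case 0:				/* stand-in for LPFC_PCI_DEV_LP */
		hba->issue_iocb = demo_issue_s3;
		break;
	case 1:				/* stand-in for LPFC_PCI_DEV_OC */
		hba->issue_iocb = demo_issue_s4;
		break;
	default:
		return -1;		/* unknown device group */
	}
	return 0;
}

int main(void)
{
	struct demo_hba hba = { 0 };

	if (!demo_api_table_setup(&hba, 1))
		hba.issue_iocb(&hba);	/* dispatches to the SLI-4 handler */
	return 0;
}
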
9991
a1efe163 9992/**
895427bd 9993 * lpfc_sli4_calc_ring - Calculates which ring to use
a1efe163 9994 * @phba: Pointer to HBA context object.
a1efe163
JS
9995 * @piocb: Pointer to command iocb.
9996 *
895427bd
JS
9997 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9998 * hba_wqidx; thus we need to calculate the corresponding ring.
a1efe163 9999 * Since ABORTS must go on the same WQ of the command they are
895427bd 10000 * aborting, we use command's hba_wqidx.
a1efe163 10001 */
895427bd
JS
10002struct lpfc_sli_ring *
10003lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9bd2bff5 10004{
895427bd 10005 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
cdb42bec 10006 if (unlikely(!phba->sli4_hba.hdwq))
7370d10a
JS
10007 return NULL;
10008 /*
10009 * for abort iocb hba_wqidx should already
10010 * be setup based on what work queue we used.
10011 */
10012 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10013 piocb->hba_wqidx =
10014 lpfc_sli4_scmd_to_wqidx_distr(
10015 phba, piocb->context1);
10016 piocb->hba_wqidx = piocb->hba_wqidx %
cdb42bec 10017 phba->cfg_hdw_queue;
7370d10a 10018 }
cdb42bec 10019 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
895427bd
JS
10020 } else {
10021 if (unlikely(!phba->sli4_hba.els_wq))
10022 return NULL;
10023 piocb->hba_wqidx = 0;
10024 return phba->sli4_hba.els_wq->pring;
9bd2bff5 10025 }
9bd2bff5
JS
10026}
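
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * modulo fold above: whatever distribution value was computed for an IO
 * (CPU id, tag, ...) is reduced into the valid hardware-queue range, so
 * every IO lands on exactly one of cfg_hdw_queue WQs. The queue count
 * is invented.
 */
#include <stdio.h>

#define DEMO_HDW_QUEUES 4	/* stand-in for phba->cfg_hdw_queue */

static unsigned int demo_io_to_wqidx(unsigned int distr)
{
	return distr % DEMO_HDW_QUEUES;
}

int main(void)
{
	unsigned int tag;

	for (tag = 0; tag < 8; tag++)
		printf("tag %u -> hdwq %u\n", tag, demo_io_to_wqidx(tag));
	return 0;
}
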
10027
4f774513
JS
10028/**
10029 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10030 * @phba: Pointer to HBA context object.
10031 * @ring_number: SLI ring number to issue iocb on.
10032 * @piocb: Pointer to command iocb.
10033 * @flag: Flag indicating if this command can be put into txq.
10034 *
10035 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10036 * function. This function gets the hbalock and calls
10037 * __lpfc_sli_issue_iocb function and will return the error returned
10038 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10039 * functions which do not hold hbalock.
10040 **/
10041int
10042lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10043 struct lpfc_iocbq *piocb, uint32_t flag)
10044{
895427bd 10045 struct lpfc_hba_eq_hdl *hba_eq_hdl;
2a76a283 10046 struct lpfc_sli_ring *pring;
ba20c853
JS
10047 struct lpfc_queue *fpeq;
10048 struct lpfc_eqe *eqe;
4f774513 10049 unsigned long iflags;
2a76a283 10050 int rc, idx;
4f774513 10051
7e56aa25 10052 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
10053 pring = lpfc_sli4_calc_ring(phba, piocb);
10054 if (unlikely(pring == NULL))
9bd2bff5 10055 return IOCB_ERROR;
ba20c853 10056
9bd2bff5
JS
10057 spin_lock_irqsave(&pring->ring_lock, iflags);
10058 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10059 spin_unlock_irqrestore(&pring->ring_lock, iflags);
ba20c853 10060
9bd2bff5 10061 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
895427bd
JS
10062 idx = piocb->hba_wqidx;
10063 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
4f774513 10064
895427bd 10065 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
ba20c853 10066
9bd2bff5 10067 /* Get associated EQ with this index */
cdb42bec 10068 fpeq = phba->sli4_hba.hdwq[idx].hba_eq;
ba20c853 10069
9bd2bff5 10070 /* Turn off interrupts from this EQ */
b71413dd 10071 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
ba20c853 10072
9bd2bff5
JS
10073 /*
10074 * Process all the events on FCP EQ
10075 */
10076 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
10077 lpfc_sli4_hba_handle_eqe(phba,
10078 eqe, idx);
10079 fpeq->EQ_processed++;
ba20c853 10080 }
ba20c853 10081
9bd2bff5 10082 /* Always clear and re-arm the EQ */
b71413dd 10083 phba->sli4_hba.sli4_eq_release(fpeq,
9bd2bff5
JS
10084 LPFC_QUEUE_REARM);
10085 }
895427bd 10086 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
2a76a283 10087 }
7e56aa25
JS
10088 } else {
10089 /* For now, SLI2/3 will still use hbalock */
10090 spin_lock_irqsave(&phba->hbalock, iflags);
10091 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10092 spin_unlock_irqrestore(&phba->hbalock, iflags);
10093 }
4f774513
JS
10094 return rc;
10095}
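
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * look-ahead EQ drain in lpfc_sli_issue_iocb() above: mask the event
 * queue's interrupt, consume every pending entry, then clear and re-arm
 * it. A plain array stands in for the hardware EQ.
 */
#include <stdio.h>

#define DEMO_EQ_DEPTH 8

static int eq[DEMO_EQ_DEPTH];
static int eq_head, eq_tail;		/* head == tail means empty */
static int irq_armed = 1;

static int *demo_eq_get(void)
{
	if (eq_head == eq_tail)
		return NULL;
	return &eq[eq_head++ % DEMO_EQ_DEPTH];
}

static void demo_drain_and_rearm(void)
{
	int *eqe;

	irq_armed = 0;			/* turn off interrupts from this EQ */
	while ((eqe = demo_eq_get()) != NULL)
		printf("handled event %d\n", *eqe);	/* process everything */
	irq_armed = 1;			/* always clear and re-arm the EQ */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		eq[eq_tail++ % DEMO_EQ_DEPTH] = i;
	demo_drain_and_rearm();
	printf("EQ re-armed: %d\n", irq_armed);
	return 0;
}
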
10096
10097/**
10098 * lpfc_extra_ring_setup - Extra ring setup function
10099 * @phba: Pointer to HBA context object.
10100 *
10101 * This function is called while the driver attaches to the
10102 * HBA to set up the extra ring. The extra ring is used
10103 * only when the driver needs to support target mode
10104 * or IP over FC functionality.
10105 *
895427bd 10106 * This function is called with no lock held. SLI3 only.
4f774513
JS
10107 **/
10108static int
10109lpfc_extra_ring_setup( struct lpfc_hba *phba)
10110{
10111 struct lpfc_sli *psli;
10112 struct lpfc_sli_ring *pring;
10113
10114 psli = &phba->sli;
10115
10116 /* Adjust cmd/rsp ring iocb entries more evenly */
10117
10118 /* Take some away from the FCP ring */
895427bd 10119 pring = &psli->sli3_ring[LPFC_FCP_RING];
7e56aa25
JS
10120 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10121 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10122 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10123 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e 10124
a4bc3379 10125 /* and give them to the extra ring */
895427bd 10126 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
a4bc3379 10127
7e56aa25
JS
10128 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10129 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10130 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10131 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
cf5bf97e
JW
10132
10133 /* Setup default profile for this ring */
10134 pring->iotag_max = 4096;
10135 pring->num_mask = 1;
10136 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
10137 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10138 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
10139 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10140 return 0;
10141}
10142
cb69f7de
JS
10143/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10144 * @phba: Pointer to HBA context object.
10145 * @iocbq: Pointer to iocb object.
10146 *
10147 * The async_event handler calls this routine when it receives
10148 * an ASYNC_STATUS_CN event from the port. The port generates
10149 * this event when an Abort Sequence request to an rport fails
10150 * twice in succession. The abort could be originated by the
10151 * driver or by the port. The ABTS could have been for an ELS
10152 * or FCP IO. The port only generates this event when an ABTS
10153 * fails to complete after one retry.
10154 */
10155static void
10156lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10157 struct lpfc_iocbq *iocbq)
10158{
10159 struct lpfc_nodelist *ndlp = NULL;
10160 uint16_t rpi = 0, vpi = 0;
10161 struct lpfc_vport *vport = NULL;
10162
10163 /* The rpi in the ulpContext is vport-sensitive. */
10164 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10165 rpi = iocbq->iocb.ulpContext;
10166
10167 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10168 "3092 Port generated ABTS async event "
10169 "on vpi %d rpi %d status 0x%x\n",
10170 vpi, rpi, iocbq->iocb.ulpStatus);
10171
10172 vport = lpfc_find_vport_by_vpid(phba, vpi);
10173 if (!vport)
10174 goto err_exit;
10175 ndlp = lpfc_findnode_rpi(vport, rpi);
10176 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10177 goto err_exit;
10178
10179 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10180 lpfc_sli_abts_recover_port(vport, ndlp);
10181 return;
10182
10183 err_exit:
10184 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10185 "3095 Event Context not found, no "
10186 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10187 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10188 vpi, rpi);
10189}
10190
10191/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10192 * @phba: pointer to HBA context object.
10193 * @ndlp: nodelist pointer for the impacted rport.
10194 * @axri: pointer to the wcqe containing the failed exchange.
10195 *
10196 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10197 * port. The port generates this event when an abort exchange request to an
10198 * rport fails twice in succession with no reply. The abort could be originated
10199 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10200 */
10201void
10202lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10203 struct lpfc_nodelist *ndlp,
10204 struct sli4_wcqe_xri_aborted *axri)
10205{
10206 struct lpfc_vport *vport;
5c1db2ac 10207 uint32_t ext_status = 0;
cb69f7de 10208
6b5151fd 10209 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cb69f7de
JS
10210 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10211 "3115 Node Context not found, driver "
10212 "ignoring abts err event\n");
6b5151fd
JS
10213 return;
10214 }
10215
cb69f7de
JS
10216 vport = ndlp->vport;
10217 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10218 "3116 Port generated FCP XRI ABORT event on "
5c1db2ac 10219 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8e668af5 10220 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
cb69f7de 10221 bf_get(lpfc_wcqe_xa_xri, axri),
5c1db2ac
JS
10222 bf_get(lpfc_wcqe_xa_status, axri),
10223 axri->parameter);
cb69f7de 10224
5c1db2ac
JS
10225 /*
10226 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10227 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10228 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10229 */
e3d2b802 10230 ext_status = axri->parameter & IOERR_PARAM_MASK;
5c1db2ac
JS
10231 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10232 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
cb69f7de
JS
10233 lpfc_sli_abts_recover_port(vport, ndlp);
10234}
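
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * two-level status decode above: a completion carries a primary status
 * plus a parameter word whose masked low bits hold an extended IOERR
 * reason. The constant values below are invented for the demo, not the
 * driver's real encodings.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_STAT_LOCAL_REJECT	0x03
#define DEMO_PARAM_MASK		0x00ffffff
#define DEMO_ERR_SEQ_TIMEOUT	0x0b

static int demo_abts_protocol_failure(uint32_t status, uint32_t parameter)
{
	uint32_t ext = parameter & DEMO_PARAM_MASK;

	/* Older FW: LOCAL_REJECT + 0; later FW: LOCAL_REJECT + timeout */
	return status == DEMO_STAT_LOCAL_REJECT &&
	       (ext == DEMO_ERR_SEQ_TIMEOUT || ext == 0);
}

int main(void)
{
	printf("%d %d\n",
	       demo_abts_protocol_failure(DEMO_STAT_LOCAL_REJECT, 0),
	       demo_abts_protocol_failure(0x01, DEMO_ERR_SEQ_TIMEOUT));
	return 0;
}
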
10235
e59058c4 10236/**
3621a710 10237 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
10238 * @phba: Pointer to HBA context object.
10239 * @pring: Pointer to driver SLI ring object.
10240 * @iocbq: Pointer to iocb object.
10241 *
10242 * This function is called by the slow ring event handler
10243 * function when there is an ASYNC event iocb in the ring.
10244 * This function is called with no lock held.
10245 * Currently this function handles only temperature related
10246 * ASYNC events. The function decodes the temperature sensor
10247 * event message and posts events for the management applications.
10248 **/
98c9ea5c 10249static void
57127f15
JS
10250lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10251 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10252{
10253 IOCB_t *icmd;
10254 uint16_t evt_code;
57127f15
JS
10255 struct temp_event temp_event_data;
10256 struct Scsi_Host *shost;
a257bf90 10257 uint32_t *iocb_w;
57127f15
JS
10258
10259 icmd = &iocbq->iocb;
10260 evt_code = icmd->un.asyncstat.evt_code;
57127f15 10261
cb69f7de
JS
10262 switch (evt_code) {
10263 case ASYNC_TEMP_WARN:
10264 case ASYNC_TEMP_SAFE:
10265 temp_event_data.data = (uint32_t) icmd->ulpContext;
10266 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10267 if (evt_code == ASYNC_TEMP_WARN) {
10268 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10269 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10270 "0347 Adapter is very hot, please take "
10271 "corrective action. temperature : %d Celsius\n",
10272 (uint32_t) icmd->ulpContext);
10273 } else {
10274 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10275 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10276 "0340 Adapter temperature is OK now. "
10277 "temperature : %d Celsius\n",
10278 (uint32_t) icmd->ulpContext);
10279 }
10280
10281 /* Send temperature change event to applications */
10282 shost = lpfc_shost_from_vport(phba->pport);
10283 fc_host_post_vendor_event(shost, fc_get_event_number(),
10284 sizeof(temp_event_data), (char *) &temp_event_data,
10285 LPFC_NL_VENDOR_ID);
10286 break;
10287 case ASYNC_STATUS_CN:
10288 lpfc_sli_abts_err_handler(phba, iocbq);
10289 break;
10290 default:
a257bf90 10291 iocb_w = (uint32_t *) icmd;
cb69f7de 10292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
76bb24ef 10293 "0346 Ring %d handler: unexpected ASYNC_STATUS"
e4e74273 10294 " evt_code 0x%x\n"
a257bf90
JS
10295 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10296 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10297 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10298 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
cb69f7de 10299 pring->ringno, icmd->un.asyncstat.evt_code,
a257bf90
JS
10300 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10301 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10302 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10303 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10304
cb69f7de 10305 break;
57127f15 10306 }
57127f15
JS
10307}
10308
10309
e59058c4 10310/**
895427bd 10311 * lpfc_sli4_setup - SLI ring setup function
e59058c4
JS
10312 * @phba: Pointer to HBA context object.
10313 *
10314 * lpfc_sli4_setup sets up rings of the SLI interface with
10315 * number of iocbs per ring and iotags. This function is
10316 * called while the driver attaches to the HBA and before the
10317 * interrupts are enabled. So there is no need for locking.
10318 *
10319 * This function always returns 0.
10320 **/
dea3101e 10321int
895427bd
JS
10322lpfc_sli4_setup(struct lpfc_hba *phba)
10323{
10324 struct lpfc_sli_ring *pring;
10325
10326 pring = phba->sli4_hba.els_wq->pring;
10327 pring->num_mask = LPFC_MAX_RING_MASK;
10328 pring->prt[0].profile = 0; /* Mask 0 */
10329 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10330 pring->prt[0].type = FC_TYPE_ELS;
10331 pring->prt[0].lpfc_sli_rcv_unsol_event =
10332 lpfc_els_unsol_event;
10333 pring->prt[1].profile = 0; /* Mask 1 */
10334 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10335 pring->prt[1].type = FC_TYPE_ELS;
10336 pring->prt[1].lpfc_sli_rcv_unsol_event =
10337 lpfc_els_unsol_event;
10338 pring->prt[2].profile = 0; /* Mask 2 */
10339 /* NameServer Inquiry */
10340 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10341 /* NameServer */
10342 pring->prt[2].type = FC_TYPE_CT;
10343 pring->prt[2].lpfc_sli_rcv_unsol_event =
10344 lpfc_ct_unsol_event;
10345 pring->prt[3].profile = 0; /* Mask 3 */
10346 /* NameServer response */
10347 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10348 /* NameServer */
10349 pring->prt[3].type = FC_TYPE_CT;
10350 pring->prt[3].lpfc_sli_rcv_unsol_event =
10351 lpfc_ct_unsol_event;
10352 return 0;
10353}
10354
10355/**
10356 * lpfc_sli_setup - SLI ring setup function
10357 * @phba: Pointer to HBA context object.
10358 *
10359 * lpfc_sli_setup sets up rings of the SLI interface with
10360 * number of iocbs per ring and iotags. This function is
10361 * called while the driver attaches to the HBA and before the
10362 * interrupts are enabled. So there is no need for locking.
10363 *
10364 * This function always returns 0. SLI3 only.
10365 **/
10366int
dea3101e 10367lpfc_sli_setup(struct lpfc_hba *phba)
10368{
ed957684 10369 int i, totiocbsize = 0;
dea3101e 10370 struct lpfc_sli *psli = &phba->sli;
10371 struct lpfc_sli_ring *pring;
10372
2a76a283 10373 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea3101e 10374 psli->sli_flag = 0;
dea3101e 10375
604a3e30
JB
10376 psli->iocbq_lookup = NULL;
10377 psli->iocbq_lookup_len = 0;
10378 psli->last_iotag = 0;
10379
dea3101e 10380 for (i = 0; i < psli->num_rings; i++) {
895427bd 10381 pring = &psli->sli3_ring[i];
dea3101e 10382 switch (i) {
10383 case LPFC_FCP_RING: /* ring 0 - FCP */
10384 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10385 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10386 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10387 pring->sli.sli3.numCiocb +=
10388 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10389 pring->sli.sli3.numRiocb +=
10390 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10391 pring->sli.sli3.numCiocb +=
10392 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10393 pring->sli.sli3.numRiocb +=
10394 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10395 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10396 SLI3_IOCB_CMD_SIZE :
10397 SLI2_IOCB_CMD_SIZE;
7e56aa25 10398 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10399 SLI3_IOCB_RSP_SIZE :
10400 SLI2_IOCB_RSP_SIZE;
dea3101e 10401 pring->iotag_ctr = 0;
10402 pring->iotag_max =
92d7f7b0 10403 (phba->cfg_hba_queue_depth * 2);
dea3101e 10404 pring->fast_iotag = pring->iotag_max;
10405 pring->num_mask = 0;
10406 break;
a4bc3379 10407 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea3101e 10408 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10409 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10410 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10411 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10412 SLI3_IOCB_CMD_SIZE :
10413 SLI2_IOCB_CMD_SIZE;
7e56aa25 10414 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10415 SLI3_IOCB_RSP_SIZE :
10416 SLI2_IOCB_RSP_SIZE;
2e0fef85 10417 pring->iotag_max = phba->cfg_hba_queue_depth;
dea3101e 10418 pring->num_mask = 0;
10419 break;
10420 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10421 /* numCiocb and numRiocb are used in config_port */
7e56aa25
JS
10422 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10423 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10424 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10425 SLI3_IOCB_CMD_SIZE :
10426 SLI2_IOCB_CMD_SIZE;
7e56aa25 10427 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
92d7f7b0
JS
10428 SLI3_IOCB_RSP_SIZE :
10429 SLI2_IOCB_RSP_SIZE;
dea3101e 10430 pring->fast_iotag = 0;
10431 pring->iotag_ctr = 0;
10432 pring->iotag_max = 4096;
57127f15
JS
10433 pring->lpfc_sli_rcv_async_status =
10434 lpfc_sli_async_event_handler;
6669f9bb 10435 pring->num_mask = LPFC_MAX_RING_MASK;
dea3101e 10436 pring->prt[0].profile = 0; /* Mask 0 */
6a9c52cf
JS
10437 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10438 pring->prt[0].type = FC_TYPE_ELS;
dea3101e 10439 pring->prt[0].lpfc_sli_rcv_unsol_event =
92d7f7b0 10440 lpfc_els_unsol_event;
dea3101e 10441 pring->prt[1].profile = 0; /* Mask 1 */
6a9c52cf
JS
10442 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10443 pring->prt[1].type = FC_TYPE_ELS;
dea3101e 10444 pring->prt[1].lpfc_sli_rcv_unsol_event =
92d7f7b0 10445 lpfc_els_unsol_event;
dea3101e 10446 pring->prt[2].profile = 0; /* Mask 2 */
10447 /* NameServer Inquiry */
6a9c52cf 10448 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea3101e 10449 /* NameServer */
6a9c52cf 10450 pring->prt[2].type = FC_TYPE_CT;
dea3101e 10451 pring->prt[2].lpfc_sli_rcv_unsol_event =
92d7f7b0 10452 lpfc_ct_unsol_event;
dea3101e 10453 pring->prt[3].profile = 0; /* Mask 3 */
10454 /* NameServer response */
6a9c52cf 10455 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea3101e 10456 /* NameServer */
6a9c52cf 10457 pring->prt[3].type = FC_TYPE_CT;
dea3101e 10458 pring->prt[3].lpfc_sli_rcv_unsol_event =
92d7f7b0 10459 lpfc_ct_unsol_event;
dea3101e 10460 break;
10461 }
7e56aa25
JS
10462 totiocbsize += (pring->sli.sli3.numCiocb *
10463 pring->sli.sli3.sizeCiocb) +
10464 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea3101e 10465 }
ed957684 10466 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea3101e 10467 /* Too many cmd / rsp ring entries in SLI2 SLIM */
e8b62011
JS
10468 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10469 "SLI2 SLIM Data: x%x x%lx\n",
10470 phba->brd_no, totiocbsize,
10471 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea3101e 10472 }
cf5bf97e
JW
10473 if (phba->cfg_multi_ring_support == 2)
10474 lpfc_extra_ring_setup(phba);
dea3101e 10475
10476 return 0;
10477}
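
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * totiocbsize check at the end of lpfc_sli_setup() above: each ring
 * contributes (entries * entry size) for both its command and response
 * sides, and the sum must fit the SLIM budget. All numbers are
 * invented.
 */
#include <stdio.h>

#define DEMO_SLIM_BUDGET 4096

struct demo_ring {
	int num_cmd, size_cmd;		/* command side: count, entry size */
	int num_rsp, size_rsp;		/* response side: count, entry size */
};

int main(void)
{
	struct demo_ring rings[] = {
		{ 64, 32, 64, 32 },	/* a "FCP"-like ring */
		{ 16, 32, 16, 32 },	/* an "ELS"-like ring */
	};
	int i, total = 0;

	for (i = 0; i < 2; i++)
		total += rings[i].num_cmd * rings[i].size_cmd +
			 rings[i].num_rsp * rings[i].size_rsp;

	if (total > DEMO_SLIM_BUDGET)
		printf("too many ring entries: %d > %d\n",
		       total, DEMO_SLIM_BUDGET);
	else
		printf("rings fit: %d <= %d\n", total, DEMO_SLIM_BUDGET);
	return 0;
}
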
10478
e59058c4 10479/**
895427bd 10480 * lpfc_sli4_queue_init - Queue initialization function
e59058c4
JS
10481 * @phba: Pointer to HBA context object.
10482 *
895427bd 10483 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
e59058c4
JS
10484 * ring. This function also initializes ring indices of each ring.
10485 * This function is called during the initialization of the SLI
10486 * interface of an HBA.
10487 * This function is called with no lock held.
10489 **/
895427bd
JS
10490void
10491lpfc_sli4_queue_init(struct lpfc_hba *phba)
dea3101e 10492{
10493 struct lpfc_sli *psli;
10494 struct lpfc_sli_ring *pring;
604a3e30 10495 int i;
dea3101e 10496
10497 psli = &phba->sli;
2e0fef85 10498 spin_lock_irq(&phba->hbalock);
dea3101e 10499 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10500 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10501 /* Initialize list headers for txq and txcmplq as doubly linked lists */
cdb42bec
JS
10502 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10503 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
895427bd
JS
10504 pring->flag = 0;
10505 pring->ringno = LPFC_FCP_RING;
10506 INIT_LIST_HEAD(&pring->txq);
10507 INIT_LIST_HEAD(&pring->txcmplq);
10508 INIT_LIST_HEAD(&pring->iocb_continueq);
10509 spin_lock_init(&pring->ring_lock);
10510 }
10511 pring = phba->sli4_hba.els_wq->pring;
10512 pring->flag = 0;
10513 pring->ringno = LPFC_ELS_RING;
10514 INIT_LIST_HEAD(&pring->txq);
10515 INIT_LIST_HEAD(&pring->txcmplq);
10516 INIT_LIST_HEAD(&pring->iocb_continueq);
10517 spin_lock_init(&pring->ring_lock);
dea3101e 10518
cdb42bec
JS
10519 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10520 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10521 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10522 pring->flag = 0;
10523 pring->ringno = LPFC_FCP_RING;
10524 INIT_LIST_HEAD(&pring->txq);
10525 INIT_LIST_HEAD(&pring->txcmplq);
10526 INIT_LIST_HEAD(&pring->iocb_continueq);
10527 spin_lock_init(&pring->ring_lock);
10528 }
895427bd
JS
10529 pring = phba->sli4_hba.nvmels_wq->pring;
10530 pring->flag = 0;
10531 pring->ringno = LPFC_ELS_RING;
10532 INIT_LIST_HEAD(&pring->txq);
10533 INIT_LIST_HEAD(&pring->txcmplq);
10534 INIT_LIST_HEAD(&pring->iocb_continueq);
10535 spin_lock_init(&pring->ring_lock);
10536 }
10537
895427bd
JS
10538 spin_unlock_irq(&phba->hbalock);
10539}
10540
10541/**
10542 * lpfc_sli_queue_init - Queue initialization function
10543 * @phba: Pointer to HBA context object.
10544 *
10545 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10546 * ring. This function also initializes ring indices of each ring.
10547 * This function is called during the initialization of the SLI
10548 * interface of an HBA.
10549 * This function is called with no lock held.
10551 **/
10552void
10553lpfc_sli_queue_init(struct lpfc_hba *phba)
dea3101e 10554{
10555 struct lpfc_sli *psli;
10556 struct lpfc_sli_ring *pring;
604a3e30 10557 int i;
dea3101e 10558
10559 psli = &phba->sli;
2e0fef85 10560 spin_lock_irq(&phba->hbalock);
dea3101e 10561 INIT_LIST_HEAD(&psli->mboxq);
92d7f7b0 10562 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea3101e 10563 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10564 for (i = 0; i < psli->num_rings; i++) {
895427bd 10565 pring = &psli->sli3_ring[i];
dea3101e 10566 pring->ringno = i;
7e56aa25
JS
10567 pring->sli.sli3.next_cmdidx = 0;
10568 pring->sli.sli3.local_getidx = 0;
10569 pring->sli.sli3.cmdidx = 0;
dea3101e 10570 INIT_LIST_HEAD(&pring->iocb_continueq);
9c2face6 10571 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea3101e 10572 INIT_LIST_HEAD(&pring->postbufq);
895427bd
JS
10573 pring->flag = 0;
10574 INIT_LIST_HEAD(&pring->txq);
10575 INIT_LIST_HEAD(&pring->txcmplq);
7e56aa25 10576 spin_lock_init(&pring->ring_lock);
dea3101e 10577 }
2e0fef85 10578 spin_unlock_irq(&phba->hbalock);
dea3101e 10579}
10580
04c68496
JS
10581/**
10582 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10583 * @phba: Pointer to HBA context object.
10584 *
10585 * This routine flushes the mailbox command subsystem. It will unconditionally
10586 * flush all the mailbox commands in the three possible stages in the mailbox
10587 * command sub-system: pending mailbox command queue; the outstanding mailbox
10588 * command; and completed mailbox command queue. It is caller's responsibility
10589 * to make sure that the driver is in the proper state to flush the mailbox
10590 * command sub-system. Namely, the posting of mailbox commands into the
10591 * pending mailbox command queue from the various clients must be stopped;
10593 * either the HBA is in a state in which it will never work on the outstanding
10593 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10594 * mailbox command has been completed.
10595 **/
10596static void
10597lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10598{
10599 LIST_HEAD(completions);
10600 struct lpfc_sli *psli = &phba->sli;
10601 LPFC_MBOXQ_t *pmb;
10602 unsigned long iflag;
10603
523128e5
JS
10604 /* Disable softirqs, including timers from obtaining phba->hbalock */
10605 local_bh_disable();
10606
04c68496
JS
10607 /* Flush all the mailbox commands in the mbox system */
10608 spin_lock_irqsave(&phba->hbalock, iflag);
523128e5 10609
04c68496
JS
10610 /* The pending mailbox command queue */
10611 list_splice_init(&phba->sli.mboxq, &completions);
10612 /* The outstanding active mailbox command */
10613 if (psli->mbox_active) {
10614 list_add_tail(&psli->mbox_active->list, &completions);
10615 psli->mbox_active = NULL;
10616 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10617 }
10618 /* The completed mailbox command queue */
10619 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10620 spin_unlock_irqrestore(&phba->hbalock, iflag);
10621
523128e5
JS
10622 /* Enable softirqs again, done with phba->hbalock */
10623 local_bh_enable();
10624
04c68496
JS
10625 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10626 while (!list_empty(&completions)) {
10627 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10628 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10629 if (pmb->mbox_cmpl)
10630 pmb->mbox_cmpl(phba, pmb);
10631 }
10632}
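
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * splice-then-complete pattern used above: detach the whole pending
 * queue onto a private list while holding the lock, then run the
 * completion callbacks with the lock dropped. A singly linked list
 * stands in for the kernel's list_head. Build with -lpthread.
 */
#include <stdio.h>
#include <pthread.h>

struct demo_cmd {
	struct demo_cmd *next;
	int id;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_cmd *demo_pending;	/* guarded by demo_lock */

static void demo_flush_pending(void)
{
	struct demo_cmd *batch;

	/* Detach the whole queue while holding the lock... */
	pthread_mutex_lock(&demo_lock);
	batch = demo_pending;
	demo_pending = NULL;
	pthread_mutex_unlock(&demo_lock);

	/* ...then run completions without the lock held. */
	while (batch) {
		struct demo_cmd *c = batch;

		batch = c->next;
		printf("completing cmd %d with NOT_FINISHED status\n", c->id);
	}
}

int main(void)
{
	struct demo_cmd c2 = { NULL, 2 }, c1 = { &c2, 1 };

	demo_pending = &c1;
	demo_flush_pending();
	return 0;
}
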
10633
e59058c4 10634/**
3621a710 10635 * lpfc_sli_host_down - Vport cleanup function
e59058c4
JS
10636 * @vport: Pointer to virtual port object.
10637 *
10638 * lpfc_sli_host_down is called to clean up the resources
10639 * associated with a vport before destroying virtual
10640 * port data structures.
10641 * This function does the following operations:
10642 * - Free discovery resources associated with this virtual
10643 * port.
10644 * - Free iocbs associated with this virtual port in
10645 * the txq.
10646 * - Send abort for all iocb commands associated with this
10647 * vport in txcmplq.
10648 *
10649 * This function is called with no lock held and always returns 1.
10650 **/
92d7f7b0
JS
10651int
10652lpfc_sli_host_down(struct lpfc_vport *vport)
10653{
858c9f6c 10654 LIST_HEAD(completions);
92d7f7b0
JS
10655 struct lpfc_hba *phba = vport->phba;
10656 struct lpfc_sli *psli = &phba->sli;
895427bd 10657 struct lpfc_queue *qp = NULL;
92d7f7b0
JS
10658 struct lpfc_sli_ring *pring;
10659 struct lpfc_iocbq *iocb, *next_iocb;
92d7f7b0
JS
10660 int i;
10661 unsigned long flags = 0;
10662 uint16_t prev_pring_flag;
10663
10664 lpfc_cleanup_discovery_resources(vport);
10665
10666 spin_lock_irqsave(&phba->hbalock, flags);
92d7f7b0 10667
895427bd
JS
10668 /*
10669 * Error everything on the txq since these iocbs
10670 * have not been given to the FW yet.
10671 * Also issue ABTS for everything on the txcmplq
10672 */
10673 if (phba->sli_rev != LPFC_SLI_REV4) {
10674 for (i = 0; i < psli->num_rings; i++) {
10675 pring = &psli->sli3_ring[i];
10676 prev_pring_flag = pring->flag;
10677 /* Only slow rings */
10678 if (pring->ringno == LPFC_ELS_RING) {
10679 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10680 /* Set the lpfc data pending flag */
10681 set_bit(LPFC_DATA_READY, &phba->data_flags);
10682 }
10683 list_for_each_entry_safe(iocb, next_iocb,
10684 &pring->txq, list) {
10685 if (iocb->vport != vport)
10686 continue;
10687 list_move_tail(&iocb->list, &completions);
10688 }
10689 list_for_each_entry_safe(iocb, next_iocb,
10690 &pring->txcmplq, list) {
10691 if (iocb->vport != vport)
10692 continue;
10693 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10694 }
10695 pring->flag = prev_pring_flag;
10696 }
10697 } else {
10698 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10699 pring = qp->pring;
10700 if (!pring)
92d7f7b0 10701 continue;
895427bd
JS
10702 if (pring == phba->sli4_hba.els_wq->pring) {
10703 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10704 /* Set the lpfc data pending flag */
10705 set_bit(LPFC_DATA_READY, &phba->data_flags);
10706 }
10707 prev_pring_flag = pring->flag;
10708 spin_lock_irq(&pring->ring_lock);
10709 list_for_each_entry_safe(iocb, next_iocb,
10710 &pring->txq, list) {
10711 if (iocb->vport != vport)
10712 continue;
10713 list_move_tail(&iocb->list, &completions);
10714 }
10715 spin_unlock_irq(&pring->ring_lock);
10716 list_for_each_entry_safe(iocb, next_iocb,
10717 &pring->txcmplq, list) {
10718 if (iocb->vport != vport)
10719 continue;
10720 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10721 }
10722 pring->flag = prev_pring_flag;
92d7f7b0 10723 }
92d7f7b0 10724 }
92d7f7b0
JS
10725 spin_unlock_irqrestore(&phba->hbalock, flags);
10726
a257bf90
JS
10727 /* Cancel all the IOCBs from the completions list */
10728 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10729 IOERR_SLI_DOWN);
92d7f7b0
JS
10730 return 1;
10731}
10732
e59058c4 10733/**
3621a710 10734 * lpfc_sli_hba_down - Resource cleanup function for the HBA
e59058c4
JS
10735 * @phba: Pointer to HBA context object.
10736 *
10737 * This function cleans up all iocbs, buffers, and mailbox commands
10738 * while shutting down the HBA. This function is called with no
10739 * lock held and always returns 1.
10740 * This function does the following to cleanup driver resources:
10741 * - Free discovery resources for each virtual port
10742 * - Cleanup any pending fabric iocbs
10743 * - Iterate through the iocb txq and free each entry
10744 * in the list.
10745 * - Free up any buffer posted to the HBA
10746 * - Free mailbox commands in the mailbox queue.
10747 **/
dea3101e 10748int
2e0fef85 10749lpfc_sli_hba_down(struct lpfc_hba *phba)
dea3101e 10750{
2534ba75 10751 LIST_HEAD(completions);
2e0fef85 10752 struct lpfc_sli *psli = &phba->sli;
895427bd 10753 struct lpfc_queue *qp = NULL;
dea3101e 10754 struct lpfc_sli_ring *pring;
0ff10d46 10755 struct lpfc_dmabuf *buf_ptr;
dea3101e 10756 unsigned long flags = 0;
04c68496
JS
10757 int i;
10758
10759 /* Shutdown the mailbox command sub-system */
618a5230 10760 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea3101e 10761
dea3101e 10762 lpfc_hba_down_prep(phba);
10763
523128e5
JS
10764 /* Disable softirqs, including timers from obtaining phba->hbalock */
10765 local_bh_disable();
10766
92d7f7b0
JS
10767 lpfc_fabric_abort_hba(phba);
10768
2e0fef85 10769 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 10770
895427bd
JS
10771 /*
10772 * Error everything on the txq since these iocbs
10773 * have not been given to the FW yet.
10774 */
10775 if (phba->sli_rev != LPFC_SLI_REV4) {
10776 for (i = 0; i < psli->num_rings; i++) {
10777 pring = &psli->sli3_ring[i];
10778 /* Only slow rings */
10779 if (pring->ringno == LPFC_ELS_RING) {
10780 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10781 /* Set the lpfc data pending flag */
10782 set_bit(LPFC_DATA_READY, &phba->data_flags);
10783 }
10784 list_splice_init(&pring->txq, &completions);
10785 }
10786 } else {
10787 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10788 pring = qp->pring;
10789 if (!pring)
10790 continue;
10791 spin_lock_irq(&pring->ring_lock);
10792 list_splice_init(&pring->txq, &completions);
10793 spin_unlock_irq(&pring->ring_lock);
10794 if (pring == phba->sli4_hba.els_wq->pring) {
10795 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10796 /* Set the lpfc data pending flag */
10797 set_bit(LPFC_DATA_READY, &phba->data_flags);
10798 }
10799 }
2534ba75 10800 }
2e0fef85 10801 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 10802
a257bf90
JS
10803 /* Cancel all the IOCBs from the completions list */
10804 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10805 IOERR_SLI_DOWN);
dea3101e 10806
0ff10d46
JS
10807 spin_lock_irqsave(&phba->hbalock, flags);
10808 list_splice_init(&phba->elsbuf, &completions);
10809 phba->elsbuf_cnt = 0;
10810 phba->elsbuf_prev_cnt = 0;
10811 spin_unlock_irqrestore(&phba->hbalock, flags);
10812
10813 while (!list_empty(&completions)) {
10814 list_remove_head(&completions, buf_ptr,
10815 struct lpfc_dmabuf, list);
10816 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10817 kfree(buf_ptr);
10818 }
10819
523128e5
JS
10820 /* Enable softirqs again, done with phba->hbalock */
10821 local_bh_enable();
10822
dea3101e 10823 /* Return any active mbox cmds */
10824 del_timer_sync(&psli->mbox_tmo);
2e0fef85 10825
da0436e9 10826 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
2e0fef85 10827 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
da0436e9 10828 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
2e0fef85 10829
da0436e9
JS
10830 return 1;
10831}
10832
e59058c4 10833/**
3621a710 10834 * lpfc_sli_pcimem_bcopy - SLI memory copy function
e59058c4
JS
10835 * @srcp: Source memory pointer.
10836 * @destp: Destination memory pointer.
10837 * @cnt: Number of words required to be copied.
10838 *
10839 * This function is used for copying data between driver memory
10840 * and the SLI memory. This function also changes the endianness
10841 * of each word if native endianness is different from SLI
10842 * endianness. This function can be called with or without
10843 * lock.
10844 **/
dea3101e 10845void
10846lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10847{
10848 uint32_t *src = srcp;
10849 uint32_t *dest = destp;
10850 uint32_t ldata;
10851 int i;
10852
10853 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10854 ldata = *src;
10855 ldata = le32_to_cpu(ldata);
10856 *dest = ldata;
10857 src++;
10858 dest++;
10859 }
10860}
10861
e59058c4 10862
a0c87cbd
JS
10863/**
10864 * lpfc_sli_bemem_bcopy - SLI memory copy function
10865 * @srcp: Source memory pointer.
10866 * @destp: Destination memory pointer.
10867 * @cnt: Number of words required to be copied.
10868 *
10869 * This function is used for copying data between a data structure
10870 * with big endian representation to local endianness.
10871 * This function can be called with or without lock.
10872 **/
10873void
10874lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10875{
10876 uint32_t *src = srcp;
10877 uint32_t *dest = destp;
10878 uint32_t ldata;
10879 int i;
10880
10881 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10882 ldata = *src;
10883 ldata = be32_to_cpu(ldata);
10884 *dest = ldata;
10885 src++;
10886 dest++;
10887 }
10888}
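
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * two word-copy helpers above: the same word-at-a-time loop, differing
 * only in whether each 32-bit word is decoded as little-endian or
 * big-endian. Byte order is decoded explicitly so the demo behaves the
 * same on any host.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_le32(const uint8_t *p)	/* like le32_to_cpu() */
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint32_t demo_be32(const uint8_t *p)	/* like be32_to_cpu() */
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	uint8_t raw[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* le32 view: 0x78563412; be32 view: 0x12345678 */
	printf("le=0x%08x be=0x%08x\n", demo_le32(raw), demo_be32(raw));
	return 0;
}
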
10889
e59058c4 10890/**
3621a710 10891 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
e59058c4
JS
10892 * @phba: Pointer to HBA context object.
10893 * @pring: Pointer to driver SLI ring object.
10894 * @mp: Pointer to driver buffer object.
10895 *
10896 * This function is called with no lock held.
10897 * It always return zero after adding the buffer to the postbufq
10898 * buffer list.
10899 **/
dea3101e 10900int
2e0fef85
JS
10901lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10902 struct lpfc_dmabuf *mp)
dea3101e 10903{
10904 /* Stick struct lpfc_dmabuf at end of postbufq so the driver can
10905 * look it up later */
2e0fef85 10906 spin_lock_irq(&phba->hbalock);
dea3101e 10907 list_add_tail(&mp->list, &pring->postbufq);
dea3101e 10908 pring->postbufq_cnt++;
2e0fef85 10909 spin_unlock_irq(&phba->hbalock);
dea3101e 10910 return 0;
10911}
10912
e59058c4 10913/**
3621a710 10914 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
e59058c4
JS
10915 * @phba: Pointer to HBA context object.
10916 *
10917 * When HBQ is enabled, buffers are searched based on tags. This function
10918 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10919 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10920 * does not conflict with tags of buffer posted for unsolicited events.
10921 * The function returns the allocated tag. The function is called with
10922 * no locks held.
10923 **/
76bb24ef
JS
10924uint32_t
10925lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10926{
10927 spin_lock_irq(&phba->hbalock);
10928 phba->buffer_tag_count++;
10929 /*
10930 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10931 * from a tag assigned by HBQ.
10932 */
10933 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10934 spin_unlock_irq(&phba->hbalock);
10935 return phba->buffer_tag_count;
10936}
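
/*
 * Illustrative sketch (standalone user-space C, not driver code) of the
 * tag scheme in lpfc_sli_get_buffer_tag() above: a monotonically
 * increasing count with a reserved discriminator bit OR'ed in, so
 * driver-assigned tags can never collide with HBQ-assigned ones. The
 * bit position is invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_QUE_BUFTAG_BIT (1u << 31)

static uint32_t demo_buffer_tag_count;

static uint32_t demo_get_buffer_tag(void)
{
	demo_buffer_tag_count++;
	return demo_buffer_tag_count | DEMO_QUE_BUFTAG_BIT;
}

static int demo_is_que_buftag(uint32_t tag)
{
	return (tag & DEMO_QUE_BUFTAG_BIT) != 0;
}

int main(void)
{
	uint32_t tag = demo_get_buffer_tag();

	printf("tag=0x%08x que_buftag=%d\n", tag, demo_is_que_buftag(tag));
	return 0;
}
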
10937
e59058c4 10938/**
3621a710 10939 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
e59058c4
JS
10940 * @phba: Pointer to HBA context object.
10941 * @pring: Pointer to driver SLI ring object.
10942 * @tag: Buffer tag.
10943 *
10944 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10945 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10946 * iocb is posted to the response ring with the tag of the buffer.
10947 * This function searches the pring->postbufq list using the tag
10948 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10949 * iocb. If the buffer is found then the lpfc_dmabuf object of the
10950 * buffer is returned to the caller, else NULL is returned.
10951 * This function is called with no lock held.
10952 **/
76bb24ef
JS
10953struct lpfc_dmabuf *
10954lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10955 uint32_t tag)
10956{
10957 struct lpfc_dmabuf *mp, *next_mp;
10958 struct list_head *slp = &pring->postbufq;
10959
25985edc 10960 /* Search postbufq, from the beginning, looking for a match on tag */
76bb24ef
JS
10961 spin_lock_irq(&phba->hbalock);
10962 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10963 if (mp->buffer_tag == tag) {
10964 list_del_init(&mp->list);
10965 pring->postbufq_cnt--;
10966 spin_unlock_irq(&phba->hbalock);
10967 return mp;
10968 }
10969 }
10970
10971 spin_unlock_irq(&phba->hbalock);
10972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 10973 "0402 Cannot find virtual addr for buffer tag on "
76bb24ef
JS
10974 "ring %d Data x%lx x%p x%p x%x\n",
10975 pring->ringno, (unsigned long) tag,
10976 slp->next, slp->prev, pring->postbufq_cnt);
10977
10978 return NULL;
10979}
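
/*
 * Illustrative sketch, not part of the driver: the intended life cycle of
 * a tagged buffer. A hypothetical caller tags an lpfc_dmabuf with
 * lpfc_sli_get_buffer_tag(), posts it via lpfc_sli_ringpostbuf_put(), and,
 * when a CMD_IOCB_RET_XRI64_CX response later carries the same tag,
 * recovers it with lpfc_sli_ring_taggedbuf_get().
 */
static __maybe_unused struct lpfc_dmabuf *
lpfc_example_tag_and_post(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_dmabuf *mp)
{
	uint32_t tag = lpfc_sli_get_buffer_tag(phba);

	mp->buffer_tag = tag;	/* matched against the response iocb */
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ... later, from the response path, using the returned tag ... */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
}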
dea3101e 10980
e59058c4 10981/**
3621a710 10982 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
e59058c4
JS
10983 * @phba: Pointer to HBA context object.
10984 * @pring: Pointer to driver SLI ring object.
10985 * @phys: DMA address of the buffer.
10986 *
10987 * This function searches the buffer list using the dma_address
10988 * of the unsolicited event to find the driver's lpfc_dmabuf object
10989 * corresponding to the dma_address. The function returns the
10990 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
10991 * This function is called by the ct and els unsolicited event
10992 * handlers to get the buffer associated with the unsolicited
10993 * event.
10994 *
10995 * This function is called with no lock held.
10996 **/
dea3101e 10997struct lpfc_dmabuf *
10998lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10999 dma_addr_t phys)
11000{
11001 struct lpfc_dmabuf *mp, *next_mp;
11002 struct list_head *slp = &pring->postbufq;
11003
25985edc 11004 /* Search postbufq, from the beginning, looking for a match on phys */
2e0fef85 11005 spin_lock_irq(&phba->hbalock);
dea3101e 11006 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11007 if (mp->phys == phys) {
11008 list_del_init(&mp->list);
11009 pring->postbufq_cnt--;
2e0fef85 11010 spin_unlock_irq(&phba->hbalock);
dea3101e 11011 return mp;
11012 }
11013 }
11014
2e0fef85 11015 spin_unlock_irq(&phba->hbalock);
dea3101e 11016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 11017 "0410 Cannot find virtual addr for mapped buf on "
dea3101e 11018 "ring %d Data x%llx x%p x%p x%x\n",
e8b62011 11019 pring->ringno, (unsigned long long)phys,
dea3101e 11020 slp->next, slp->prev, pring->postbufq_cnt);
11021 return NULL;
11022}
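
/*
 * Illustrative sketch, not part of the driver: how an unsolicited CT or
 * ELS event handler might recover its posted buffer. The DMA address is
 * assumed to have been taken from the unsolicited iocb's buffer pointer.
 */
static __maybe_unused void
lpfc_example_handle_unsol(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  dma_addr_t phys)
{
	struct lpfc_dmabuf *mp;

	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
	if (!mp)
		return;	/* lookup failure is already logged above */

	/* process mp->virt here, then free or repost the buffer */
}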
11023
e59058c4 11024/**
3621a710 11025 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
e59058c4
JS
11026 * @phba: Pointer to HBA context object.
11027 * @cmdiocb: Pointer to driver command iocb object.
11028 * @rspiocb: Pointer to driver response iocb object.
11029 *
11030 * This function is the completion handler for the abort iocbs for
11031 * ELS commands. This function is called from the ELS ring event
11032 * handler with no lock held. This function frees memory resources
11033 * associated with the abort iocb.
11034 **/
dea3101e 11035static void
2e0fef85
JS
11036lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11037 struct lpfc_iocbq *rspiocb)
dea3101e 11038{
2e0fef85 11039 IOCB_t *irsp = &rspiocb->iocb;
2680eeaa 11040 uint16_t abort_iotag, abort_context;
ff78d8f9 11041 struct lpfc_iocbq *abort_iocb = NULL;
2680eeaa
JS
11042
11043 if (irsp->ulpStatus) {
ff78d8f9
JS
11044
11045 /*
11046 * Assume that the port already completed and returned, or
11047 * will return the iocb. Just log the message.
11048 */
2680eeaa
JS
11049 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11050 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11051
2e0fef85 11052 spin_lock_irq(&phba->hbalock);
45ed1190 11053 if (phba->sli_rev < LPFC_SLI_REV4) {
faa832e9
JS
11054 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11055 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11056 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11057 spin_unlock_irq(&phba->hbalock);
11058 goto release_iocb;
11059 }
45ed1190
JS
11060 if (abort_iotag != 0 &&
11061 abort_iotag <= phba->sli.last_iotag)
11062 abort_iocb =
11063 phba->sli.iocbq_lookup[abort_iotag];
11064 } else
11065 /* For sli4 the abort_tag is the XRI,
11066 * so the abort routine puts the iotag of the iocb
11067 * being aborted in the context field of the abort
11068 * IOCB.
11069 */
11070 abort_iocb = phba->sli.iocbq_lookup[abort_context];
2680eeaa 11071
2a9bf3d0
JS
11072 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11073 "0327 Cannot abort els iocb %p "
11074 "with tag %x context %x, abort status %x, "
11075 "abort code %x\n",
11076 abort_iocb, abort_iotag, abort_context,
11077 irsp->ulpStatus, irsp->un.ulpWord[4]);
341af102 11078
ff78d8f9 11079 spin_unlock_irq(&phba->hbalock);
2680eeaa 11080 }
faa832e9 11081release_iocb:
604a3e30 11082 lpfc_sli_release_iocbq(phba, cmdiocb);
dea3101e 11083 return;
11084}
11085
e59058c4 11086/**
3621a710 11087 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
e59058c4
JS
11088 * @phba: Pointer to HBA context object.
11089 * @cmdiocb: Pointer to driver command iocb object.
11090 * @rspiocb: Pointer to driver response iocb object.
11091 *
11092 * The function is called from SLI ring event handler with no
11093 * lock held. This function is the completion handler for ELS commands
11094 * which are aborted. The function frees memory resources used for
11095 * the aborted ELS commands.
11096 **/
92d7f7b0
JS
11097static void
11098lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11099 struct lpfc_iocbq *rspiocb)
11100{
11101 IOCB_t *irsp = &rspiocb->iocb;
11102
11103 /* ELS cmd tag <ulpIoTag> completes */
11104 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
d7c255b2 11105 "0139 Ignoring ELS cmd tag x%x completion Data: "
92d7f7b0 11106 "x%x x%x x%x\n",
e8b62011 11107 irsp->ulpIoTag, irsp->ulpStatus,
92d7f7b0 11108 irsp->un.ulpWord[4], irsp->ulpTimeout);
858c9f6c
JS
11109 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11110 lpfc_ct_free_iocb(phba, cmdiocb);
11111 else
11112 lpfc_els_free_iocb(phba, cmdiocb);
92d7f7b0
JS
11113 return;
11114}
11115
e59058c4 11116/**
5af5eee7 11117 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
e59058c4
JS
11118 * @phba: Pointer to HBA context object.
11119 * @pring: Pointer to driver SLI ring object.
11120 * @cmdiocb: Pointer to driver command iocb object.
11121 *
5af5eee7
JS
11122 * This function issues an abort iocb for the provided command iocb down to
11123 * the port. Unless the outstanding command iocb is itself an abort
11124 * request, this function issues the abort unconditionally. This function is
11125 * called with hbalock held. The function returns 0 when it fails due to
11126 * memory allocation failure or when the command iocb is an abort request.
e59058c4 11127 **/
5af5eee7
JS
11128static int
11129lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 11130 struct lpfc_iocbq *cmdiocb)
dea3101e 11131{
2e0fef85 11132 struct lpfc_vport *vport = cmdiocb->vport;
0bd4ca25 11133 struct lpfc_iocbq *abtsiocbp;
dea3101e 11134 IOCB_t *icmd = NULL;
11135 IOCB_t *iabt = NULL;
5af5eee7 11136 int retval;
7e56aa25 11137 unsigned long iflags;
faa832e9 11138 struct lpfc_nodelist *ndlp;
07951076 11139
1c2ba475
JT
11140 lockdep_assert_held(&phba->hbalock);
11141
92d7f7b0
JS
11142 /*
11143 * There are certain command types we don't want to abort. And we
11144 * don't want to abort commands that are already in the process of
11145 * being aborted.
07951076
JS
11146 */
11147 icmd = &cmdiocb->iocb;
2e0fef85 11148 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
92d7f7b0
JS
11149 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11150 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
07951076
JS
11151 return 0;
11152
dea3101e 11153 /* issue ABTS for this IOCB based on iotag */
92d7f7b0 11154 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e 11155 if (abtsiocbp == NULL)
11156 return 0;
dea3101e 11157
07951076 11158 /* This signals the response handler to set the correct status
341af102 11159 * before calling the completion handler
07951076
JS
11160 */
11161 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11162
dea3101e 11163 iabt = &abtsiocbp->iocb;
07951076
JS
11164 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11165 iabt->un.acxri.abortContextTag = icmd->ulpContext;
45ed1190 11166 if (phba->sli_rev == LPFC_SLI_REV4) {
da0436e9 11167 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
45ed1190 11168 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
faa832e9 11169 } else {
da0436e9 11170 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
faa832e9
JS
11171 if (pring->ringno == LPFC_ELS_RING) {
11172 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11173 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11174 }
11175 }
07951076
JS
11176 iabt->ulpLe = 1;
11177 iabt->ulpClass = icmd->ulpClass;
dea3101e 11178
5ffc266e 11179 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11180 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
341af102
JS
11181 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11182 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11183 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11184 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11185
2e0fef85 11186 if (phba->link_state >= LPFC_LINK_UP)
07951076
JS
11187 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11188 else
11189 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 11190
07951076 11191 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
e6c6acc0 11192 abtsiocbp->vport = vport;
5b8bd0c9 11193
e8b62011
JS
11194 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11195 "0339 Abort xri x%x, original iotag x%x, "
11196 "abort cmd iotag x%x\n",
2a9bf3d0 11197 iabt->un.acxri.abortIoTag,
e8b62011 11198 iabt->un.acxri.abortContextTag,
2a9bf3d0 11199 abtsiocbp->iotag);
7e56aa25
JS
11200
11201 if (phba->sli_rev == LPFC_SLI_REV4) {
895427bd
JS
11202 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11203 if (unlikely(pring == NULL))
9bd2bff5 11204 return 0;
7e56aa25
JS
11205 /* Note: both hbalock and ring_lock need to be set here */
11206 spin_lock_irqsave(&pring->ring_lock, iflags);
11207 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11208 abtsiocbp, 0);
11209 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11210 } else {
11211 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11212 abtsiocbp, 0);
11213 }
dea3101e 11214
d7c255b2
JS
11215 if (retval)
11216 __lpfc_sli_release_iocbq(phba, abtsiocbp);
5af5eee7
JS
11217
11218 /*
11219 * Caller to this routine should check for IOCB_ERROR
11220 * and handle it properly. This routine no longer removes
11221 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11222 */
11223 return retval;
11224}
11225
11226/**
11227 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11228 * @phba: Pointer to HBA context object.
11229 * @pring: Pointer to driver SLI ring object.
11230 * @cmdiocb: Pointer to driver command iocb object.
11231 *
11232 * This function issues an abort iocb for the provided command iocb. In case
11233 * of unloading, the abort iocb will not be issued to commands on the ELS
11234 * ring. Instead, the completion callback of those commands shall be changed
11235 * so that nothing happens when they finish. This function is called with
11236 * hbalock held. The function returns 0 when the command iocb is an abort
11237 * request.
11238 **/
11239int
11240lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11241 struct lpfc_iocbq *cmdiocb)
11242{
11243 struct lpfc_vport *vport = cmdiocb->vport;
11244 int retval = IOCB_ERROR;
11245 IOCB_t *icmd = NULL;
11246
1c2ba475
JT
11247 lockdep_assert_held(&phba->hbalock);
11248
5af5eee7
JS
11249 /*
11250 * There are certain command types we don't want to abort. And we
11251 * don't want to abort commands that are already in the process of
11252 * being aborted.
11253 */
11254 icmd = &cmdiocb->iocb;
11255 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11256 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11257 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11258 return 0;
11259
1234a6d5
DK
11260 if (!pring) {
11261 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11262 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11263 else
11264 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11265 goto abort_iotag_exit;
11266 }
11267
5af5eee7
JS
11268 /*
11269 * If we're unloading, don't abort iocb on the ELS ring, but change
11270 * the callback so that nothing happens when it finishes.
11271 */
11272 if ((vport->load_flag & FC_UNLOADING) &&
11273 (pring->ringno == LPFC_ELS_RING)) {
11274 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11275 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11276 else
11277 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11278 goto abort_iotag_exit;
11279 }
11280
11281 /* Now, we try to issue the abort to the cmdiocb out */
11282 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11283
07951076 11284abort_iotag_exit:
2e0fef85
JS
11285 /*
11286 * Caller to this routine should check for IOCB_ERROR
11287 * and handle it properly. This routine no longer removes
11288 * iocb off txcmplq and call compl in case of IOCB_ERROR.
07951076 11289 */
2e0fef85 11290 return retval;
dea3101e 11291}
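
/*
 * Illustrative sketch, not part of the driver: the locking contract of
 * lpfc_sli_issue_abort_iotag(). The routine must be entered with hbalock
 * held, and the caller owns checking for IOCB_ERROR, since the abort path
 * no longer removes the iocb from the txcmplq on failure.
 */
static __maybe_unused int
lpfc_example_abort_one(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		       struct lpfc_iocbq *cmdiocb)
{
	int retval;

	spin_lock_irq(&phba->hbalock);
	retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	spin_unlock_irq(&phba->hbalock);

	return retval;	/* IOCB_ERROR must be handled by the caller */
}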
11292
895427bd
JS
11293/**
11294 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
11295 * @phba: Pointer to HBA context object.
11296 * @pring: Pointer to driver SLI ring object.
11297 * @cmdiocb: Pointer to driver command iocb object.
11298 *
11299 * This function issues an abort iocb for the provided command iocb down to
11300 * the port. Unless the outstanding command iocb is itself an abort
11301 * request, this function issues the abort unconditionally. This function is
11302 * called with hbalock held. The function returns 0 when it fails due to
11303 * memory allocation failure or when the command iocb is an abort request.
11304 **/
11305static int
11306lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11307 struct lpfc_iocbq *cmdiocb)
11308{
11309 struct lpfc_vport *vport = cmdiocb->vport;
11310 struct lpfc_iocbq *abtsiocbp;
205e8240 11311 union lpfc_wqe128 *abts_wqe;
895427bd
JS
11312 int retval;
11313
11314 /*
11315 * There are certain command types we don't want to abort. And we
11316 * don't want to abort commands that are already in the process of
11317 * being aborted.
11318 */
11319 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
11320 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
11321 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11322 return 0;
11323
11324 /* issue ABTS for this io based on iotag */
11325 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11326 if (abtsiocbp == NULL)
11327 return 0;
11328
11329 /* This signals the response handler to set the correct status
11330 * before calling the completion handler
11331 */
11332 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11333
11334 /* Complete prepping the abort wqe and issue to the FW. */
11335 abts_wqe = &abtsiocbp->wqe;
895427bd 11336
1c36833d
JS
11337 /* Clear any stale WQE contents */
11338 memset(abts_wqe, 0, sizeof(*abts_wqe));
11339 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
895427bd
JS
11340
11341 /* word 7 */
895427bd
JS
11342 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11343 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11344 cmdiocb->iocb.ulpClass);
11345
11346 /* word 8 - tell the FW to abort the IO associated with this
11347 * outstanding exchange ID.
11348 */
11349 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11350
11351 /* word 9 - this is the iotag for the abts_wqe completion. */
11352 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11353 abtsiocbp->iotag);
11354
11355 /* word 10 */
895427bd
JS
11356 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11357 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11358
11359 /* word 11 */
11360 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11361 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11362 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11363
11364 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11365 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11366 abtsiocbp->vport = vport;
01649561 11367 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
895427bd 11368 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
cd22d605 11369 if (retval) {
895427bd
JS
11370 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11371 "6147 Failed abts issue_wqe with status x%x "
11372 "for oxid x%x\n",
11373 retval, cmdiocb->sli4_xritag);
11374 lpfc_sli_release_iocbq(phba, abtsiocbp);
11375 return retval;
11376 }
11377
11378 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11379 "6148 Drv Abort NVME Request Issued for "
11380 "ox_id x%x on reqtag x%x\n",
11381 cmdiocb->sli4_xritag,
11382 abtsiocbp->iotag);
11383
11384 return retval;
11385}
11386
5af5eee7
JS
11387/**
11388 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11389 * @phba: pointer to lpfc HBA data structure.
11390 *
11391 * This routine will abort all pending and outstanding iocbs to an HBA.
11392 **/
11393void
11394lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11395{
11396 struct lpfc_sli *psli = &phba->sli;
11397 struct lpfc_sli_ring *pring;
895427bd 11398 struct lpfc_queue *qp = NULL;
5af5eee7
JS
11399 int i;
11400
895427bd
JS
11401 if (phba->sli_rev != LPFC_SLI_REV4) {
11402 for (i = 0; i < psli->num_rings; i++) {
11403 pring = &psli->sli3_ring[i];
11404 lpfc_sli_abort_iocb_ring(phba, pring);
11405 }
11406 return;
11407 }
11408 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11409 pring = qp->pring;
11410 if (!pring)
11411 continue;
db55fba8 11412 lpfc_sli_abort_iocb_ring(phba, pring);
5af5eee7
JS
11413 }
11414}
11415
e59058c4 11416/**
3621a710 11417 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
e59058c4
JS
11418 * @iocbq: Pointer to driver iocb object.
11419 * @vport: Pointer to driver virtual port object.
11420 * @tgt_id: SCSI ID of the target.
11421 * @lun_id: LUN ID of the scsi device.
11422 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11423 *
3621a710 11424 * This function acts as an iocb filter for functions which abort or count
e59058c4
JS
11425 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11426 * 0 if the filtering criteria are met for the given iocb and will return
11427 * 1 if the filtering criteria are not met.
11428 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11429 * given iocb is for the SCSI device specified by the vport, tgt_id and
11430 * lun_id parameters.
11431 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11432 * given iocb is for the SCSI target specified by vport and tgt_id
11433 * parameters.
11434 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11435 * given iocb is for the SCSI host associated with the given vport.
11436 * This function is called with no locks held.
11437 **/
dea3101e 11438static int
51ef4c26
JS
11439lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11440 uint16_t tgt_id, uint64_t lun_id,
0bd4ca25 11441 lpfc_ctx_cmd ctx_cmd)
dea3101e 11442{
0bd4ca25 11443 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 11444 int rc = 1;
11445
b0e83012 11446 if (iocbq->vport != vport)
0bd4ca25
JSEC
11447 return rc;
11448
b0e83012
JS
11449 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11450 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
51ef4c26
JS
11451 return rc;
11452
0bd4ca25 11453 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
0bd4ca25 11454
495a714c 11455 if (lpfc_cmd->pCmd == NULL)
dea3101e 11456 return rc;
11457
11458 switch (ctx_cmd) {
11459 case LPFC_CTX_LUN:
b0e83012 11460 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c
JS
11461 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11462 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea3101e 11463 rc = 0;
11464 break;
11465 case LPFC_CTX_TGT:
b0e83012 11466 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
495a714c 11467 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea3101e 11468 rc = 0;
11469 break;
dea3101e 11470 case LPFC_CTX_HOST:
11471 rc = 0;
11472 break;
11473 default:
11474 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
cadbd4a5 11475 __func__, ctx_cmd);
dea3101e 11476 break;
11477 }
11478
11479 return rc;
11480}
11481
e59058c4 11482/**
3621a710 11483 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
e59058c4
JS
11484 * @vport: Pointer to virtual port.
11485 * @tgt_id: SCSI ID of the target.
11486 * @lun_id: LUN ID of the scsi device.
11487 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11488 *
11489 * This function returns the number of FCP commands pending for the vport.
11490 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11491 * commands pending on the vport associated with the SCSI device specified
11492 * by the tgt_id and lun_id parameters.
11493 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11494 * commands pending on the vport associated with the SCSI target specified
11495 * by the tgt_id parameter.
11496 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11497 * commands pending on the vport.
11498 * This function returns the number of iocbs which satisfy the filter.
11499 * This function is called without any lock held.
11500 **/
dea3101e 11501int
51ef4c26
JS
11502lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11503 lpfc_ctx_cmd ctx_cmd)
dea3101e 11504{
51ef4c26 11505 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11506 struct lpfc_iocbq *iocbq;
11507 int sum, i;
dea3101e 11508
31979008 11509 spin_lock_irq(&phba->hbalock);
0bd4ca25
JSEC
11510 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11511 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11512
51ef4c26
JS
11513 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11514 ctx_cmd) == 0)
0bd4ca25 11515 sum++;
dea3101e 11516 }
31979008 11517 spin_unlock_irq(&phba->hbalock);
0bd4ca25 11518
dea3101e 11519 return sum;
11520}
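
/*
 * Illustrative sketch, not part of the driver: a LUN-reset style check
 * that counts the FCP commands still pending against one LUN while
 * waiting for outstanding I/O to drain. tgt_id and lun_id are
 * hypothetical values supplied by the caller.
 */
static __maybe_unused bool
lpfc_example_lun_is_idle(struct lpfc_vport *vport, uint16_t tgt_id,
			 uint64_t lun_id)
{
	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) == 0;
}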
11521
e59058c4 11522/**
3621a710 11523 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
e59058c4
JS
11524 * @phba: Pointer to HBA context object
11525 * @cmdiocb: Pointer to command iocb object.
11526 * @rspiocb: Pointer to response iocb object.
11527 *
11528 * This function is called when an aborted FCP iocb completes. This
11529 * function is called by the ring event handler with no lock held.
11530 * This function frees the iocb.
11531 **/
5eb95af0 11532void
2e0fef85
JS
11533lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11534 struct lpfc_iocbq *rspiocb)
5eb95af0 11535{
cb69f7de 11536 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8e668af5 11537 "3096 ABORT_XRI_CN completing on rpi x%x "
cb69f7de
JS
11538 "original iotag x%x, abort cmd iotag x%x "
11539 "status 0x%x, reason 0x%x\n",
11540 cmdiocb->iocb.un.acxri.abortContextTag,
11541 cmdiocb->iocb.un.acxri.abortIoTag,
11542 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11543 rspiocb->iocb.un.ulpWord[4]);
604a3e30 11544 lpfc_sli_release_iocbq(phba, cmdiocb);
5eb95af0
JSEC
11545 return;
11546}
11547
e59058c4 11548/**
3621a710 11549 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
e59058c4
JS
11550 * @vport: Pointer to virtual port.
11551 * @pring: Pointer to driver SLI ring object.
11552 * @tgt_id: SCSI ID of the target.
11553 * @lun_id: LUN ID of the scsi device.
11554 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11555 *
11556 * This function sends an abort command for every SCSI command
11557 * associated with the given virtual port pending on the ring
11558 * filtered by lpfc_sli_validate_fcp_iocb function.
11559 * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11560 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
11561 * parameters.
11562 * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11563 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11564 * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
11565 * FCP iocbs associated with the virtual port.
11566 * This function returns the number of iocbs it failed to abort.
11567 * This function is called with no locks held.
11568 **/
dea3101e 11569int
51ef4c26
JS
11570lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11571 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea3101e 11572{
51ef4c26 11573 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
11574 struct lpfc_iocbq *iocbq;
11575 struct lpfc_iocbq *abtsiocb;
ecbb227e 11576 struct lpfc_sli_ring *pring_s4;
dea3101e 11577 IOCB_t *cmd = NULL;
dea3101e 11578 int errcnt = 0, ret_val = 0;
0bd4ca25 11579 int i;
dea3101e 11580
b0e83012
JS
11581 /* all I/Os are in process of being flushed */
11582 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11583 return errcnt;
11584
0bd4ca25
JSEC
11585 for (i = 1; i <= phba->sli.last_iotag; i++) {
11586 iocbq = phba->sli.iocbq_lookup[i];
dea3101e 11587
51ef4c26 11588 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
2e0fef85 11589 abort_cmd) != 0)
dea3101e 11590 continue;
11591
afbd8d88
JS
11592 /*
11593 * If the iocbq is already being aborted, don't take a second
11594 * action, but do count it.
11595 */
11596 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11597 continue;
11598
dea3101e 11599 /* issue ABTS for this IOCB based on iotag */
0bd4ca25 11600 abtsiocb = lpfc_sli_get_iocbq(phba);
dea3101e 11601 if (abtsiocb == NULL) {
11602 errcnt++;
11603 continue;
11604 }
dea3101e 11605
afbd8d88
JS
11606 /* indicate the IO is being aborted by the driver. */
11607 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11608
0bd4ca25 11609 cmd = &iocbq->iocb;
dea3101e 11610 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11611 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
da0436e9
JS
11612 if (phba->sli_rev == LPFC_SLI_REV4)
11613 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11614 else
11615 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 11616 abtsiocb->iocb.ulpLe = 1;
11617 abtsiocb->iocb.ulpClass = cmd->ulpClass;
afbd8d88 11618 abtsiocb->vport = vport;
dea3101e 11619
5ffc266e 11620 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11621 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
341af102
JS
11622 if (iocbq->iocb_flag & LPFC_IO_FCP)
11623 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11624 if (iocbq->iocb_flag & LPFC_IO_FOF)
11625 abtsiocb->iocb_flag |= LPFC_IO_FOF;
5ffc266e 11626
2e0fef85 11627 if (lpfc_is_link_up(phba))
dea3101e 11628 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11629 else
11630 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11631
5eb95af0
JSEC
11632 /* Setup callback routine and issue the command. */
11633 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
ecbb227e
JS
11634 if (phba->sli_rev == LPFC_SLI_REV4) {
11635 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11636 if (!pring_s4)
11637 continue;
11638 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11639 abtsiocb, 0);
11640 } else
11641 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11642 abtsiocb, 0);
dea3101e 11643 if (ret_val == IOCB_ERROR) {
604a3e30 11644 lpfc_sli_release_iocbq(phba, abtsiocb);
dea3101e 11645 errcnt++;
11646 continue;
11647 }
11648 }
11649
11650 return errcnt;
11651}
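
/*
 * Illustrative sketch, not part of the driver: a target-reset style sweep.
 * Every FCP iocb pending for tgt_id on the given ring is sent an ABTS; the
 * return value counts the iocbs that could NOT be aborted, so zero means
 * every abort was issued.
 */
static __maybe_unused int
lpfc_example_abort_target(struct lpfc_vport *vport,
			  struct lpfc_sli_ring *pring, uint16_t tgt_id)
{
	/* lun_id is not used for LPFC_CTX_TGT scope */
	return lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
}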
11652
98912dda
JS
11653/**
11654 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11655 * @vport: Pointer to virtual port.
11656 * @pring: Pointer to driver SLI ring object.
11657 * @tgt_id: SCSI ID of the target.
11658 * @lun_id: LUN ID of the scsi device.
11659 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11660 *
11661 * This function sends an abort command for every SCSI command
11662 * associated with the given virtual port pending on the ring
11663 * filtered by lpfc_sli_validate_fcp_iocb function.
11664 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11665 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
11666 * parameters.
11667 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11668 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11669 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends an abort to all
11670 * FCP iocbs associated with the virtual port.
11671 * This function returns the number of iocbs it aborted.
11672 * This function is called with no locks held right after a taskmgmt
11673 * command is sent.
11674 **/
11675int
11676lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11677 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11678{
11679 struct lpfc_hba *phba = vport->phba;
8c50d25c 11680 struct lpfc_scsi_buf *lpfc_cmd;
98912dda 11681 struct lpfc_iocbq *abtsiocbq;
8c50d25c 11682 struct lpfc_nodelist *ndlp;
98912dda
JS
11683 struct lpfc_iocbq *iocbq;
11684 IOCB_t *icmd;
11685 int sum, i, ret_val;
11686 unsigned long iflags;
11687 struct lpfc_sli_ring *pring_s4;
98912dda 11688
59c68eaa 11689 spin_lock_irqsave(&phba->hbalock, iflags);
98912dda
JS
11690
11691 /* all I/Os are in process of being flushed */
11692 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
59c68eaa 11693 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11694 return 0;
11695 }
11696 sum = 0;
11697
11698 for (i = 1; i <= phba->sli.last_iotag; i++) {
11699 iocbq = phba->sli.iocbq_lookup[i];
11700
11701 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11702 cmd) != 0)
11703 continue;
11704
11705 /*
11706 * If the iocbq is already being aborted, don't take a second
11707 * action, but do count it.
11708 */
11709 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11710 continue;
11711
11712 /* issue ABTS for this IOCB based on iotag */
11713 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11714 if (abtsiocbq == NULL)
11715 continue;
11716
11717 icmd = &iocbq->iocb;
11718 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11719 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11720 if (phba->sli_rev == LPFC_SLI_REV4)
11721 abtsiocbq->iocb.un.acxri.abortIoTag =
11722 iocbq->sli4_xritag;
11723 else
11724 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11725 abtsiocbq->iocb.ulpLe = 1;
11726 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11727 abtsiocbq->vport = vport;
11728
11729 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
895427bd 11730 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
98912dda
JS
11731 if (iocbq->iocb_flag & LPFC_IO_FCP)
11732 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
9bd2bff5
JS
11733 if (iocbq->iocb_flag & LPFC_IO_FOF)
11734 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
98912dda 11735
8c50d25c
JS
11736 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11737 ndlp = lpfc_cmd->rdata->pnode;
11738
11739 if (lpfc_is_link_up(phba) &&
11740 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
98912dda
JS
11741 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11742 else
11743 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11744
11745 /* Setup callback routine and issue the command. */
11746 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11747
11748 /*
11749 * Indicate the IO is being aborted by the driver and set
11750 * the caller's flag into the aborted IO.
11751 */
11752 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11753
11754 if (phba->sli_rev == LPFC_SLI_REV4) {
59c68eaa
JS
11755 pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
11756 if (!pring_s4)
895427bd 11757 continue;
98912dda 11758 /* Note: both hbalock and ring_lock must be set here */
59c68eaa 11759 spin_lock(&pring_s4->ring_lock);
98912dda
JS
11760 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11761 abtsiocbq, 0);
59c68eaa 11762 spin_unlock(&pring_s4->ring_lock);
98912dda
JS
11763 } else {
11764 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11765 abtsiocbq, 0);
11766 }
11767
11768
11769 if (ret_val == IOCB_ERROR)
11770 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11771 else
11772 sum++;
11773 }
59c68eaa 11774 spin_unlock_irqrestore(&phba->hbalock, iflags);
98912dda
JS
11775 return sum;
11776}
11777
e59058c4 11778/**
3621a710 11779 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
e59058c4
JS
11780 * @phba: Pointer to HBA context object.
11781 * @cmdiocbq: Pointer to command iocb.
11782 * @rspiocbq: Pointer to response iocb.
11783 *
11784 * This function is the completion handler for iocbs issued using
11785 * lpfc_sli_issue_iocb_wait function. This function is called by the
11786 * ring event handler function without any lock held. This function
11787 * can be called from both worker thread context and interrupt
11788 * context. This function can also be called from another thread which
11789 * cleans up the SLI layer objects.
11790 * This function copies the contents of the response iocb to the
11791 * response iocb memory object provided by the caller of
11792 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11793 * sleeps waiting for the iocb completion.
11794 **/
68876920
JSEC
11795static void
11796lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11797 struct lpfc_iocbq *cmdiocbq,
11798 struct lpfc_iocbq *rspiocbq)
dea3101e 11799{
68876920
JSEC
11800 wait_queue_head_t *pdone_q;
11801 unsigned long iflags;
0f65ff68 11802 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 11803
2e0fef85 11804 spin_lock_irqsave(&phba->hbalock, iflags);
5a0916b4
JS
11805 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11806
11807 /*
11808 * A time out has occurred for the iocb. If a time out
11809 * completion handler has been supplied, call it. Otherwise,
11810 * just free the iocbq.
11811 */
11812
11813 spin_unlock_irqrestore(&phba->hbalock, iflags);
11814 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11815 cmdiocbq->wait_iocb_cmpl = NULL;
11816 if (cmdiocbq->iocb_cmpl)
11817 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11818 else
11819 lpfc_sli_release_iocbq(phba, cmdiocbq);
11820 return;
11821 }
11822
68876920
JSEC
11823 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11824 if (cmdiocbq->context2 && rspiocbq)
11825 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11826 &rspiocbq->iocb, sizeof(IOCB_t));
11827
0f65ff68
JS
11828 /* Set the exchange busy flag for task management commands */
11829 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11830 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11831 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11832 cur_iocbq);
11833 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11834 }
11835
68876920 11836 pdone_q = cmdiocbq->context_un.wait_queue;
68876920
JSEC
11837 if (pdone_q)
11838 wake_up(pdone_q);
858c9f6c 11839 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea3101e 11840 return;
11841}
11842
d11e31dd
JS
11843/**
11844 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11845 * @phba: Pointer to HBA context object.
11846 * @piocbq: Pointer to command iocb.
11847 * @flag: Flag to test.
11848 *
11849 * This routine grabs the hbalock and then tests the iocb_flag to
11850 * see if the passed-in flag is set.
11851 * Returns:
11852 * 1 if flag is set.
11853 * 0 if flag is not set.
11854 **/
11855static int
11856lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11857 struct lpfc_iocbq *piocbq, uint32_t flag)
11858{
11859 unsigned long iflags;
11860 int ret;
11861
11862 spin_lock_irqsave(&phba->hbalock, iflags);
11863 ret = piocbq->iocb_flag & flag;
11864 spin_unlock_irqrestore(&phba->hbalock, iflags);
11865 return ret;
11866
11867}
11868
e59058c4 11869/**
3621a710 11870 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
e59058c4
JS
11871 * @phba: Pointer to HBA context object.
11872 * @pring: Pointer to sli ring.
11873 * @piocb: Pointer to command iocb.
11874 * @prspiocbq: Pointer to response iocb.
11875 * @timeout: Timeout in number of seconds.
11876 *
11877 * This function issues the iocb to firmware and waits for the
5a0916b4
JS
11878 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11879 * to handle iocbs which time out. If the field is NULL, the
11880 * function shall free the iocbq structure. If more clean up is
11881 * needed, the caller is expected to provide a completion function
11882 * that will provide the needed clean up. If the iocb command is
11883 * not completed within timeout seconds, the function will either
11884 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11885 * completion function set in the iocb_cmpl field and then return
11886 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11887 * resources if this function returns IOCB_TIMEDOUT.
e59058c4
JS
11888 * The function waits for the iocb completion using a
11889 * non-interruptible wait.
11890 * This function will sleep while waiting for iocb completion.
11891 * So, this function should not be called from any context which
11892 * does not allow sleeping. For the same reason, this function
11893 * cannot be called with interrupts disabled.
11894 * This function assumes that the iocb completions occur while
11895 * this function sleeps. So, this function cannot be called from
11896 * the thread which processes iocb completion for this ring.
11897 * This function clears the iocb_flag of the iocb object before
11898 * issuing the iocb and the iocb completion handler sets this
11899 * flag and wakes this thread when the iocb completes.
11900 * The contents of the response iocb will be copied to prspiocbq
11901 * by the completion handler when the command completes.
11902 * This function returns IOCB_SUCCESS when success.
11903 * This function is called with no lock held.
11904 **/
dea3101e 11905int
2e0fef85 11906lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
da0436e9 11907 uint32_t ring_number,
2e0fef85
JS
11908 struct lpfc_iocbq *piocb,
11909 struct lpfc_iocbq *prspiocbq,
68876920 11910 uint32_t timeout)
dea3101e 11911{
7259f0d0 11912 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
68876920
JSEC
11913 long timeleft, timeout_req = 0;
11914 int retval = IOCB_SUCCESS;
875fbdfe 11915 uint32_t creg_val;
0e9bb8d7
JS
11916 struct lpfc_iocbq *iocb;
11917 int txq_cnt = 0;
11918 int txcmplq_cnt = 0;
895427bd 11919 struct lpfc_sli_ring *pring;
5a0916b4
JS
11920 unsigned long iflags;
11921 bool iocb_completed = true;
11922
895427bd
JS
11923 if (phba->sli_rev >= LPFC_SLI_REV4)
11924 pring = lpfc_sli4_calc_ring(phba, piocb);
11925 else
11926 pring = &phba->sli.sli3_ring[ring_number];
dea3101e 11927 /*
68876920
JSEC
11928 * If the caller has provided a response iocbq buffer, then context2
11929 * must be NULL; otherwise it is an error.
dea3101e 11930 */
68876920
JSEC
11931 if (prspiocbq) {
11932 if (piocb->context2)
11933 return IOCB_ERROR;
11934 piocb->context2 = prspiocbq;
dea3101e 11935 }
11936
5a0916b4 11937 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
68876920
JSEC
11938 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11939 piocb->context_un.wait_queue = &done_q;
5a0916b4 11940 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea3101e 11941
875fbdfe 11942 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
11943 if (lpfc_readl(phba->HCregaddr, &creg_val))
11944 return IOCB_ERROR;
875fbdfe
JSEC
11945 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11946 writel(creg_val, phba->HCregaddr);
11947 readl(phba->HCregaddr); /* flush */
11948 }
11949
2a9bf3d0
JS
11950 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11951 SLI_IOCB_RET_IOCB);
68876920 11952 if (retval == IOCB_SUCCESS) {
256ec0d0 11953 timeout_req = msecs_to_jiffies(timeout * 1000);
68876920 11954 timeleft = wait_event_timeout(done_q,
d11e31dd 11955 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
68876920 11956 timeout_req);
5a0916b4
JS
11957 spin_lock_irqsave(&phba->hbalock, iflags);
11958 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11959
11960 /*
11961 * IOCB timed out. Inform the wake iocb wait
11962 * completion function and set local status
11963 */
dea3101e 11964
5a0916b4
JS
11965 iocb_completed = false;
11966 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11967 }
11968 spin_unlock_irqrestore(&phba->hbalock, iflags);
11969 if (iocb_completed) {
7054a606 11970 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 11971 "0331 IOCB wake signaled\n");
53151bbb
JS
11972 /* Note: we are not indicating if the IOCB has a success
11973 * status or not - that's for the caller to check.
11974 * IOCB_SUCCESS means just that the command was sent and
11975 * completed. Not that it completed successfully.
11976 * */
7054a606 11977 } else if (timeleft == 0) {
68876920 11978 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
11979 "0338 IOCB wait timeout error - no "
11980 "wake response Data x%x\n", timeout);
68876920 11981 retval = IOCB_TIMEDOUT;
7054a606 11982 } else {
68876920 11983 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011
JS
11984 "0330 IOCB wake NOT set, "
11985 "Data x%x x%lx\n",
68876920
JSEC
11986 timeout, (timeleft / jiffies));
11987 retval = IOCB_TIMEDOUT;
dea3101e 11988 }
2a9bf3d0 11989 } else if (retval == IOCB_BUSY) {
0e9bb8d7
JS
11990 if (phba->cfg_log_verbose & LOG_SLI) {
11991 list_for_each_entry(iocb, &pring->txq, list) {
11992 txq_cnt++;
11993 }
11994 list_for_each_entry(iocb, &pring->txcmplq, list) {
11995 txcmplq_cnt++;
11996 }
11997 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11998 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11999 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12000 }
2a9bf3d0 12001 return retval;
68876920
JSEC
12002 } else {
12003 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
d7c255b2 12004 "0332 IOCB wait issue failed, Data x%x\n",
e8b62011 12005 retval);
68876920 12006 retval = IOCB_ERROR;
dea3101e 12007 }
12008
875fbdfe 12009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9940b97b
JS
12010 if (lpfc_readl(phba->HCregaddr, &creg_val))
12011 return IOCB_ERROR;
875fbdfe
JSEC
12012 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12013 writel(creg_val, phba->HCregaddr);
12014 readl(phba->HCregaddr); /* flush */
12015 }
12016
68876920
JSEC
12017 if (prspiocbq)
12018 piocb->context2 = NULL;
12019
12020 piocb->context_un.wait_queue = NULL;
12021 piocb->iocb_cmpl = NULL;
dea3101e 12022 return retval;
12023}
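
/*
 * Illustrative sketch, not part of the driver: the synchronous issue
 * pattern. The command iocb is assumed to be fully prepared by the caller;
 * a separate response iocbq receives the completion. On IOCB_TIMEDOUT the
 * command iocb must NOT be freed here - the saved completion handler (or
 * lpfc_sli_wake_iocb_wait itself) releases it later.
 */
static __maybe_unused int
lpfc_example_issue_sync(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_iocbq *rspiocbq;
	int rc;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq)
		return IOCB_ERROR;

	/* the 30 second timeout is a hypothetical value */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
				      rspiocbq, 30);
	if (rc == IOCB_SUCCESS) {
		/* rspiocbq->iocb now holds the response; check ulpStatus */
	}

	/* the local response iocbq is ours to release in every case */
	lpfc_sli_release_iocbq(phba, rspiocbq);
	return rc;
}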
68876920 12024
e59058c4 12025/**
3621a710 12026 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
e59058c4
JS
12027 * @phba: Pointer to HBA context object.
12028 * @pmboxq: Pointer to driver mailbox object.
12029 * @timeout: Timeout in number of seconds.
12030 *
12031 * This function issues the mailbox to firmware and waits for the
12032 * mailbox command to complete. If the mailbox command is not
12033 * completed within timeout seconds, it returns MBX_TIMEOUT.
12034 * The function waits for the mailbox completion using a
12035 * non-interruptible wait. If the mailbox command does not
12036 * complete in time, MBX_TIMEOUT is returned to the caller. The caller
12037 * should not free the mailbox resources if this function returns
12038 * MBX_TIMEOUT.
12039 * This function will sleep while waiting for mailbox completion.
12040 * So, this function should not be called from any context which
12041 * does not allow sleeping. For the same reason, this function
12042 * cannot be called with interrupts disabled.
12043 * This function assumes that the mailbox completion occurs while
12044 * this function sleeps. So, this function cannot be called from
12045 * the worker thread which processes mailbox completion.
12046 * This function is called in the context of HBA management
12047 * applications.
12048 * This function returns MBX_SUCCESS when successful.
12049 * This function is called with no lock held.
12050 **/
dea3101e 12051int
2e0fef85 12052lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea3101e 12053 uint32_t timeout)
12054{
e29d74f8 12055 struct completion mbox_done;
dea3101e 12056 int retval;
858c9f6c 12057 unsigned long flag;
dea3101e 12058
495a714c 12059 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea3101e 12060 /* setup wake call as IOCB callback */
12061 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
dea3101e 12062
e29d74f8
JS
12063 /* setup context3 field to pass wait_queue pointer to wake function */
12064 init_completion(&mbox_done);
12065 pmboxq->context3 = &mbox_done;
dea3101e 12066 /* now issue the command */
12067 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea3101e 12068 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
e29d74f8
JS
12069 wait_for_completion_timeout(&mbox_done,
12070 msecs_to_jiffies(timeout * 1000));
7054a606 12071
858c9f6c 12072 spin_lock_irqsave(&phba->hbalock, flag);
e29d74f8 12073 pmboxq->context3 = NULL;
7054a606
JS
12074 /*
12075 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12076 * else do not free the resources.
12077 */
d7c47992 12078 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea3101e 12079 retval = MBX_SUCCESS;
d7c47992 12080 } else {
7054a606 12081 retval = MBX_TIMEOUT;
858c9f6c
JS
12082 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12083 }
12084 spin_unlock_irqrestore(&phba->hbalock, flag);
dea3101e 12085 }
dea3101e 12086 return retval;
12087}
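
/*
 * Illustrative sketch, not part of the driver: issuing a mailbox command
 * synchronously. The mailbox is drawn from the driver's mbox mempool; the
 * actual command setup is elided. On MBX_TIMEOUT the mailbox must NOT be
 * freed - ownership has passed to the default completion handler.
 */
static __maybe_unused int
lpfc_example_mbox_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return MBX_NOT_FINISHED;

	memset(pmboxq, 0, sizeof(*pmboxq));
	/* ... fill in pmboxq->u.mb for the desired command here ... */

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}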
12088
e59058c4 12089/**
3772a991 12090 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
e59058c4
JS
12091 * @phba: Pointer to HBA context.
12092 *
3772a991
JS
12093 * This function is called to shutdown the driver's mailbox sub-system.
12094 * It first marks the mailbox sub-system as in a blocked state to prevent
12095 * asynchronous mailbox commands from being issued off the pending mailbox
12096 * command queue. If the mailbox command sub-system shutdown is due to
12097 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12098 * the mailbox sub-system flush routine to forcefully bring down the
12099 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12100 * as with offline or HBA function reset), this routine will wait for the
12101 * outstanding mailbox command to complete before invoking the mailbox
12102 * sub-system flush routine to gracefully bring down mailbox sub-system.
e59058c4 12103 **/
3772a991 12104void
618a5230 12105lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
b4c02652 12106{
3772a991 12107 struct lpfc_sli *psli = &phba->sli;
3772a991 12108 unsigned long timeout;
b4c02652 12109
618a5230
JS
12110 if (mbx_action == LPFC_MBX_NO_WAIT) {
12111 /* delay 100ms for port state */
12112 msleep(100);
12113 lpfc_sli_mbox_sys_flush(phba);
12114 return;
12115 }
a183a15f 12116 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
d7069f09 12117
523128e5
JS
12118 /* Disable softirqs, including timers from obtaining phba->hbalock */
12119 local_bh_disable();
12120
3772a991
JS
12121 spin_lock_irq(&phba->hbalock);
12122 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
b4c02652 12123
3772a991 12124 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3772a991
JS
12125 /* Determine how long we might wait for the active mailbox
12126 * command to be gracefully completed by firmware.
12127 */
a183a15f
JS
12128 if (phba->sli.mbox_active)
12129 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12130 phba->sli.mbox_active) *
12131 1000) + jiffies;
12132 spin_unlock_irq(&phba->hbalock);
12133
523128e5
JS
12134 /* Enable softirqs again, done with phba->hbalock */
12135 local_bh_enable();
12136
3772a991
JS
12137 while (phba->sli.mbox_active) {
12138 /* Check active mailbox complete status every 2ms */
12139 msleep(2);
12140 if (time_after(jiffies, timeout))
12141 /* Timeout, let the mailbox flush routine
12142 * forcefully release the active mailbox command
12143 */
12144 break;
12145 }
523128e5 12146 } else {
d7069f09
JS
12147 spin_unlock_irq(&phba->hbalock);
12148
523128e5
JS
12149 /* Enable softirqs again, done with phba->hbalock */
12150 local_bh_enable();
12151 }
12152
3772a991
JS
12153 lpfc_sli_mbox_sys_flush(phba);
12154}
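
/*
 * Illustrative sketch, not part of the driver: the two shutdown modes.
 * LPFC_MBX_WAIT lets an active mailbox command drain gracefully before the
 * flush; LPFC_MBX_NO_WAIT, used on error paths such as EEH, forces the
 * flush after a short delay.
 */
static __maybe_unused void
lpfc_example_shutdown_mbox(struct lpfc_hba *phba, bool in_error_path)
{
	lpfc_sli_mbox_sys_shutdown(phba, in_error_path ? LPFC_MBX_NO_WAIT
						       : LPFC_MBX_WAIT);
}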
ed957684 12155
3772a991
JS
12156/**
12157 * lpfc_sli_eratt_read - read sli-3 error attention events
12158 * @phba: Pointer to HBA context.
12159 *
12160 * This function is called to read the SLI3 device error attention registers
12161 * for possible error attention events. The caller must hold the hostlock
12162 * with spin_lock_irq().
12163 *
25985edc 12164 * This function returns 1 when there is Error Attention in the Host Attention
3772a991
JS
12165 * Register and returns 0 otherwise.
12166 **/
12167static int
12168lpfc_sli_eratt_read(struct lpfc_hba *phba)
12169{
12170 uint32_t ha_copy;
b4c02652 12171
3772a991 12172 /* Read chip Host Attention (HA) register */
9940b97b
JS
12173 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12174 goto unplug_err;
12175
3772a991
JS
12176 if (ha_copy & HA_ERATT) {
12177 /* Read host status register to retrieve error event */
9940b97b
JS
12178 if (lpfc_sli_read_hs(phba))
12179 goto unplug_err;
b4c02652 12180
3772a991
JS
12181 /* Check if a deferred error condition is active */
12182 if ((HS_FFER1 & phba->work_hs) &&
12183 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
dcf2a4e0 12184 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
3772a991 12185 phba->hba_flag |= DEFER_ERATT;
3772a991
JS
12186 /* Clear all interrupt enable conditions */
12187 writel(0, phba->HCregaddr);
12188 readl(phba->HCregaddr);
12189 }
12190
12191 /* Set the driver HA work bitmap */
3772a991
JS
12192 phba->work_ha |= HA_ERATT;
12193 /* Indicate polling handles this ERATT */
12194 phba->hba_flag |= HBA_ERATT_HANDLED;
3772a991
JS
12195 return 1;
12196 }
12197 return 0;
9940b97b
JS
12198
12199unplug_err:
12200 /* Set the driver HS work bitmap */
12201 phba->work_hs |= UNPLUG_ERR;
12202 /* Set the driver HA work bitmap */
12203 phba->work_ha |= HA_ERATT;
12204 /* Indicate polling handles this ERATT */
12205 phba->hba_flag |= HBA_ERATT_HANDLED;
12206 return 1;
b4c02652
JS
12207}
12208
da0436e9
JS
12209/**
12210 * lpfc_sli4_eratt_read - read sli-4 error attention events
12211 * @phba: Pointer to HBA context.
12212 *
12213 * This function is called to read the SLI4 device error attention registers
12214 * for possible error attention events. The caller must hold the hostlock
12215 * with spin_lock_irq().
12216 *
25985edc 12217 * This function returns 1 when there is Error Attention in the Host Attention
da0436e9
JS
12218 * Register and returns 0 otherwise.
12219 **/
12220static int
12221lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12222{
12223 uint32_t uerr_sta_hi, uerr_sta_lo;
2fcee4bf
JS
12224 uint32_t if_type, portsmphr;
12225 struct lpfc_register portstat_reg;
da0436e9 12226
2fcee4bf
JS
12227 /*
12228 * For now, use the SLI4 device internal unrecoverable error
da0436e9
JS
12229 * registers for error attention. This can be changed later.
12230 */
2fcee4bf
JS
12231 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12232 switch (if_type) {
12233 case LPFC_SLI_INTF_IF_TYPE_0:
9940b97b
JS
12234 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12235 &uerr_sta_lo) ||
12236 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12237 &uerr_sta_hi)) {
12238 phba->work_hs |= UNPLUG_ERR;
12239 phba->work_ha |= HA_ERATT;
12240 phba->hba_flag |= HBA_ERATT_HANDLED;
12241 return 1;
12242 }
2fcee4bf
JS
12243 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12244 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12246 "1423 HBA Unrecoverable error: "
12247 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12248 "ue_mask_lo_reg=0x%x, "
12249 "ue_mask_hi_reg=0x%x\n",
12250 uerr_sta_lo, uerr_sta_hi,
12251 phba->sli4_hba.ue_mask_lo,
12252 phba->sli4_hba.ue_mask_hi);
12253 phba->work_status[0] = uerr_sta_lo;
12254 phba->work_status[1] = uerr_sta_hi;
12255 phba->work_ha |= HA_ERATT;
12256 phba->hba_flag |= HBA_ERATT_HANDLED;
12257 return 1;
12258 }
12259 break;
12260 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 12261 case LPFC_SLI_INTF_IF_TYPE_6:
9940b97b
JS
12262 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12263 &portstat_reg.word0) ||
12264 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12265 &portsmphr)){
12266 phba->work_hs |= UNPLUG_ERR;
12267 phba->work_ha |= HA_ERATT;
12268 phba->hba_flag |= HBA_ERATT_HANDLED;
12269 return 1;
12270 }
2fcee4bf
JS
12271 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12272 phba->work_status[0] =
12273 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12274 phba->work_status[1] =
12275 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2e90f4b5 12277 "2885 Port Status Event: "
2fcee4bf
JS
12278 "port status reg 0x%x, "
12279 "port smphr reg 0x%x, "
12280 "error 1=0x%x, error 2=0x%x\n",
12281 portstat_reg.word0,
12282 portsmphr,
12283 phba->work_status[0],
12284 phba->work_status[1]);
12285 phba->work_ha |= HA_ERATT;
12286 phba->hba_flag |= HBA_ERATT_HANDLED;
12287 return 1;
12288 }
12289 break;
12290 case LPFC_SLI_INTF_IF_TYPE_1:
12291 default:
a747c9ce 12292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
12293 "2886 HBA Error Attention on unsupported "
12294 "if type %d.", if_type);
a747c9ce 12295 return 1;
da0436e9 12296 }
2fcee4bf 12297
da0436e9
JS
12298 return 0;
12299}
12300
e59058c4 12301/**
3621a710 12302 * lpfc_sli_check_eratt - check error attention events
9399627f
JS
12303 * @phba: Pointer to HBA context.
12304 *
3772a991 12305 * This function is called from timer soft interrupt context to check HBA's
9399627f
JS
12306 * error attention register bit for error attention events.
12307 *
25985edc 12308 * This function returns 1 when there is Error Attention in the Host Attention
9399627f
JS
12309 * Register and returns 0 otherwise.
12310 **/
12311int
12312lpfc_sli_check_eratt(struct lpfc_hba *phba)
12313{
12314 uint32_t ha_copy;
12315
12316 /* If somebody is waiting to handle an eratt, don't process it
12317 * here. The brdkill function will do this.
12318 */
12319 if (phba->link_flag & LS_IGNORE_ERATT)
12320 return 0;
12321
12322 /* Check if interrupt handler handles this ERATT */
12323 spin_lock_irq(&phba->hbalock);
12324 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12325 /* Interrupt handler has handled ERATT */
12326 spin_unlock_irq(&phba->hbalock);
12327 return 0;
12328 }
12329
a257bf90
JS
12330 /*
12331 * If there is deferred error attention, do not check for error
12332 * attention
12333 */
12334 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12335 spin_unlock_irq(&phba->hbalock);
12336 return 0;
12337 }
12338
3772a991
JS
12339 /* If PCI channel is offline, don't process it */
12340 if (unlikely(pci_channel_offline(phba->pcidev))) {
9399627f 12341 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12342 return 0;
12343 }
12344
12345 switch (phba->sli_rev) {
12346 case LPFC_SLI_REV2:
12347 case LPFC_SLI_REV3:
12348 /* Read chip Host Attention (HA) register */
12349 ha_copy = lpfc_sli_eratt_read(phba);
12350 break;
da0436e9 12351 case LPFC_SLI_REV4:
2fcee4bf 12352 /* Read device Unrecoverable Error (UERR) registers */
da0436e9
JS
12353 ha_copy = lpfc_sli4_eratt_read(phba);
12354 break;
3772a991
JS
12355 default:
12356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12357 "0299 Invalid SLI revision (%d)\n",
12358 phba->sli_rev);
12359 ha_copy = 0;
12360 break;
9399627f
JS
12361 }
12362 spin_unlock_irq(&phba->hbalock);
3772a991
JS
12363
12364 return ha_copy;
12365}
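
/*
 * Illustrative sketch, not part of the driver: the polling pattern the
 * kernel-doc above describes. From timer (soft interrupt) context the
 * caller only detects the error attention event and defers the actual
 * recovery to the worker thread.
 */
static __maybe_unused void
lpfc_example_poll_eratt(struct lpfc_hba *phba)
{
	/* returns nonzero when an ERATT event needs servicing */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}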
12366
12367/**
12368 * lpfc_intr_state_check - Check device state for interrupt handling
12369 * @phba: Pointer to HBA context.
12370 *
12371 * This inline routine checks whether a device or its PCI slot is in a state
12372 * in which the interrupt should be handled.
12373 *
12374 * This function returns 0 if the device or the PCI slot is in a state that
12375 * interrupt should be handled, otherwise -EIO.
12376 */
12377static inline int
12378lpfc_intr_state_check(struct lpfc_hba *phba)
12379{
12380 /* If the pci channel is offline, ignore all the interrupts */
12381 if (unlikely(pci_channel_offline(phba->pcidev)))
12382 return -EIO;
12383
12384 /* Update device level interrupt statistics */
12385 phba->sli.slistat.sli_intr++;
12386
12387 /* Ignore all interrupts during initialization. */
12388 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12389 return -EIO;
12390
9399627f
JS
12391 return 0;
12392}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if a deferred error condition is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
			    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			      HS_FFER6 | HS_FFER7 | HS_FFER8) &
			     phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->ctx_buf);
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
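
/*
 * Illustrative wiring sketch (assumed setup code, not part of this file):
 * MSI-X mode gives the slow-path and fast-path handlers above their own
 * vectors, while MSI/INTx mode registers the combined device-level
 * handler.  The vector variables and name strings below are assumptions
 * for the example.
 *
 *	rc = request_irq(sp_vector, lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc:sp", phba);
 *	rc = request_irq(fp_vector, lpfc_sli_fp_intr_handler, 0,
 *			 "lpfc:fp", phba);
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */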

/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
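
/*
 * Note the drain pattern shared by the two *_xri_abort_event_proc
 * routines above: each cq_event is unlinked from its work list under
 * hbalock, but the notification callback and the release back to the
 * free pool run with the lock dropped, so per-event processing never
 * holds hbalock.
 */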

/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

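	/*
	 * Copy the command iocbq starting at its 'iocb' member: the fields
	 * ahead of it (such as the list head and cq_event) belong to the
	 * response iocbq and must be preserved.
	 */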
	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}

inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}
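
/*
 * cq_event entries created here are queued on the slow-path work lists;
 * once the worker thread consumes them they are returned to the free
 * pool via lpfc_sli4_cq_event_release() (see the *_xri_abort_event_proc
 * routines above).
 */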

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now let's get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_FCP:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME:
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support)
			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		else
			lpfc_sli4_nvme_xri_aborted(phba, wcqe);

		workposted = false;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}

#define FC_RCTL_MDS_DIAGS	0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It will check the MajorCode and MinorCode to determine this is
 * for a completion event on a completion queue; if not, an error shall be
 * logged and the routine just returns. Otherwise, it looks up the
 * corresponding completion queue and schedules that queue's work item to
 * process its entries.
 *
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
	struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (!queue_work(phba->wq, &cq->spwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0390 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, smp_processor_id());
}

/**
 * lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @work: Pointer to the work_struct embedded in the completion queue.
 *
 * This routine runs in process context to drain the slow-path completion
 * queue whose work item was scheduled by lpfc_sli4_sp_handle_eqe(). It
 * dispatches each entry to the proper completion handler according to the
 * queue type, rearms the completion queue, and then returns.
 *
 **/
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq =
		container_of(work, struct lpfc_queue, spwork);
	struct lpfc_hba *phba = cq->phba;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ccount = 0;

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ccount % cq->entry_repost))
				break;
			cq->CQ_mbox++;
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			if (cq->subtype == LPFC_FCP ||
			    cq->subtype == LPFC_NVME) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
				if (phba->ktime_on)
					cq->isr_timestamp = ktime_get_ns();
				else
					cq->isr_timestamp = 0;
#endif
				workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
								      cqe);
			} else {
				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
								      cqe);
			}
			if (!(++ccount % cq->entry_repost))
				break;
		}

		/* Track the max number of CQEs processed in 1 EQ */
		if (ccount > cq->CQ_max_cqe)
			cq->CQ_max_cqe = ccount;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ccount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flush and rearm the CQ */
	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
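
/*
 * Design note: the "++ccount % cq->entry_repost" checks above cap how many
 * CQEs a single invocation of this work item consumes, so one busy CQ
 * cannot monopolize the workqueue.  Remaining entries are picked up when
 * the rearmed queue raises a new event and the work is rescheduled.
 */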

/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the matching fast-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"mismatched qid: wcqe-qid=x%x\n", hba_wqid);
}

/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf,
				cq->isr_timestamp);
			return false;
		}
drop:
		lpfc_in_buf_free(phba, &dma_buf->dbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
13981
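/*
 * Editor's illustrative sketch (not part of the driver): how the 24-bit
 * F_CTL field is assembled from the three frame-header bytes and checked,
 * as in lpfc_sli4_nvmet_handle_rcqe() above. The ex_ names and the bit
 * values are placeholders standing in for the FC_FC_* constants in fc_fs.h.
 */
#include <stdint.h>

#define EX_FC_FIRST_SEQ	0x200000	/* placeholder bit positions */
#define EX_FC_END_SEQ	0x080000
#define EX_FC_SEQ_INIT	0x010000

static int ex_fctl_ok(const uint8_t fh_f_ctl[3])
{
	uint32_t fctl = fh_f_ctl[0] << 16 | fh_f_ctl[1] << 8 | fh_f_ctl[2];
	uint32_t need = EX_FC_FIRST_SEQ | EX_FC_END_SEQ | EX_FC_SEQ_INIT;

	/* a first-burst FCP command must carry all three bits set */
	return (fctl & need) == need;
}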
4f774513 13982/**
895427bd 13983 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
4f774513
JS
 * @phba: Pointer to HBA context object.
 13984 * @cq: Pointer to the completion queue.
 13985 * @cqe: Pointer to fast-path completion queue entry.
 13986 *
 13987 * This routine processes a fast-path work queue completion entry from the
 13988 * fast-path event queue for FCP command response completion.
13989 **/
13990static int
895427bd 13991lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
4f774513
JS
13992 struct lpfc_cqe *cqe)
13993{
13994 struct lpfc_wcqe_release wcqe;
13995 bool workposted = false;
13996
13997 /* Copy the work queue CQE and convert endian order if needed */
48f8fdb4 13998 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
4f774513
JS
13999
 14000 /* Check and process the different types of WCQE and dispatch */
14001 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14002 case CQE_CODE_COMPL_WQE:
895427bd 14003 case CQE_CODE_NVME_ERSP:
b84daac9 14004 cq->CQ_wq++;
4f774513 14005 /* Process the WQ complete event */
98fc5dd9 14006 phba->last_completion_time = jiffies;
895427bd
JS
14007 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
14008 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14009 (struct lpfc_wcqe_complete *)&wcqe);
14010 if (cq->subtype == LPFC_NVME_LS)
14011 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
4f774513
JS
14012 (struct lpfc_wcqe_complete *)&wcqe);
14013 break;
14014 case CQE_CODE_RELEASE_WQE:
b84daac9 14015 cq->CQ_release_wqe++;
4f774513
JS
14016 /* Process the WQ release event */
14017 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14018 (struct lpfc_wcqe_release *)&wcqe);
14019 break;
14020 case CQE_CODE_XRI_ABORTED:
b84daac9 14021 cq->CQ_xri_aborted++;
4f774513 14022 /* Process the WQ XRI abort event */
bc73905a 14023 phba->last_completion_time = jiffies;
4f774513
JS
14024 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14025 (struct sli4_wcqe_xri_aborted *)&wcqe);
14026 break;
895427bd
JS
14027 case CQE_CODE_RECEIVE_V1:
14028 case CQE_CODE_RECEIVE:
14029 phba->last_completion_time = jiffies;
2d7dbc4c
JS
14030 if (cq->subtype == LPFC_NVMET) {
14031 workposted = lpfc_sli4_nvmet_handle_rcqe(
14032 phba, cq, (struct lpfc_rcqe *)&wcqe);
14033 }
895427bd 14034 break;
4f774513
JS
14035 default:
14036 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
895427bd 14037 "0144 Not a valid CQE code: x%x\n",
4f774513
JS
14038 bf_get(lpfc_wcqe_c_code, &wcqe));
14039 break;
14040 }
14041 return workposted;
14042}
14043
14044/**
67d12733 14045 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
4f774513
JS
14046 * @phba: Pointer to HBA context object.
 14047 * @eqe: Pointer to fast-path event queue entry.
 * @qidx: Index of the hardware queue that owns this event queue.
 14048 *
 14049 * This routine processes an event queue entry from the fast-path event queue.
 14050 * It checks the MajorCode and MinorCode to determine whether this is a
 14051 * completion event on a completion queue; if not, an error is logged and
 14052 * the routine returns. Otherwise, it locates the corresponding completion
 14053 * queue, processes all the entries on the completion queue, rearms the
 14054 * completion queue, and then returns.
14055 **/
f485c18d 14056static void
67d12733
JS
14057lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14058 uint32_t qidx)
4f774513 14059{
895427bd 14060 struct lpfc_queue *cq = NULL;
2d7dbc4c 14061 uint16_t cqid, id;
4f774513 14062
cb5172ea 14063 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
4f774513 14064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
67d12733 14065 "0366 Not a valid completion "
4f774513 14066 "event: majorcode=x%x, minorcode=x%x\n",
cb5172ea
JS
14067 bf_get_le32(lpfc_eqe_major_code, eqe),
14068 bf_get_le32(lpfc_eqe_minor_code, eqe));
f485c18d 14069 return;
4f774513
JS
14070 }
14071
67d12733
JS
14072 /* Get the reference to the corresponding CQ */
14073 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14074
cdb42bec
JS
14075 /* First check for NVME/SCSI completion */
14076 if (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map) {
14077 /* Process NVME / NVMET command completion */
14078 cq = phba->sli4_hba.hdwq[qidx].nvme_cq;
14079 goto process_cq;
14080 }
14081
14082 if (cqid == phba->sli4_hba.hdwq[qidx].fcp_cq_map) {
14083 /* Process FCP command completion */
14084 cq = phba->sli4_hba.hdwq[qidx].fcp_cq;
14085 goto process_cq;
14086 }
14087
14088 /* Next check for NVMET completion */
2d7dbc4c
JS
14089 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14090 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14091 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14092 /* Process NVMET unsol rcv */
14093 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14094 goto process_cq;
14095 }
67d12733
JS
14096 }
14097
895427bd
JS
14098 if (phba->sli4_hba.nvmels_cq &&
14099 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14100 /* Process NVME unsol rcv */
14101 cq = phba->sli4_hba.nvmels_cq;
14102 }
14103
 14104 /* Otherwise this is a slow-path event */
14105 if (cq == NULL) {
cdb42bec
JS
14106 lpfc_sli4_sp_handle_eqe(phba, eqe,
14107 phba->sli4_hba.hdwq[qidx].hba_eq);
f485c18d 14108 return;
4f774513
JS
14109 }
14110
895427bd 14111process_cq:
4f774513
JS
14112 if (unlikely(cqid != cq->queue_id)) {
14113 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14114 "0368 Miss-matched fast-path completion "
14115 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14116 cqid, cq->queue_id);
f485c18d 14117 return;
4f774513
JS
14118 }
14119
895427bd 14120 /* Save EQ associated with this CQ */
cdb42bec 14121 cq->assoc_qp = phba->sli4_hba.hdwq[qidx].hba_eq;
895427bd 14122
f485c18d
DK
14123 if (!queue_work(phba->wq, &cq->irqwork))
14124 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14125 "0363 Cannot schedule soft IRQ "
14126 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14127 cqid, cq->queue_id, smp_processor_id());
14128}
14129
14130/**
 14131 * lpfc_sli4_hba_process_cq - Process completions on a fast-path CQ
 14132 * @work: Pointer to the irqwork member embedded in the completion queue.
 14133 *
 14134 * This routine runs from the driver workqueue. It recovers the completion
 14135 * queue from the work_struct, processes all the entries posted to the
 14136 * completion queue, and then rearms the completion queue.
14141 **/
14142static void
14143lpfc_sli4_hba_process_cq(struct work_struct *work)
14144{
14145 struct lpfc_queue *cq =
14146 container_of(work, struct lpfc_queue, irqwork);
14147 struct lpfc_hba *phba = cq->phba;
14148 struct lpfc_cqe *cqe;
14149 bool workposted = false;
14150 int ccount = 0;
14151
4f774513
JS
14152 /* Process all the entries to the CQ */
14153 while ((cqe = lpfc_sli4_cq_get(cq))) {
c8a4ce0b
DK
14154#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14155 if (phba->ktime_on)
14156 cq->isr_timestamp = ktime_get_ns();
14157 else
14158 cq->isr_timestamp = 0;
14159#endif
895427bd 14160 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
f485c18d 14161 if (!(++ccount % cq->entry_repost))
7869da18 14162 break;
4f774513
JS
14163 }
14164
b84daac9 14165 /* Track the max number of CQEs processed in 1 EQ */
f485c18d
DK
14166 if (ccount > cq->CQ_max_cqe)
14167 cq->CQ_max_cqe = ccount;
14168 cq->assoc_qp->EQ_cqe_cnt += ccount;
b84daac9 14169
4f774513 14170 /* Catch the no cq entry condition */
f485c18d 14171 if (unlikely(ccount == 0))
4f774513
JS
14172 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14173 "0369 No entry from fast-path completion "
14174 "queue fcpcqid=%d\n", cq->queue_id);
14175
 14176 /* In any case, flush and re-arm the CQ */
b71413dd 14177 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
4f774513
JS
14178
14179 /* wake up worker thread if there are works to be done */
14180 if (workposted)
14181 lpfc_worker_wake_up(phba);
14182}
14183
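/*
 * Editor's illustrative sketch (not part of the driver): the bottom-half
 * pattern used by lpfc_sli4_hba_process_cq() above. The ISR only queues
 * the work item; the handler recovers its queue with container_of() and
 * drains it outside hard-interrupt context. The ex_ names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct ex_cq {
	struct work_struct irqwork;
	/* ... completion-queue state ... */
};

static void ex_drain_cq(struct work_struct *work)
{
	struct ex_cq *cq = container_of(work, struct ex_cq, irqwork);

	/* consume CQEs for cq here, then rearm the hardware queue */
	(void)cq;
}

static void ex_cq_init(struct ex_cq *cq)
{
	INIT_WORK(&cq->irqwork, ex_drain_cq);
	/* ISR side: if (!queue_work(wq, &cq->irqwork)) log the failure */
}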
14184static void
14185lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
14186{
14187 struct lpfc_eqe *eqe;
14188
14189 /* walk all the EQ entries and drop on the floor */
14190 while ((eqe = lpfc_sli4_eq_get(eq)))
14191 ;
14192
14193 /* Clear and re-arm the EQ */
b71413dd 14194 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
4f774513
JS
14195}
14196
1ba981fd 14197
4f774513 14198/**
67d12733 14199 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
4f774513
JS
14200 * @irq: Interrupt number.
14201 * @dev_id: The device context pointer.
14202 *
14203 * This function is directly called from the PCI layer as an interrupt
14204 * service routine when device with SLI-4 interface spec is enabled with
14205 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14206 * ring event in the HBA. However, when the device is enabled with either
14207 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14208 * device-level interrupt handler. When the PCI slot is in error recovery
14209 * or the HBA is undergoing initialization, the interrupt handler will not
14210 * process the interrupt. The SCSI FCP fast-path ring event are handled in
14211 * the intrrupt context. This function is called without any lock held.
14212 * It gets the hbalock to access and update SLI data structures. Note that,
14213 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14214 * equal to that of FCP CQ index.
14215 *
67d12733
JS
14216 * The link attention and ELS ring attention events are handled
14217 * by the worker thread. The interrupt handler signals the worker thread
14218 * and returns for these events. This function is called without any lock
14219 * held. It gets the hbalock to access and update SLI data structures.
14220 *
4f774513
JS
14221 * This function returns IRQ_HANDLED when interrupt is handled else it
14222 * returns IRQ_NONE.
14223 **/
14224irqreturn_t
67d12733 14225lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
4f774513
JS
14226{
14227 struct lpfc_hba *phba;
895427bd 14228 struct lpfc_hba_eq_hdl *hba_eq_hdl;
4f774513
JS
14229 struct lpfc_queue *fpeq;
14230 struct lpfc_eqe *eqe;
14231 unsigned long iflag;
14232 int ecount = 0;
895427bd 14233 int hba_eqidx;
4f774513
JS
14234
14235 /* Get the driver's phba structure from the dev_id */
895427bd
JS
14236 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14237 phba = hba_eq_hdl->phba;
14238 hba_eqidx = hba_eq_hdl->idx;
4f774513
JS
14239
14240 if (unlikely(!phba))
14241 return IRQ_NONE;
cdb42bec 14242 if (unlikely(!phba->sli4_hba.hdwq))
5350d872 14243 return IRQ_NONE;
4f774513
JS
14244
14245 /* Get to the EQ struct associated with this vector */
cdb42bec 14246 fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
2e90f4b5
JS
14247 if (unlikely(!fpeq))
14248 return IRQ_NONE;
4f774513 14249
ba20c853 14250 if (lpfc_fcp_look_ahead) {
895427bd 14251 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
b71413dd 14252 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
ba20c853 14253 else {
895427bd 14254 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
ba20c853
JS
14255 return IRQ_NONE;
14256 }
14257 }
14258
4f774513
JS
14259 /* Check device state for handling interrupt */
14260 if (unlikely(lpfc_intr_state_check(phba))) {
14261 /* Check again for link_state with lock held */
14262 spin_lock_irqsave(&phba->hbalock, iflag);
14263 if (phba->link_state < LPFC_LINK_DOWN)
14264 /* Flush, clear interrupt, and rearm the EQ */
14265 lpfc_sli4_eq_flush(phba, fpeq);
14266 spin_unlock_irqrestore(&phba->hbalock, iflag);
ba20c853 14267 if (lpfc_fcp_look_ahead)
895427bd 14268 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
4f774513
JS
14269 return IRQ_NONE;
14270 }
14271
14272 /*
 14273 * Process all the events on the FCP fast-path EQ
14274 */
14275 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
f485c18d
DK
14276 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
14277 if (!(++ecount % fpeq->entry_repost))
7869da18 14278 break;
b84daac9 14279 fpeq->EQ_processed++;
4f774513
JS
14280 }
14281
b84daac9
JS
14282 /* Track the max number of EQEs processed in 1 intr */
14283 if (ecount > fpeq->EQ_max_eqe)
14284 fpeq->EQ_max_eqe = ecount;
14285
4f774513 14286 /* Always clear and re-arm the fast-path EQ */
b71413dd 14287 phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
4f774513
JS
14288
14289 if (unlikely(ecount == 0)) {
b84daac9 14290 fpeq->EQ_no_entry++;
ba20c853
JS
14291
14292 if (lpfc_fcp_look_ahead) {
895427bd 14293 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
ba20c853
JS
14294 return IRQ_NONE;
14295 }
14296
4f774513
JS
14297 if (phba->intr_type == MSIX)
14298 /* MSI-X treated interrupt served as no EQ share INT */
14299 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14300 "0358 MSI-X interrupt with no EQE\n");
14301 else
14302 /* Non MSI-X treated on interrupt as EQ share INT */
14303 return IRQ_NONE;
14304 }
14305
ba20c853 14306 if (lpfc_fcp_look_ahead)
895427bd
JS
14307 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14308
4f774513
JS
14309 return IRQ_HANDLED;
14310} /* lpfc_sli4_fp_intr_handler */
14311
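/*
 * Editor's illustrative sketch (not part of the driver): the
 * lpfc_fcp_look_ahead guard used in the handler above. The counter starts
 * at 1; atomic_dec_and_test() grants EQ ownership to exactly one entrant,
 * and every other entrant restores the count and backs off. The ex_ names
 * are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t ex_eq_in_use = ATOMIC_INIT(1);

static bool ex_try_claim_eq(void)
{
	if (atomic_dec_and_test(&ex_eq_in_use))
		return true;		/* we own the EQ; mask its interrupt */
	atomic_inc(&ex_eq_in_use);	/* contended: undo and retreat */
	return false;
}

/* the owner releases the EQ with atomic_inc(&ex_eq_in_use) when done */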
14312/**
14313 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14314 * @irq: Interrupt number.
14315 * @dev_id: The device context pointer.
14316 *
14317 * This function is the device-level interrupt handler to device with SLI-4
14318 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14319 * interrupt mode is enabled and there is an event in the HBA which requires
14320 * driver attention. This function invokes the slow-path interrupt attention
14321 * handling function and fast-path interrupt attention handling function in
14322 * turn to process the relevant HBA attention events. This function is called
14323 * without any lock held. It gets the hbalock to access and update SLI data
14324 * structures.
14325 *
14326 * This function returns IRQ_HANDLED when interrupt is handled, else it
14327 * returns IRQ_NONE.
14328 **/
14329irqreturn_t
14330lpfc_sli4_intr_handler(int irq, void *dev_id)
14331{
14332 struct lpfc_hba *phba;
67d12733
JS
14333 irqreturn_t hba_irq_rc;
14334 bool hba_handled = false;
895427bd 14335 int qidx;
4f774513
JS
14336
14337 /* Get the driver's phba structure from the dev_id */
14338 phba = (struct lpfc_hba *)dev_id;
14339
14340 if (unlikely(!phba))
14341 return IRQ_NONE;
14342
4f774513
JS
14343 /*
14344 * Invoke fast-path host attention interrupt handling as appropriate.
14345 */
cdb42bec 14346 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
67d12733 14347 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
895427bd 14348 &phba->sli4_hba.hba_eq_hdl[qidx]);
67d12733 14349 if (hba_irq_rc == IRQ_HANDLED)
1ba981fd
JS
14350 hba_handled |= true;
14351 }
14352
67d12733 14353 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
4f774513
JS
14354} /* lpfc_sli4_intr_handler */
14355
14356/**
14357 * lpfc_sli4_queue_free - free a queue structure and associated memory
14358 * @queue: The queue structure to free.
14359 *
b595076a 14360 * This function frees a queue structure and the DMAable memory used for
4f774513
JS
14361 * the host resident queue. This function must be called after destroying the
14362 * queue on the HBA.
14363 **/
14364void
14365lpfc_sli4_queue_free(struct lpfc_queue *queue)
14366{
14367 struct lpfc_dmabuf *dmabuf;
14368
14369 if (!queue)
14370 return;
14371
14372 while (!list_empty(&queue->page_list)) {
14373 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14374 list);
81b96eda 14375 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
4f774513
JS
14376 dmabuf->virt, dmabuf->phys);
14377 kfree(dmabuf);
14378 }
895427bd
JS
14379 if (queue->rqbp) {
14380 lpfc_free_rq_buffer(queue->phba, queue);
14381 kfree(queue->rqbp);
14382 }
d1f525aa
JS
14383
14384 if (!list_empty(&queue->wq_list))
14385 list_del(&queue->wq_list);
14386
4f774513
JS
14387 kfree(queue);
14388 return;
14389}
14390
14391/**
14392 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14393 * @phba: The HBA that this queue is being created on.
81b96eda 14394 * @page_size: The size of a queue page
4f774513
JS
14395 * @entry_size: The size of each queue entry for this queue.
 14396 * @entry_count: The number of entries that this queue will handle.
14397 *
14398 * This function allocates a queue structure and the DMAable memory used for
14399 * the host resident queue. This function must be called before creating the
14400 * queue on the HBA.
14401 **/
14402struct lpfc_queue *
81b96eda
JS
14403lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14404 uint32_t entry_size, uint32_t entry_count)
4f774513
JS
14405{
14406 struct lpfc_queue *queue;
14407 struct lpfc_dmabuf *dmabuf;
14408 int x, total_qe_count;
14409 void *dma_pointer;
cb5172ea 14410 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
4f774513 14411
cb5172ea 14412 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 14413 hw_page_size = page_size;
cb5172ea 14414
4f774513
JS
14415 queue = kzalloc(sizeof(struct lpfc_queue) +
14416 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
14417 if (!queue)
14418 return NULL;
cb5172ea
JS
14419 queue->page_count = (ALIGN(entry_size * entry_count,
14420 hw_page_size))/hw_page_size;
895427bd
JS
14421
 14422 /* If needed, adjust page count to match the max the adapter supports */
4e87eb2f
EM
14423 if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
14424 (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
895427bd
JS
14425 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
14426
4f774513 14427 INIT_LIST_HEAD(&queue->list);
895427bd 14428 INIT_LIST_HEAD(&queue->wq_list);
6e8e1c14 14429 INIT_LIST_HEAD(&queue->wqfull_list);
4f774513
JS
14430 INIT_LIST_HEAD(&queue->page_list);
14431 INIT_LIST_HEAD(&queue->child_list);
81b96eda
JS
14432
14433 /* Set queue parameters now. If the system cannot provide memory
14434 * resources, the free routine needs to know what was allocated.
14435 */
14436 queue->entry_size = entry_size;
14437 queue->entry_count = entry_count;
14438 queue->page_size = hw_page_size;
14439 queue->phba = phba;
14440
4f774513
JS
14441 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
14442 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14443 if (!dmabuf)
14444 goto out_fail;
1aee383d
JP
14445 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
14446 hw_page_size, &dmabuf->phys,
14447 GFP_KERNEL);
4f774513
JS
14448 if (!dmabuf->virt) {
14449 kfree(dmabuf);
14450 goto out_fail;
14451 }
14452 dmabuf->buffer_tag = x;
14453 list_add_tail(&dmabuf->list, &queue->page_list);
14454 /* initialize queue's entry array */
14455 dma_pointer = dmabuf->virt;
14456 for (; total_qe_count < entry_count &&
cb5172ea 14457 dma_pointer < (hw_page_size + dmabuf->virt);
4f774513
JS
14458 total_qe_count++, dma_pointer += entry_size) {
14459 queue->qe[total_qe_count].address = dma_pointer;
14460 }
14461 }
f485c18d
DK
14462 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14463 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
4f774513 14464
64eb4dcb
JS
14465 /* entry_repost will be set during q creation */
14466
4f774513
JS
14467 return queue;
14468out_fail:
14469 lpfc_sli4_queue_free(queue);
14470 return NULL;
14471}
14472
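/*
 * Editor's illustrative sketch (not part of the driver): a typical
 * create/teardown pairing for lpfc_sli4_queue_alloc() above. The entry
 * size and count here are made-up values; real callers pass the LPFC_*
 * queue-entry constants and the adapter's supported page size.
 */
static struct lpfc_queue *ex_alloc_small_queue(struct lpfc_hba *phba)
{
	/* one 4KB page holds 64 entries of 64 bytes each */
	return lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 64, 64);
}

/*
 * Teardown mirrors creation: destroy the queue on the HBA first, then
 * call lpfc_sli4_queue_free() to release the coherent DMA pages.
 */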
962bc51b
JS
14473/**
14474 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14475 * @phba: HBA structure that indicates port to create a queue on.
14476 * @pci_barset: PCI BAR set flag.
14477 *
 14478 * This function iomaps the specified PCI BAR address to a host memory
 14479 * address, if not already done, and returns it. The returned host
 14480 * memory address can be NULL.
14481 */
14482static void __iomem *
14483lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14484{
962bc51b
JS
14485 if (!phba->pcidev)
14486 return NULL;
962bc51b
JS
14487
14488 switch (pci_barset) {
14489 case WQ_PCI_BAR_0_AND_1:
962bc51b
JS
14490 return phba->pci_bar0_memmap_p;
14491 case WQ_PCI_BAR_2_AND_3:
962bc51b
JS
14492 return phba->pci_bar2_memmap_p;
14493 case WQ_PCI_BAR_4_AND_5:
962bc51b
JS
14494 return phba->pci_bar4_memmap_p;
14495 default:
14496 break;
14497 }
14498 return NULL;
14499}
14500
173edbb2 14501/**
895427bd 14502 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
173edbb2
JS
14503 * @phba: HBA structure that indicates port to create a queue on.
14504 * @startq: The starting FCP EQ to modify
14505 *
 14506 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
43140ca6
JS
 14507 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs to be
14508 * updated in one mailbox command.
173edbb2
JS
14509 *
14510 * The @phba struct is used to send mailbox command to HBA. The @startq
14511 * is used to get the starting FCP EQ to change.
14512 * This function is asynchronous and will wait for the mailbox
14513 * command to finish before continuing.
14514 *
14515 * On success this function will return a zero. If unable to allocate enough
14516 * memory this function will return -ENOMEM. If the queue create mailbox command
14517 * fails this function will return -ENXIO.
14518 **/
a2fc4aef 14519int
0cf07f84
JS
14520lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14521 uint32_t numq, uint32_t imax)
173edbb2
JS
14522{
14523 struct lpfc_mbx_modify_eq_delay *eq_delay;
14524 LPFC_MBOXQ_t *mbox;
14525 struct lpfc_queue *eq;
14526 int cnt, rc, length, status = 0;
14527 uint32_t shdr_status, shdr_add_status;
0cf07f84 14528 uint32_t result, val;
895427bd 14529 int qidx;
173edbb2
JS
14530 union lpfc_sli4_cfg_shdr *shdr;
14531 uint16_t dmult;
14532
cdb42bec 14533 if (startq >= phba->cfg_hdw_queue)
173edbb2
JS
14534 return 0;
14535
14536 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14537 if (!mbox)
14538 return -ENOMEM;
14539 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14540 sizeof(struct lpfc_sli4_cfg_mhdr));
14541 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14542 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14543 length, LPFC_SLI4_MBX_EMBED);
14544 eq_delay = &mbox->u.mqe.un.eq_delay;
14545
 14546 /* Calculate delay multiplier from maximum interrupts per second */
cdb42bec 14547 result = imax / phba->cfg_hdw_queue;
895427bd 14548 if (result > LPFC_DMULT_CONST || result == 0)
ee02006b
JS
14549 dmult = 0;
14550 else
14551 dmult = LPFC_DMULT_CONST/result - 1;
0cf07f84
JS
14552 if (dmult > LPFC_DMULT_MAX)
14553 dmult = LPFC_DMULT_MAX;
173edbb2
JS
14554
14555 cnt = 0;
cdb42bec
JS
14556 for (qidx = startq; qidx < phba->cfg_hdw_queue; qidx++) {
14557 eq = phba->sli4_hba.hdwq[qidx].hba_eq;
173edbb2
JS
14558 if (!eq)
14559 continue;
0cf07f84 14560 eq->q_mode = imax;
173edbb2
JS
14561 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14562 eq_delay->u.request.eq[cnt].phase = 0;
14563 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14564 cnt++;
0cf07f84
JS
14565
14566 /* q_mode is only used for auto_imax */
14567 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14568 /* Use EQ Delay Register method for q_mode */
14569
14570 /* Convert for EQ Delay register */
14571 val = phba->cfg_fcp_imax;
14572 if (val) {
14573 /* First, interrupts per sec per EQ */
cdb42bec 14574 val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
0cf07f84
JS
14575
14576 /* us delay between each interrupt */
14577 val = LPFC_SEC_TO_USEC / val;
14578 }
14579 eq->q_mode = val;
14580 } else {
14581 eq->q_mode = imax;
14582 }
14583
14584 if (cnt >= numq)
173edbb2
JS
14585 break;
14586 }
14587 eq_delay->u.request.num_eq = cnt;
14588
14589 mbox->vport = phba->pport;
14590 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14591 mbox->ctx_buf = NULL;
14592 mbox->ctx_ndlp = NULL;
173edbb2
JS
14593 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14594 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14595 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14596 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14597 if (shdr_status || shdr_add_status || rc) {
14598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14599 "2512 MODIFY_EQ_DELAY mailbox failed with "
14600 "status x%x add_status x%x, mbx status x%x\n",
14601 shdr_status, shdr_add_status, rc);
14602 status = -ENXIO;
14603 }
14604 mempool_free(mbox, phba->mbox_mem_pool);
14605 return status;
14606}
14607
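/*
 * Editor's illustrative sketch (not part of the driver): the
 * delay-multiplier arithmetic in lpfc_modify_hba_eq_delay() above, with
 * made-up constants standing in for LPFC_DMULT_CONST and LPFC_DMULT_MAX.
 * E.g. imax = 80000 interrupts/sec over 4 hardware queues gives
 * result = 20000 interrupts per second per EQ, from which the multiplier
 * is derived and clamped.
 */
#include <stdint.h>

#define EX_DMULT_CONST	250000	/* placeholder, not the driver's value */
#define EX_DMULT_MAX	63	/* placeholder, not the driver's value */

static uint16_t ex_calc_dmult(uint32_t imax, uint32_t num_hdwq)
{
	uint32_t result = imax / num_hdwq;

	if (result > EX_DMULT_CONST || result == 0)
		return 0;			/* no coalescing delay */
	result = EX_DMULT_CONST / result - 1;
	return result > EX_DMULT_MAX ? EX_DMULT_MAX : result;
}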
4f774513
JS
14608/**
14609 * lpfc_eq_create - Create an Event Queue on the HBA
14610 * @phba: HBA structure that indicates port to create a queue on.
14611 * @eq: The queue structure to use to create the event queue.
14612 * @imax: The maximum interrupt per second limit.
14613 *
14614 * This function creates an event queue, as detailed in @eq, on a port,
14615 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14616 *
14617 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14618 * is used to get the entry count and entry size that are necessary to
14619 * determine the number of pages to allocate and use for this queue. This
14620 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 14621 * event queue. This function is synchronous and will wait for the mailbox
14622 * command to finish before continuing.
14623 *
14624 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14625 * memory this function will return -ENOMEM. If the queue create mailbox command
14626 * fails this function will return -ENXIO.
4f774513 14627 **/
a2fc4aef 14628int
ee02006b 14629lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
4f774513
JS
14630{
14631 struct lpfc_mbx_eq_create *eq_create;
14632 LPFC_MBOXQ_t *mbox;
14633 int rc, length, status = 0;
14634 struct lpfc_dmabuf *dmabuf;
14635 uint32_t shdr_status, shdr_add_status;
14636 union lpfc_sli4_cfg_shdr *shdr;
14637 uint16_t dmult;
49198b37
JS
14638 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14639
2e90f4b5
JS
14640 /* sanity check on queue memory */
14641 if (!eq)
14642 return -ENODEV;
49198b37
JS
14643 if (!phba->sli4_hba.pc_sli4_params.supported)
14644 hw_page_size = SLI4_PAGE_SIZE;
4f774513
JS
14645
14646 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14647 if (!mbox)
14648 return -ENOMEM;
14649 length = (sizeof(struct lpfc_mbx_eq_create) -
14650 sizeof(struct lpfc_sli4_cfg_mhdr));
14651 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14652 LPFC_MBOX_OPCODE_EQ_CREATE,
14653 length, LPFC_SLI4_MBX_EMBED);
14654 eq_create = &mbox->u.mqe.un.eq_create;
7365f6fd 14655 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
4f774513
JS
14656 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14657 eq->page_count);
14658 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14659 LPFC_EQE_SIZE);
14660 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
7365f6fd
JS
14661
14662 /* Use version 2 of CREATE_EQ if eqav is set */
14663 if (phba->sli4_hba.pc_sli4_params.eqav) {
14664 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14665 LPFC_Q_CREATE_VERSION_2);
14666 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14667 phba->sli4_hba.pc_sli4_params.eqav);
14668 }
14669
2c9c5a00
JS
14670 /* don't setup delay multiplier using EQ_CREATE */
14671 dmult = 0;
4f774513
JS
14672 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14673 dmult);
14674 switch (eq->entry_count) {
14675 default:
14676 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14677 "0360 Unsupported EQ count. (%d)\n",
14678 eq->entry_count);
14679 if (eq->entry_count < 256)
14680 return -EINVAL;
14681 /* otherwise default to smallest count (drop through) */
14682 case 256:
14683 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14684 LPFC_EQ_CNT_256);
14685 break;
14686 case 512:
14687 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14688 LPFC_EQ_CNT_512);
14689 break;
14690 case 1024:
14691 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14692 LPFC_EQ_CNT_1024);
14693 break;
14694 case 2048:
14695 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14696 LPFC_EQ_CNT_2048);
14697 break;
14698 case 4096:
14699 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14700 LPFC_EQ_CNT_4096);
14701 break;
14702 }
14703 list_for_each_entry(dmabuf, &eq->page_list, list) {
49198b37 14704 memset(dmabuf->virt, 0, hw_page_size);
4f774513
JS
14705 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14706 putPaddrLow(dmabuf->phys);
14707 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14708 putPaddrHigh(dmabuf->phys);
14709 }
14710 mbox->vport = phba->pport;
14711 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718
JS
14712 mbox->ctx_buf = NULL;
14713 mbox->ctx_ndlp = NULL;
4f774513 14714 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4f774513
JS
14715 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14716 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14717 if (shdr_status || shdr_add_status || rc) {
14718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14719 "2500 EQ_CREATE mailbox failed with "
14720 "status x%x add_status x%x, mbx status x%x\n",
14721 shdr_status, shdr_add_status, rc);
14722 status = -ENXIO;
14723 }
14724 eq->type = LPFC_EQ;
14725 eq->subtype = LPFC_NONE;
14726 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14727 if (eq->queue_id == 0xFFFF)
14728 status = -ENXIO;
14729 eq->host_index = 0;
14730 eq->hba_index = 0;
64eb4dcb 14731 eq->entry_repost = LPFC_EQ_REPOST;
4f774513 14732
8fa38513 14733 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
14734 return status;
14735}
14736
14737/**
14738 * lpfc_cq_create - Create a Completion Queue on the HBA
14739 * @phba: HBA structure that indicates port to create a queue on.
14740 * @cq: The queue structure to use to create the completion queue.
14741 * @eq: The event queue to bind this completion queue to.
14742 *
14743 * This function creates a completion queue, as detailed in @wq, on a port,
14744 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14745 *
14746 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14747 * is used to get the entry count and entry size that are necessary to
14748 * determine the number of pages to allocate and use for this queue. The @eq
14749 * is used to indicate which event queue to bind this completion queue to. This
14750 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 14751 * completion queue. This function is synchronous and will wait for the mailbox
14752 * command to finish before continuing.
14753 *
14754 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
14755 * memory this function will return -ENOMEM. If the queue create mailbox command
14756 * fails this function will return -ENXIO.
4f774513 14757 **/
a2fc4aef 14758int
4f774513
JS
14759lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14760 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14761{
14762 struct lpfc_mbx_cq_create *cq_create;
14763 struct lpfc_dmabuf *dmabuf;
14764 LPFC_MBOXQ_t *mbox;
14765 int rc, length, status = 0;
14766 uint32_t shdr_status, shdr_add_status;
14767 union lpfc_sli4_cfg_shdr *shdr;
49198b37 14768
2e90f4b5
JS
14769 /* sanity check on queue memory */
14770 if (!cq || !eq)
14771 return -ENODEV;
49198b37 14772
4f774513
JS
14773 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14774 if (!mbox)
14775 return -ENOMEM;
14776 length = (sizeof(struct lpfc_mbx_cq_create) -
14777 sizeof(struct lpfc_sli4_cfg_mhdr));
14778 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14779 LPFC_MBOX_OPCODE_CQ_CREATE,
14780 length, LPFC_SLI4_MBX_EMBED);
14781 cq_create = &mbox->u.mqe.un.cq_create;
5a6f133e 14782 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
4f774513
JS
14783 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14784 cq->page_count);
14785 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14786 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
5a6f133e
JS
14787 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14788 phba->sli4_hba.pc_sli4_params.cqv);
14789 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
81b96eda
JS
14790 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14791 (cq->page_size / SLI4_PAGE_SIZE));
5a6f133e
JS
14792 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14793 eq->queue_id);
7365f6fd
JS
14794 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14795 phba->sli4_hba.pc_sli4_params.cqav);
5a6f133e
JS
14796 } else {
14797 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14798 eq->queue_id);
14799 }
4f774513 14800 switch (cq->entry_count) {
81b96eda
JS
14801 case 2048:
14802 case 4096:
14803 if (phba->sli4_hba.pc_sli4_params.cqv ==
14804 LPFC_Q_CREATE_VERSION_2) {
14805 cq_create->u.request.context.lpfc_cq_context_count =
14806 cq->entry_count;
14807 bf_set(lpfc_cq_context_count,
14808 &cq_create->u.request.context,
14809 LPFC_CQ_CNT_WORD7);
14810 break;
14811 }
14812 /* Fall Thru */
4f774513
JS
14813 default:
14814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2ea259ee 14815 "0361 Unsupported CQ count: "
64eb4dcb 14816 "entry cnt %d sz %d pg cnt %d\n",
2ea259ee 14817 cq->entry_count, cq->entry_size,
64eb4dcb 14818 cq->page_count);
4f4c1863
JS
14819 if (cq->entry_count < 256) {
14820 status = -EINVAL;
14821 goto out;
14822 }
4f774513
JS
14823 /* otherwise default to smallest count (drop through) */
14824 case 256:
14825 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14826 LPFC_CQ_CNT_256);
14827 break;
14828 case 512:
14829 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14830 LPFC_CQ_CNT_512);
14831 break;
14832 case 1024:
14833 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14834 LPFC_CQ_CNT_1024);
14835 break;
14836 }
14837 list_for_each_entry(dmabuf, &cq->page_list, list) {
81b96eda 14838 memset(dmabuf->virt, 0, cq->page_size);
4f774513
JS
14839 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14840 putPaddrLow(dmabuf->phys);
14841 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14842 putPaddrHigh(dmabuf->phys);
14843 }
14844 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14845
14846 /* The IOCTL status is embedded in the mailbox subheader. */
4f774513
JS
14847 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14848 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14849 if (shdr_status || shdr_add_status || rc) {
14850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14851 "2501 CQ_CREATE mailbox failed with "
14852 "status x%x add_status x%x, mbx status x%x\n",
14853 shdr_status, shdr_add_status, rc);
14854 status = -ENXIO;
14855 goto out;
14856 }
14857 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14858 if (cq->queue_id == 0xFFFF) {
14859 status = -ENXIO;
14860 goto out;
14861 }
14862 /* link the cq onto the parent eq child list */
14863 list_add_tail(&cq->list, &eq->child_list);
14864 /* Set up completion queue's type and subtype */
14865 cq->type = type;
14866 cq->subtype = subtype;
14867 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
2a622bfb 14868 cq->assoc_qid = eq->queue_id;
4f774513
JS
14869 cq->host_index = 0;
14870 cq->hba_index = 0;
64eb4dcb 14871 cq->entry_repost = LPFC_CQ_REPOST;
4f774513 14872
8fa38513
JS
14873out:
14874 mempool_free(mbox, phba->mbox_mem_pool);
4f774513
JS
14875 return status;
14876}
14877
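/*
 * Editor's illustrative sketch (not part of the driver): splitting a
 * queue page's bus address into the two 32-bit words the CQ_CREATE
 * mailbox expects, mirroring the putPaddrLow()/putPaddrHigh() pairs used
 * in lpfc_cq_create() above. The ex_ names are hypothetical.
 */
#include <stdint.h>

struct ex_dma_address {
	uint32_t addr_lo;
	uint32_t addr_hi;
};

static struct ex_dma_address ex_split_paddr(uint64_t phys)
{
	struct ex_dma_address da;

	da.addr_lo = (uint32_t)(phys & 0xffffffffu);	/* putPaddrLow  */
	da.addr_hi = (uint32_t)(phys >> 32);		/* putPaddrHigh */
	return da;
}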
2d7dbc4c
JS
14878/**
14879 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14880 * @phba: HBA structure that indicates port to create a queue on.
14881 * @cqp: The queue structure array to use to create the completion queues.
cdb42bec 14882 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
2d7dbc4c
JS
14883 *
 14884 * This function creates a set of completion queues to support MRQ
 14885 * as detailed in @cqp, on a port,
14886 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
14887 *
14888 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14889 * is used to get the entry count and entry size that are necessary to
14890 * determine the number of pages to allocate and use for this queue. The @eq
14891 * is used to indicate which event queue to bind this completion queue to. This
14892 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14893 * completion queue. This function is asynchronous and will wait for the mailbox
14894 * command to finish before continuing.
14895 *
14896 * On success this function will return a zero. If unable to allocate enough
14897 * memory this function will return -ENOMEM. If the queue create mailbox command
14898 * fails this function will return -ENXIO.
14899 **/
14900int
14901lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cdb42bec
JS
14902 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
14903 uint32_t subtype)
2d7dbc4c
JS
14904{
14905 struct lpfc_queue *cq;
14906 struct lpfc_queue *eq;
14907 struct lpfc_mbx_cq_create_set *cq_set;
14908 struct lpfc_dmabuf *dmabuf;
14909 LPFC_MBOXQ_t *mbox;
14910 int rc, length, alloclen, status = 0;
14911 int cnt, idx, numcq, page_idx = 0;
14912 uint32_t shdr_status, shdr_add_status;
14913 union lpfc_sli4_cfg_shdr *shdr;
14914 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14915
14916 /* sanity check on queue memory */
14917 numcq = phba->cfg_nvmet_mrq;
cdb42bec 14918 if (!cqp || !hdwq || !numcq)
2d7dbc4c 14919 return -ENODEV;
2d7dbc4c
JS
14920
14921 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14922 if (!mbox)
14923 return -ENOMEM;
14924
14925 length = sizeof(struct lpfc_mbx_cq_create_set);
14926 length += ((numcq * cqp[0]->page_count) *
14927 sizeof(struct dma_address));
14928 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14929 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14930 LPFC_SLI4_MBX_NEMBED);
14931 if (alloclen < length) {
14932 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14933 "3098 Allocated DMA memory size (%d) is "
14934 "less than the requested DMA memory size "
14935 "(%d)\n", alloclen, length);
14936 status = -ENOMEM;
14937 goto out;
14938 }
14939 cq_set = mbox->sge_array->addr[0];
14940 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14941 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14942
14943 for (idx = 0; idx < numcq; idx++) {
14944 cq = cqp[idx];
cdb42bec 14945 eq = hdwq[idx].hba_eq;
2d7dbc4c
JS
14946 if (!cq || !eq) {
14947 status = -ENOMEM;
14948 goto out;
14949 }
81b96eda
JS
14950 if (!phba->sli4_hba.pc_sli4_params.supported)
14951 hw_page_size = cq->page_size;
2d7dbc4c
JS
14952
14953 switch (idx) {
14954 case 0:
14955 bf_set(lpfc_mbx_cq_create_set_page_size,
14956 &cq_set->u.request,
14957 (hw_page_size / SLI4_PAGE_SIZE));
14958 bf_set(lpfc_mbx_cq_create_set_num_pages,
14959 &cq_set->u.request, cq->page_count);
14960 bf_set(lpfc_mbx_cq_create_set_evt,
14961 &cq_set->u.request, 1);
14962 bf_set(lpfc_mbx_cq_create_set_valid,
14963 &cq_set->u.request, 1);
14964 bf_set(lpfc_mbx_cq_create_set_cqe_size,
14965 &cq_set->u.request, 0);
14966 bf_set(lpfc_mbx_cq_create_set_num_cq,
14967 &cq_set->u.request, numcq);
7365f6fd
JS
14968 bf_set(lpfc_mbx_cq_create_set_autovalid,
14969 &cq_set->u.request,
14970 phba->sli4_hba.pc_sli4_params.cqav);
2d7dbc4c 14971 switch (cq->entry_count) {
81b96eda
JS
14972 case 2048:
14973 case 4096:
14974 if (phba->sli4_hba.pc_sli4_params.cqv ==
14975 LPFC_Q_CREATE_VERSION_2) {
14976 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14977 &cq_set->u.request,
14978 cq->entry_count);
14979 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14980 &cq_set->u.request,
14981 LPFC_CQ_CNT_WORD7);
14982 break;
14983 }
14984 /* Fall Thru */
2d7dbc4c
JS
14985 default:
14986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14987 "3118 Bad CQ count. (%d)\n",
14988 cq->entry_count);
14989 if (cq->entry_count < 256) {
14990 status = -EINVAL;
14991 goto out;
14992 }
14993 /* otherwise default to smallest (drop thru) */
14994 case 256:
14995 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14996 &cq_set->u.request, LPFC_CQ_CNT_256);
14997 break;
14998 case 512:
14999 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15000 &cq_set->u.request, LPFC_CQ_CNT_512);
15001 break;
15002 case 1024:
15003 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15004 &cq_set->u.request, LPFC_CQ_CNT_1024);
15005 break;
15006 }
15007 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15008 &cq_set->u.request, eq->queue_id);
15009 break;
15010 case 1:
15011 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15012 &cq_set->u.request, eq->queue_id);
15013 break;
15014 case 2:
15015 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15016 &cq_set->u.request, eq->queue_id);
15017 break;
15018 case 3:
15019 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15020 &cq_set->u.request, eq->queue_id);
15021 break;
15022 case 4:
15023 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15024 &cq_set->u.request, eq->queue_id);
15025 break;
15026 case 5:
15027 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15028 &cq_set->u.request, eq->queue_id);
15029 break;
15030 case 6:
15031 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15032 &cq_set->u.request, eq->queue_id);
15033 break;
15034 case 7:
15035 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15036 &cq_set->u.request, eq->queue_id);
15037 break;
15038 case 8:
15039 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15040 &cq_set->u.request, eq->queue_id);
15041 break;
15042 case 9:
15043 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15044 &cq_set->u.request, eq->queue_id);
15045 break;
15046 case 10:
15047 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15048 &cq_set->u.request, eq->queue_id);
15049 break;
15050 case 11:
15051 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15052 &cq_set->u.request, eq->queue_id);
15053 break;
15054 case 12:
15055 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15056 &cq_set->u.request, eq->queue_id);
15057 break;
15058 case 13:
15059 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15060 &cq_set->u.request, eq->queue_id);
15061 break;
15062 case 14:
15063 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15064 &cq_set->u.request, eq->queue_id);
15065 break;
15066 case 15:
15067 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15068 &cq_set->u.request, eq->queue_id);
15069 break;
15070 }
15071
15072 /* link the cq onto the parent eq child list */
15073 list_add_tail(&cq->list, &eq->child_list);
15074 /* Set up completion queue's type and subtype */
15075 cq->type = type;
15076 cq->subtype = subtype;
15077 cq->assoc_qid = eq->queue_id;
15078 cq->host_index = 0;
15079 cq->hba_index = 0;
64eb4dcb 15080 cq->entry_repost = LPFC_CQ_REPOST;
81b96eda 15081 cq->chann = idx;
2d7dbc4c
JS
15082
15083 rc = 0;
15084 list_for_each_entry(dmabuf, &cq->page_list, list) {
15085 memset(dmabuf->virt, 0, hw_page_size);
15086 cnt = page_idx + dmabuf->buffer_tag;
15087 cq_set->u.request.page[cnt].addr_lo =
15088 putPaddrLow(dmabuf->phys);
15089 cq_set->u.request.page[cnt].addr_hi =
15090 putPaddrHigh(dmabuf->phys);
15091 rc++;
15092 }
15093 page_idx += rc;
15094 }
15095
15096 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15097
15098 /* The IOCTL status is embedded in the mailbox subheader. */
15099 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15100 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15101 if (shdr_status || shdr_add_status || rc) {
15102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15103 "3119 CQ_CREATE_SET mailbox failed with "
15104 "status x%x add_status x%x, mbx status x%x\n",
15105 shdr_status, shdr_add_status, rc);
15106 status = -ENXIO;
15107 goto out;
15108 }
15109 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15110 if (rc == 0xFFFF) {
15111 status = -ENXIO;
15112 goto out;
15113 }
15114
15115 for (idx = 0; idx < numcq; idx++) {
15116 cq = cqp[idx];
15117 cq->queue_id = rc + idx;
15118 }
15119
15120out:
15121 lpfc_sli4_mbox_cmd_free(phba, mbox);
15122 return status;
15123}
15124
b19a061a
JS
15125/**
15126 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15127 * @phba: HBA structure that indicates port to create a queue on.
15128 * @mq: The queue structure to use to create the mailbox queue.
15129 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 15130 * @cq: The completion queue to associate with this mailbox queue.
15131 *
15132 * This function provides failback (fb) functionality when the
 15133 * mq_create_ext fails on older FW generations. Its purpose is identical
15134 * to mq_create_ext otherwise.
15135 *
15136 * This routine cannot fail as all attributes were previously accessed and
15137 * initialized in mq_create_ext.
15138 **/
15139static void
15140lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15141 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15142{
15143 struct lpfc_mbx_mq_create *mq_create;
15144 struct lpfc_dmabuf *dmabuf;
15145 int length;
15146
15147 length = (sizeof(struct lpfc_mbx_mq_create) -
15148 sizeof(struct lpfc_sli4_cfg_mhdr));
15149 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15150 LPFC_MBOX_OPCODE_MQ_CREATE,
15151 length, LPFC_SLI4_MBX_EMBED);
15152 mq_create = &mbox->u.mqe.un.mq_create;
15153 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15154 mq->page_count);
15155 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15156 cq->queue_id);
15157 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15158 switch (mq->entry_count) {
15159 case 16:
5a6f133e
JS
15160 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15161 LPFC_MQ_RING_SIZE_16);
b19a061a
JS
15162 break;
15163 case 32:
5a6f133e
JS
15164 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15165 LPFC_MQ_RING_SIZE_32);
b19a061a
JS
15166 break;
15167 case 64:
5a6f133e
JS
15168 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15169 LPFC_MQ_RING_SIZE_64);
b19a061a
JS
15170 break;
15171 case 128:
5a6f133e
JS
15172 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15173 LPFC_MQ_RING_SIZE_128);
b19a061a
JS
15174 break;
15175 }
15176 list_for_each_entry(dmabuf, &mq->page_list, list) {
15177 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15178 putPaddrLow(dmabuf->phys);
15179 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15180 putPaddrHigh(dmabuf->phys);
15181 }
15182}
15183
04c68496
JS
15184/**
15185 * lpfc_mq_create - Create a mailbox Queue on the HBA
15186 * @phba: HBA structure that indicates port to create a queue on.
15187 * @mq: The queue structure to use to create the mailbox queue.
b19a061a
JS
 15188 * @cq: The completion queue to associate with this mailbox queue.
15189 * @subtype: The queue's subtype.
04c68496
JS
15190 *
15191 * This function creates a mailbox queue, as detailed in @mq, on a port,
15192 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15193 *
15194 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15195 * is used to get the entry count and entry size that are necessary to
15196 * determine the number of pages to allocate and use for this queue. This
15197 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 15198 * mailbox queue. This function is synchronous and will wait for the mailbox
15199 * command to finish before continuing.
15200 *
15201 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15202 * memory this function will return -ENOMEM. If the queue create mailbox command
15203 * fails this function will return -ENXIO.
04c68496 15204 **/
b19a061a 15205int32_t
04c68496
JS
15206lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15207 struct lpfc_queue *cq, uint32_t subtype)
15208{
15209 struct lpfc_mbx_mq_create *mq_create;
b19a061a 15210 struct lpfc_mbx_mq_create_ext *mq_create_ext;
04c68496
JS
15211 struct lpfc_dmabuf *dmabuf;
15212 LPFC_MBOXQ_t *mbox;
15213 int rc, length, status = 0;
15214 uint32_t shdr_status, shdr_add_status;
15215 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15216 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
04c68496 15217
2e90f4b5
JS
15218 /* sanity check on queue memory */
15219 if (!mq || !cq)
15220 return -ENODEV;
49198b37
JS
15221 if (!phba->sli4_hba.pc_sli4_params.supported)
15222 hw_page_size = SLI4_PAGE_SIZE;
b19a061a 15223
04c68496
JS
15224 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15225 if (!mbox)
15226 return -ENOMEM;
b19a061a 15227 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
04c68496
JS
15228 sizeof(struct lpfc_sli4_cfg_mhdr));
15229 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
b19a061a 15230 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
04c68496 15231 length, LPFC_SLI4_MBX_EMBED);
b19a061a
JS
15232
15233 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
5a6f133e 15234 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
70f3c073
JS
15235 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15236 &mq_create_ext->u.request, mq->page_count);
15237 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15238 &mq_create_ext->u.request, 1);
15239 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
b19a061a
JS
15240 &mq_create_ext->u.request, 1);
15241 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15242 &mq_create_ext->u.request, 1);
70f3c073
JS
15243 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15244 &mq_create_ext->u.request, 1);
15245 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15246 &mq_create_ext->u.request, 1);
b19a061a 15247 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
5a6f133e
JS
15248 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15249 phba->sli4_hba.pc_sli4_params.mqv);
15250 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15251 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15252 cq->queue_id);
15253 else
15254 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15255 cq->queue_id);
04c68496
JS
15256 switch (mq->entry_count) {
15257 default:
15258 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15259 "0362 Unsupported MQ count. (%d)\n",
15260 mq->entry_count);
4f4c1863
JS
15261 if (mq->entry_count < 16) {
15262 status = -EINVAL;
15263 goto out;
15264 }
04c68496
JS
15265 /* otherwise default to smallest count (drop through) */
15266 case 16:
5a6f133e
JS
15267 bf_set(lpfc_mq_context_ring_size,
15268 &mq_create_ext->u.request.context,
15269 LPFC_MQ_RING_SIZE_16);
04c68496
JS
15270 break;
15271 case 32:
5a6f133e
JS
15272 bf_set(lpfc_mq_context_ring_size,
15273 &mq_create_ext->u.request.context,
15274 LPFC_MQ_RING_SIZE_32);
04c68496
JS
15275 break;
15276 case 64:
5a6f133e
JS
15277 bf_set(lpfc_mq_context_ring_size,
15278 &mq_create_ext->u.request.context,
15279 LPFC_MQ_RING_SIZE_64);
04c68496
JS
15280 break;
15281 case 128:
5a6f133e
JS
15282 bf_set(lpfc_mq_context_ring_size,
15283 &mq_create_ext->u.request.context,
15284 LPFC_MQ_RING_SIZE_128);
04c68496
JS
15285 break;
15286 }
15287 list_for_each_entry(dmabuf, &mq->page_list, list) {
49198b37 15288 memset(dmabuf->virt, 0, hw_page_size);
b19a061a 15289 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
04c68496 15290 putPaddrLow(dmabuf->phys);
b19a061a 15291 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
04c68496
JS
15292 putPaddrHigh(dmabuf->phys);
15293 }
15294 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
b19a061a
JS
15295 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15296 &mq_create_ext->u.response);
15297 if (rc != MBX_SUCCESS) {
15298 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15299 "2795 MQ_CREATE_EXT failed with "
15300 "status x%x. Failback to MQ_CREATE.\n",
15301 rc);
15302 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15303 mq_create = &mbox->u.mqe.un.mq_create;
15304 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15305 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15306 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15307 &mq_create->u.response);
15308 }
15309
04c68496 15310 /* The IOCTL status is embedded in the mailbox subheader. */
04c68496
JS
15311 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15312 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15313 if (shdr_status || shdr_add_status || rc) {
15314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15315 "2502 MQ_CREATE mailbox failed with "
15316 "status x%x add_status x%x, mbx status x%x\n",
15317 shdr_status, shdr_add_status, rc);
15318 status = -ENXIO;
15319 goto out;
15320 }
04c68496
JS
15321 if (mq->queue_id == 0xFFFF) {
15322 status = -ENXIO;
15323 goto out;
15324 }
15325 mq->type = LPFC_MQ;
2a622bfb 15326 mq->assoc_qid = cq->queue_id;
04c68496
JS
15327 mq->subtype = subtype;
15328 mq->host_index = 0;
15329 mq->hba_index = 0;
64eb4dcb 15330 mq->entry_repost = LPFC_MQ_REPOST;
04c68496
JS
15331
15332 /* link the mq onto the parent cq child list */
15333 list_add_tail(&mq->list, &cq->child_list);
15334out:
8fa38513 15335 mempool_free(mbox, phba->mbox_mem_pool);
04c68496
JS
15336 return status;
15337}
15338
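/*
 * Editor's illustrative sketch (not part of the driver): the failback
 * shape of lpfc_mq_create() above. Try the extended create first; if the
 * firmware rejects it, rebuild the command as the legacy create and
 * reissue. Both ex_issue_* helpers are stand-ins that always "fail" and
 * "succeed" respectively, so the sketch is self-contained.
 */
static int ex_issue_mq_create_ext(void) { return -1; /* old firmware */ }
static int ex_issue_mq_create(void)     { return 0;  /* legacy path  */ }

static int ex_create_mq(void)
{
	int rc = ex_issue_mq_create_ext();

	if (rc != 0)
		rc = ex_issue_mq_create();	/* failback, cf. 2795 log */
	return rc;
}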
4f774513
JS
15339/**
15340 * lpfc_wq_create - Create a Work Queue on the HBA
15341 * @phba: HBA structure that indicates port to create a queue on.
15342 * @wq: The queue structure to use to create the work queue.
15343 * @cq: The completion queue to bind this work queue to.
15344 * @subtype: The subtype of the work queue indicating its functionality.
15345 *
15346 * This function creates a work queue, as detailed in @wq, on a port, described
15347 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15348 *
15349 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15350 * is used to get the entry count and entry size that are necessary to
15351 * determine the number of pages to allocate and use for this queue. The @cq
15352 * is used to indicate which completion queue to bind this work queue to. This
15353 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 15354 * work queue. This function is synchronous and will wait for the mailbox
15355 * command to finish before continuing.
15356 *
15357 * On success this function will return a zero. If unable to allocate enough
d439d286
JS
15358 * memory this function will return -ENOMEM. If the queue create mailbox command
15359 * fails this function will return -ENXIO.
4f774513 15360 **/
a2fc4aef 15361int
15362lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15363 struct lpfc_queue *cq, uint32_t subtype)
15364{
15365 struct lpfc_mbx_wq_create *wq_create;
15366 struct lpfc_dmabuf *dmabuf;
15367 LPFC_MBOXQ_t *mbox;
15368 int rc, length, status = 0;
15369 uint32_t shdr_status, shdr_add_status;
15370 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15371 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
5a6f133e 15372 struct dma_address *page;
15373 void __iomem *bar_memmap_p;
15374 uint32_t db_offset;
15375 uint16_t pci_barset;
15376 uint8_t dpp_barset;
15377 uint32_t dpp_offset;
15378 unsigned long pg_addr;
81b96eda 15379 uint8_t wq_create_version;
49198b37 15380
15381 /* sanity check on queue memory */
15382 if (!wq || !cq)
15383 return -ENODEV;
49198b37 15384 if (!phba->sli4_hba.pc_sli4_params.supported)
81b96eda 15385 hw_page_size = wq->page_size;
15386
15387 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15388 if (!mbox)
15389 return -ENOMEM;
15390 length = (sizeof(struct lpfc_mbx_wq_create) -
15391 sizeof(struct lpfc_sli4_cfg_mhdr));
15392 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15393 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15394 length, LPFC_SLI4_MBX_EMBED);
15395 wq_create = &mbox->u.mqe.un.wq_create;
5a6f133e 15396 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15397 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15398 wq->page_count);
15399 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15400 cq->queue_id);
15401
15402 /* wqv is the earliest version supported, NOT the latest */
15403 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15404 phba->sli4_hba.pc_sli4_params.wqv);
962bc51b 15405
15406 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15407 (wq->page_size > SLI4_PAGE_SIZE))
15408 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15409 else
15410 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15411
15418 switch (wq_create_version) {
0c651878 15419 case LPFC_Q_CREATE_VERSION_1:
15420 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15421 wq->entry_count);
15422 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15423 LPFC_Q_CREATE_VERSION_1);
15424
15425 switch (wq->entry_size) {
15426 default:
15427 case 64:
15428 bf_set(lpfc_mbx_wq_create_wqe_size,
15429 &wq_create->u.request_1,
15430 LPFC_WQ_WQE_SIZE_64);
15431 break;
15432 case 128:
15433 bf_set(lpfc_mbx_wq_create_wqe_size,
15434 &wq_create->u.request_1,
15435 LPFC_WQ_WQE_SIZE_128);
15436 break;
15437 }
15438 /* Request DPP by default */
15439 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15440 bf_set(lpfc_mbx_wq_create_page_size,
15441 &wq_create->u.request_1,
81b96eda 15442 (wq->page_size / SLI4_PAGE_SIZE));
5a6f133e 15443 page = wq_create->u.request_1.page;
15444 break;
15445 default:
15446 page = wq_create->u.request.page;
15447 break;
5a6f133e 15448 }
0c651878 15449
4f774513 15450 list_for_each_entry(dmabuf, &wq->page_list, list) {
49198b37 15451 memset(dmabuf->virt, 0, hw_page_size);
15452 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15453 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
4f774513 15454 }
15455
15456 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15457 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15458
15459 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15460 /* The IOCTL status is embedded in the mailbox subheader. */
15461 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15462 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15463 if (shdr_status || shdr_add_status || rc) {
15464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15465 "2503 WQ_CREATE mailbox failed with "
15466 "status x%x add_status x%x, mbx status x%x\n",
15467 shdr_status, shdr_add_status, rc);
15468 status = -ENXIO;
15469 goto out;
15470 }
15471
15472 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15473 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15474 &wq_create->u.response);
15475 else
15476 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15477 &wq_create->u.response_1);
15478
15479 if (wq->queue_id == 0xFFFF) {
15480 status = -ENXIO;
15481 goto out;
15482 }
15483
15484 wq->db_format = LPFC_DB_LIST_FORMAT;
15485 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15486 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15487 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15488 &wq_create->u.response);
15489 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15490 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15492 "3265 WQ[%d] doorbell format "
15493 "not supported: x%x\n",
15494 wq->queue_id, wq->db_format);
15495 status = -EINVAL;
15496 goto out;
15497 }
15498 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15499 &wq_create->u.response);
15500 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15501 pci_barset);
15502 if (!bar_memmap_p) {
15503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15504 "3263 WQ[%d] failed to memmap "
15505 "pci barset:x%x\n",
15506 wq->queue_id, pci_barset);
15507 status = -ENOMEM;
15508 goto out;
15509 }
15510 db_offset = wq_create->u.response.doorbell_offset;
15511 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15512 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15514 "3252 WQ[%d] doorbell offset "
15515 "not supported: x%x\n",
15516 wq->queue_id, db_offset);
15517 status = -EINVAL;
15518 goto out;
15519 }
15520 wq->db_regaddr = bar_memmap_p + db_offset;
15521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15522 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15523 "format:x%x\n", wq->queue_id,
15524 pci_barset, db_offset, wq->db_format);
15525 } else
15526 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
962bc51b 15527 } else {
15528 /* Check if DPP was honored by the firmware */
15529 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15530 &wq_create->u.response_1);
15531 if (wq->dpp_enable) {
15532 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15533 &wq_create->u.response_1);
15534 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15535 pci_barset);
15536 if (!bar_memmap_p) {
15537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15538 "3267 WQ[%d] failed to memmap "
15539 "pci barset:x%x\n",
15540 wq->queue_id, pci_barset);
15541 status = -ENOMEM;
15542 goto out;
15543 }
15544 db_offset = wq_create->u.response_1.doorbell_offset;
15545 wq->db_regaddr = bar_memmap_p + db_offset;
15546 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15547 &wq_create->u.response_1);
15548 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15549 &wq_create->u.response_1);
15550 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15551 dpp_barset);
15552 if (!bar_memmap_p) {
15553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15554 "3268 WQ[%d] failed to memmap "
15555 "pci barset:x%x\n",
15556 wq->queue_id, dpp_barset);
15557 status = -ENOMEM;
15558 goto out;
15559 }
15560 dpp_offset = wq_create->u.response_1.dpp_offset;
15561 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15562 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15563 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15564 "dpp_id:x%x dpp_barset:x%x "
15565 "dpp_offset:x%x\n",
15566 wq->queue_id, pci_barset, db_offset,
15567 wq->dpp_id, dpp_barset, dpp_offset);
15568
15569 /* Enable combined writes for DPP aperture */
15570 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15571#ifdef CONFIG_X86
15572 rc = set_memory_wc(pg_addr, 1);
15573 if (rc) {
15574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15575 "3272 Cannot setup Combined "
15576 "Write on WQ[%d] - disable DPP\n",
15577 wq->queue_id);
15578 phba->cfg_enable_dpp = 0;
15579 }
15580#else
15581 phba->cfg_enable_dpp = 0;
15582#endif
15583 } else
15584 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
962bc51b 15585 }
15586 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15587 if (wq->pring == NULL) {
15588 status = -ENOMEM;
15589 goto out;
15590 }
4f774513 15591 wq->type = LPFC_WQ;
2a622bfb 15592 wq->assoc_qid = cq->queue_id;
15593 wq->subtype = subtype;
15594 wq->host_index = 0;
15595 wq->hba_index = 0;
ff78d8f9 15596 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15597
15598 /* link the wq onto the parent cq child list */
15599 list_add_tail(&wq->list, &cq->child_list);
15600out:
8fa38513 15601 mempool_free(mbox, phba->mbox_mem_pool);
15602 return status;
15603}
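
/*
 * Editor's sketch, not driver code: a minimal caller of lpfc_wq_create()
 * as it might appear during queue setup. "els_wq" and "els_cq" are
 * hypothetical, previously allocated queues; the DPP request and doorbell
 * mapping happen inside the routine above, so callers only check rc.
 *
 *	rc = lpfc_wq_create(phba, els_wq, els_cq, LPFC_ELS);
 *	if (rc)
 *		goto out_destroy_cq;	(hypothetical unwind label)
 */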
15604
15605/**
15606 * lpfc_rq_create - Create a Receive Queue on the HBA
15607 * @phba: HBA structure that indicates port to create a queue on.
15608 * @hrq: The queue structure to use to create the header receive queue.
15609 * @drq: The queue structure to use to create the data receive queue.
15610 * @cq: The completion queue to bind this work queue to.
15611 *
 15612 * This function creates a receive buffer queue pair, as detailed in @hrq and
 15613 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
 15614 * to the HBA.
 15615 *
 15616 * The @phba struct is used to send the mailbox command to the HBA. The @drq
 15617 * and @hrq structs are used to get the entry count that is necessary to
 15618 * determine the number of pages to use for this queue. The @cq indicates
 15619 * which completion queue the buffers received on these queues are bound to.
 15620 * This function will send the RQ_CREATE mailbox command to the HBA to set up
 15621 * the receive queue pair. This function is synchronous and will wait for the
 15622 * mailbox command to finish before continuing.
 15623 *
 15624 * On success this function will return a zero. If unable to allocate enough
 15625 * memory this function will return -ENOMEM. If the queue create mailbox
 15626 * command fails this function will return -ENXIO.
4f774513 15627 **/
a2fc4aef 15628int
15629lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15630 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15631{
15632 struct lpfc_mbx_rq_create *rq_create;
15633 struct lpfc_dmabuf *dmabuf;
15634 LPFC_MBOXQ_t *mbox;
15635 int rc, length, status = 0;
15636 uint32_t shdr_status, shdr_add_status;
15637 union lpfc_sli4_cfg_shdr *shdr;
49198b37 15638 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15639 void __iomem *bar_memmap_p;
15640 uint32_t db_offset;
15641 uint16_t pci_barset;
49198b37 15642
15643 /* sanity check on queue memory */
15644 if (!hrq || !drq || !cq)
15645 return -ENODEV;
15646 if (!phba->sli4_hba.pc_sli4_params.supported)
15647 hw_page_size = SLI4_PAGE_SIZE;
15648
15649 if (hrq->entry_count != drq->entry_count)
15650 return -EINVAL;
15651 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15652 if (!mbox)
15653 return -ENOMEM;
15654 length = (sizeof(struct lpfc_mbx_rq_create) -
15655 sizeof(struct lpfc_sli4_cfg_mhdr));
15656 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15657 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15658 length, LPFC_SLI4_MBX_EMBED);
15659 rq_create = &mbox->u.mqe.un.rq_create;
15660 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15661 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15662 phba->sli4_hba.pc_sli4_params.rqv);
15663 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15664 bf_set(lpfc_rq_context_rqe_count_1,
15665 &rq_create->u.request.context,
15666 hrq->entry_count);
15667 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15668 bf_set(lpfc_rq_context_rqe_size,
15669 &rq_create->u.request.context,
15670 LPFC_RQE_SIZE_8);
15671 bf_set(lpfc_rq_context_page_size,
15672 &rq_create->u.request.context,
8ea73db4 15673 LPFC_RQ_PAGE_SIZE_4096);
15674 } else {
15675 switch (hrq->entry_count) {
15676 default:
15677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15678 "2535 Unsupported RQ count. (%d)\n",
15679 hrq->entry_count);
15680 if (hrq->entry_count < 512) {
15681 status = -EINVAL;
15682 goto out;
15683 }
15684 /* otherwise default to smallest count (drop through) */
15685 case 512:
15686 bf_set(lpfc_rq_context_rqe_count,
15687 &rq_create->u.request.context,
15688 LPFC_RQ_RING_SIZE_512);
15689 break;
15690 case 1024:
15691 bf_set(lpfc_rq_context_rqe_count,
15692 &rq_create->u.request.context,
15693 LPFC_RQ_RING_SIZE_1024);
15694 break;
15695 case 2048:
15696 bf_set(lpfc_rq_context_rqe_count,
15697 &rq_create->u.request.context,
15698 LPFC_RQ_RING_SIZE_2048);
15699 break;
15700 case 4096:
15701 bf_set(lpfc_rq_context_rqe_count,
15702 &rq_create->u.request.context,
15703 LPFC_RQ_RING_SIZE_4096);
15704 break;
15705 }
15706 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15707 LPFC_HDR_BUF_SIZE);
15708 }
15709 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15710 cq->queue_id);
15711 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15712 hrq->page_count);
4f774513 15713 list_for_each_entry(dmabuf, &hrq->page_list, list) {
49198b37 15714 memset(dmabuf->virt, 0, hw_page_size);
15715 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15716 putPaddrLow(dmabuf->phys);
15717 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15718 putPaddrHigh(dmabuf->phys);
15719 }
15720 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15721 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15722
15723 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15724 /* The IOCTL status is embedded in the mailbox subheader. */
15725 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15726 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15727 if (shdr_status || shdr_add_status || rc) {
15728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15729 "2504 RQ_CREATE mailbox failed with "
15730 "status x%x add_status x%x, mbx status x%x\n",
15731 shdr_status, shdr_add_status, rc);
15732 status = -ENXIO;
15733 goto out;
15734 }
15735 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15736 if (hrq->queue_id == 0xFFFF) {
15737 status = -ENXIO;
15738 goto out;
15739 }
15740
15741 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15742 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15743 &rq_create->u.response);
15744 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15745 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15747 "3262 RQ [%d] doorbell format not "
15748 "supported: x%x\n", hrq->queue_id,
15749 hrq->db_format);
15750 status = -EINVAL;
15751 goto out;
15752 }
15753
15754 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15755 &rq_create->u.response);
15756 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15757 if (!bar_memmap_p) {
15758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15759 "3269 RQ[%d] failed to memmap pci "
15760 "barset:x%x\n", hrq->queue_id,
15761 pci_barset);
15762 status = -ENOMEM;
15763 goto out;
15764 }
15765
15766 db_offset = rq_create->u.response.doorbell_offset;
15767 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15768 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15770 "3270 RQ[%d] doorbell offset not "
15771 "supported: x%x\n", hrq->queue_id,
15772 db_offset);
15773 status = -EINVAL;
15774 goto out;
15775 }
15776 hrq->db_regaddr = bar_memmap_p + db_offset;
15777 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15778 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15779 "format:x%x\n", hrq->queue_id, pci_barset,
15780 db_offset, hrq->db_format);
15781 } else {
15782 hrq->db_format = LPFC_DB_RING_FORMAT;
15783 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15784 }
4f774513 15785 hrq->type = LPFC_HRQ;
2a622bfb 15786 hrq->assoc_qid = cq->queue_id;
15787 hrq->subtype = subtype;
15788 hrq->host_index = 0;
15789 hrq->hba_index = 0;
61f3d4bf 15790 hrq->entry_repost = LPFC_RQ_REPOST;
15791
15792 /* now create the data queue */
15793 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15794 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15795 length, LPFC_SLI4_MBX_EMBED);
15796 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15797 phba->sli4_hba.pc_sli4_params.rqv);
15798 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15799 bf_set(lpfc_rq_context_rqe_count_1,
c31098ce 15800 &rq_create->u.request.context, hrq->entry_count);
15801 if (subtype == LPFC_NVMET)
15802 rq_create->u.request.context.buffer_size =
15803 LPFC_NVMET_DATA_BUF_SIZE;
15804 else
15805 rq_create->u.request.context.buffer_size =
15806 LPFC_DATA_BUF_SIZE;
15807 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15808 LPFC_RQE_SIZE_8);
15809 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15810 (PAGE_SIZE/SLI4_PAGE_SIZE));
15811 } else {
15812 switch (drq->entry_count) {
15813 default:
15814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15815 "2536 Unsupported RQ count. (%d)\n",
15816 drq->entry_count);
15817 if (drq->entry_count < 512) {
15818 status = -EINVAL;
15819 goto out;
15820 }
15821 /* otherwise default to smallest count (drop through) */
15822 case 512:
15823 bf_set(lpfc_rq_context_rqe_count,
15824 &rq_create->u.request.context,
15825 LPFC_RQ_RING_SIZE_512);
15826 break;
15827 case 1024:
15828 bf_set(lpfc_rq_context_rqe_count,
15829 &rq_create->u.request.context,
15830 LPFC_RQ_RING_SIZE_1024);
15831 break;
15832 case 2048:
15833 bf_set(lpfc_rq_context_rqe_count,
15834 &rq_create->u.request.context,
15835 LPFC_RQ_RING_SIZE_2048);
15836 break;
15837 case 4096:
15838 bf_set(lpfc_rq_context_rqe_count,
15839 &rq_create->u.request.context,
15840 LPFC_RQ_RING_SIZE_4096);
15841 break;
15842 }
15843 if (subtype == LPFC_NVMET)
15844 bf_set(lpfc_rq_context_buf_size,
15845 &rq_create->u.request.context,
15846 LPFC_NVMET_DATA_BUF_SIZE);
15847 else
15848 bf_set(lpfc_rq_context_buf_size,
15849 &rq_create->u.request.context,
15850 LPFC_DATA_BUF_SIZE);
15851 }
15852 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15853 cq->queue_id);
15854 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15855 drq->page_count);
15856 list_for_each_entry(dmabuf, &drq->page_list, list) {
15857 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15858 putPaddrLow(dmabuf->phys);
15859 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15860 putPaddrHigh(dmabuf->phys);
15861 }
15862 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15863 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15864 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15865 /* The IOCTL status is embedded in the mailbox subheader. */
15866 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15867 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15868 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15869 if (shdr_status || shdr_add_status || rc) {
15870 status = -ENXIO;
15871 goto out;
15872 }
15873 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15874 if (drq->queue_id == 0xFFFF) {
15875 status = -ENXIO;
15876 goto out;
15877 }
15878 drq->type = LPFC_DRQ;
2a622bfb 15879 drq->assoc_qid = cq->queue_id;
15880 drq->subtype = subtype;
15881 drq->host_index = 0;
15882 drq->hba_index = 0;
61f3d4bf 15883 drq->entry_repost = LPFC_RQ_REPOST;
15884
15885 /* link the header and data RQs onto the parent cq child list */
15886 list_add_tail(&hrq->list, &cq->child_list);
15887 list_add_tail(&drq->list, &cq->child_list);
15888
15889out:
8fa38513 15890 mempool_free(mbox, phba->mbox_mem_pool);
15891 return status;
15892}
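
/*
 * Editor's sketch, not driver code: creating an unsolicited receive queue
 * pair bound to one completion queue. "hdr_rq" and "dat_rq" are
 * hypothetical, previously allocated queues; their entry counts must match
 * or the routine above returns -EINVAL.
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy_wq;	(hypothetical unwind label)
 */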
15893
15894/**
15895 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15896 * @phba: HBA structure that indicates port to create a queue on.
15897 * @hrqp: The queue structure array to use to create the header receive queues.
15898 * @drqp: The queue structure array to use to create the data receive queues.
15899 * @cqp: The completion queue array to bind these receive queues to.
15900 *
 15901 * This function creates receive buffer queue pairs, as detailed in @hrqp and
 15902 * @drqp, on a port, described by @phba, by sending a RQ_CREATE mailbox command
 15903 * to the HBA.
 15904 *
 15905 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
 15906 * and @hrqp arrays are used to get the entry counts needed to determine the
 15907 * number of pages to use for each queue. The @cqp array indicates which
 15908 * completion queues the buffers received on these queues are bound to.
 15909 * This function will send the RQ_CREATE mailbox command to the HBA to set up
 15910 * the receive queue pairs. This function is synchronous and will wait for the
 15911 * mailbox command to finish before continuing.
15912 *
15913 * On success this function will return a zero. If unable to allocate enough
15914 * memory this function will return -ENOMEM. If the queue create mailbox command
15915 * fails this function will return -ENXIO.
15916 **/
15917int
15918lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15919 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15920 uint32_t subtype)
15921{
15922 struct lpfc_queue *hrq, *drq, *cq;
15923 struct lpfc_mbx_rq_create_v2 *rq_create;
15924 struct lpfc_dmabuf *dmabuf;
15925 LPFC_MBOXQ_t *mbox;
15926 int rc, length, alloclen, status = 0;
15927 int cnt, idx, numrq, page_idx = 0;
15928 uint32_t shdr_status, shdr_add_status;
15929 union lpfc_sli4_cfg_shdr *shdr;
15930 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15931
15932 numrq = phba->cfg_nvmet_mrq;
15933 /* sanity check on array memory */
15934 if (!hrqp || !drqp || !cqp || !numrq)
15935 return -ENODEV;
15936 if (!phba->sli4_hba.pc_sli4_params.supported)
15937 hw_page_size = SLI4_PAGE_SIZE;
15938
15939 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15940 if (!mbox)
15941 return -ENOMEM;
15942
15943 length = sizeof(struct lpfc_mbx_rq_create_v2);
15944 length += ((2 * numrq * hrqp[0]->page_count) *
15945 sizeof(struct dma_address));
15946
15947 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15948 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15949 LPFC_SLI4_MBX_NEMBED);
15950 if (alloclen < length) {
15951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15952 "3099 Allocated DMA memory size (%d) is "
15953 "less than the requested DMA memory size "
15954 "(%d)\n", alloclen, length);
15955 status = -ENOMEM;
15956 goto out;
15957 }
 15958
15961 rq_create = mbox->sge_array->addr[0];
15962 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15963
15964 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15965 cnt = 0;
15966
15967 for (idx = 0; idx < numrq; idx++) {
15968 hrq = hrqp[idx];
15969 drq = drqp[idx];
15970 cq = cqp[idx];
15971
15972 /* sanity check on queue memory */
15973 if (!hrq || !drq || !cq) {
15974 status = -ENODEV;
15975 goto out;
15976 }
15977
15978 if (hrq->entry_count != drq->entry_count) {
15979 status = -EINVAL;
15980 goto out;
15981 }
15982
15983 if (idx == 0) {
15984 bf_set(lpfc_mbx_rq_create_num_pages,
15985 &rq_create->u.request,
15986 hrq->page_count);
15987 bf_set(lpfc_mbx_rq_create_rq_cnt,
15988 &rq_create->u.request, (numrq * 2));
15989 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15990 1);
15991 bf_set(lpfc_rq_context_base_cq,
15992 &rq_create->u.request.context,
15993 cq->queue_id);
15994 bf_set(lpfc_rq_context_data_size,
15995 &rq_create->u.request.context,
3c603be9 15996 LPFC_NVMET_DATA_BUF_SIZE);
15997 bf_set(lpfc_rq_context_hdr_size,
15998 &rq_create->u.request.context,
15999 LPFC_HDR_BUF_SIZE);
16000 bf_set(lpfc_rq_context_rqe_count_1,
16001 &rq_create->u.request.context,
16002 hrq->entry_count);
16003 bf_set(lpfc_rq_context_rqe_size,
16004 &rq_create->u.request.context,
16005 LPFC_RQE_SIZE_8);
16006 bf_set(lpfc_rq_context_page_size,
16007 &rq_create->u.request.context,
16008 (PAGE_SIZE/SLI4_PAGE_SIZE));
16009 }
16010 rc = 0;
16011 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16012 memset(dmabuf->virt, 0, hw_page_size);
16013 cnt = page_idx + dmabuf->buffer_tag;
16014 rq_create->u.request.page[cnt].addr_lo =
16015 putPaddrLow(dmabuf->phys);
16016 rq_create->u.request.page[cnt].addr_hi =
16017 putPaddrHigh(dmabuf->phys);
16018 rc++;
16019 }
16020 page_idx += rc;
16021
16022 rc = 0;
16023 list_for_each_entry(dmabuf, &drq->page_list, list) {
16024 memset(dmabuf->virt, 0, hw_page_size);
16025 cnt = page_idx + dmabuf->buffer_tag;
16026 rq_create->u.request.page[cnt].addr_lo =
16027 putPaddrLow(dmabuf->phys);
16028 rq_create->u.request.page[cnt].addr_hi =
16029 putPaddrHigh(dmabuf->phys);
16030 rc++;
16031 }
16032 page_idx += rc;
16033
16034 hrq->db_format = LPFC_DB_RING_FORMAT;
16035 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16036 hrq->type = LPFC_HRQ;
16037 hrq->assoc_qid = cq->queue_id;
16038 hrq->subtype = subtype;
16039 hrq->host_index = 0;
16040 hrq->hba_index = 0;
61f3d4bf 16041 hrq->entry_repost = LPFC_RQ_REPOST;
16042
16043 drq->db_format = LPFC_DB_RING_FORMAT;
16044 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16045 drq->type = LPFC_DRQ;
16046 drq->assoc_qid = cq->queue_id;
16047 drq->subtype = subtype;
16048 drq->host_index = 0;
16049 drq->hba_index = 0;
61f3d4bf 16050 drq->entry_repost = LPFC_RQ_REPOST;
16051
16052 list_add_tail(&hrq->list, &cq->child_list);
16053 list_add_tail(&drq->list, &cq->child_list);
16054 }
16055
16056 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16057 /* The IOCTL status is embedded in the mailbox subheader. */
16058 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16059 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16060 if (shdr_status || shdr_add_status || rc) {
16061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16062 "3120 RQ_CREATE mailbox failed with "
16063 "status x%x add_status x%x, mbx status x%x\n",
16064 shdr_status, shdr_add_status, rc);
16065 status = -ENXIO;
16066 goto out;
16067 }
16068 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16069 if (rc == 0xFFFF) {
16070 status = -ENXIO;
16071 goto out;
16072 }
16073
16074 /* Initialize all RQs with associated queue id */
16075 for (idx = 0; idx < numrq; idx++) {
16076 hrq = hrqp[idx];
16077 hrq->queue_id = rc + (2 * idx);
16078 drq = drqp[idx];
16079 drq->queue_id = rc + (2 * idx) + 1;
16080 }
16081
16082out:
16083 lpfc_sli4_mbox_cmd_free(phba, mbox);
16084 return status;
16085}
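
/*
 * Editor's sketch, not driver code: lpfc_mrq_create() takes parallel
 * arrays sized by phba->cfg_nvmet_mrq, and the port hands back one base
 * queue id, with each pair numbered base + 2*idx (header) and
 * base + 2*idx + 1 (data), as initialized above. The array names here
 * are hypothetical:
 *
 *	rc = lpfc_mrq_create(phba, nvmet_mrq_hdr, nvmet_mrq_data,
 *			     nvmet_cqset, LPFC_NVMET);
 *	if (rc)
 *		goto out_destroy;	(hypothetical unwind label)
 */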
16086
16087/**
16088 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16089 * @eq: The queue structure associated with the queue to destroy.
16090 *
 16091 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16092 * command, specific to the type of queue, to the HBA.
16093 *
16094 * The @eq struct is used to get the queue ID of the queue to destroy.
16095 *
16096 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16097 * command fails this function will return -ENXIO.
4f774513 16098 **/
a2fc4aef 16099int
16100lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16101{
16102 LPFC_MBOXQ_t *mbox;
16103 int rc, length, status = 0;
16104 uint32_t shdr_status, shdr_add_status;
16105 union lpfc_sli4_cfg_shdr *shdr;
16106
2e90f4b5 16107 /* sanity check on queue memory */
16108 if (!eq)
16109 return -ENODEV;
16110 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16111 if (!mbox)
16112 return -ENOMEM;
16113 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16114 sizeof(struct lpfc_sli4_cfg_mhdr));
16115 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16116 LPFC_MBOX_OPCODE_EQ_DESTROY,
16117 length, LPFC_SLI4_MBX_EMBED);
16118 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16119 eq->queue_id);
16120 mbox->vport = eq->phba->pport;
16121 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16122
16123 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16124 /* The IOCTL status is embedded in the mailbox subheader. */
16125 shdr = (union lpfc_sli4_cfg_shdr *)
16126 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16127 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16128 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16129 if (shdr_status || shdr_add_status || rc) {
16130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16131 "2505 EQ_DESTROY mailbox failed with "
16132 "status x%x add_status x%x, mbx status x%x\n",
16133 shdr_status, shdr_add_status, rc);
16134 status = -ENXIO;
16135 }
16136
16137 /* Remove eq from any list */
16138 list_del_init(&eq->list);
8fa38513 16139 mempool_free(mbox, eq->phba->mbox_mem_pool);
16140 return status;
16141}
16142
16143/**
16144 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16145 * @cq: The queue structure associated with the queue to destroy.
16146 *
 16147 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16148 * command, specific to the type of queue, to the HBA.
16149 *
16150 * The @cq struct is used to get the queue ID of the queue to destroy.
16151 *
16152 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16153 * command fails this function will return -ENXIO.
4f774513 16154 **/
a2fc4aef 16155int
16156lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16157{
16158 LPFC_MBOXQ_t *mbox;
16159 int rc, length, status = 0;
16160 uint32_t shdr_status, shdr_add_status;
16161 union lpfc_sli4_cfg_shdr *shdr;
16162
2e90f4b5 16163 /* sanity check on queue memory */
16164 if (!cq)
16165 return -ENODEV;
16166 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16167 if (!mbox)
16168 return -ENOMEM;
16169 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16170 sizeof(struct lpfc_sli4_cfg_mhdr));
16171 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16172 LPFC_MBOX_OPCODE_CQ_DESTROY,
16173 length, LPFC_SLI4_MBX_EMBED);
16174 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16175 cq->queue_id);
16176 mbox->vport = cq->phba->pport;
16177 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16178 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16179 /* The IOCTL status is embedded in the mailbox subheader. */
16180 shdr = (union lpfc_sli4_cfg_shdr *)
16181 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16182 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16183 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16184 if (shdr_status || shdr_add_status || rc) {
16185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16186 "2506 CQ_DESTROY mailbox failed with "
16187 "status x%x add_status x%x, mbx status x%x\n",
16188 shdr_status, shdr_add_status, rc);
16189 status = -ENXIO;
16190 }
16191 /* Remove cq from any list */
16192 list_del_init(&cq->list);
8fa38513 16193 mempool_free(mbox, cq->phba->mbox_mem_pool);
16194 return status;
16195}
16196
16197/**
16198 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 16199 * @mq: The queue structure associated with the queue to destroy.
 16200 *
 16201 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16202 * command, specific to the type of queue, to the HBA.
16203 *
16204 * The @mq struct is used to get the queue ID of the queue to destroy.
16205 *
16206 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16207 * command fails this function will return -ENXIO.
04c68496 16208 **/
a2fc4aef 16209int
16210lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16211{
16212 LPFC_MBOXQ_t *mbox;
16213 int rc, length, status = 0;
16214 uint32_t shdr_status, shdr_add_status;
16215 union lpfc_sli4_cfg_shdr *shdr;
16216
2e90f4b5 16217 /* sanity check on queue memory */
16218 if (!mq)
16219 return -ENODEV;
16220 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16221 if (!mbox)
16222 return -ENOMEM;
16223 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16224 sizeof(struct lpfc_sli4_cfg_mhdr));
16225 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16226 LPFC_MBOX_OPCODE_MQ_DESTROY,
16227 length, LPFC_SLI4_MBX_EMBED);
16228 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16229 mq->queue_id);
16230 mbox->vport = mq->phba->pport;
16231 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16232 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16233 /* The IOCTL status is embedded in the mailbox subheader. */
16234 shdr = (union lpfc_sli4_cfg_shdr *)
16235 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16238 if (shdr_status || shdr_add_status || rc) {
16239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16240 "2507 MQ_DESTROY mailbox failed with "
16241 "status x%x add_status x%x, mbx status x%x\n",
16242 shdr_status, shdr_add_status, rc);
16243 status = -ENXIO;
16244 }
16245 /* Remove mq from any list */
16246 list_del_init(&mq->list);
8fa38513 16247 mempool_free(mbox, mq->phba->mbox_mem_pool);
16248 return status;
16249}
16250
16251/**
16252 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16253 * @wq: The queue structure associated with the queue to destroy.
16254 *
 16255 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16256 * command, specific to the type of queue, to the HBA.
16257 *
16258 * The @wq struct is used to get the queue ID of the queue to destroy.
16259 *
16260 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16261 * command fails this function will return -ENXIO.
4f774513 16262 **/
a2fc4aef 16263int
16264lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16265{
16266 LPFC_MBOXQ_t *mbox;
16267 int rc, length, status = 0;
16268 uint32_t shdr_status, shdr_add_status;
16269 union lpfc_sli4_cfg_shdr *shdr;
16270
2e90f4b5 16271 /* sanity check on queue memory */
16272 if (!wq)
16273 return -ENODEV;
16274 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16275 if (!mbox)
16276 return -ENOMEM;
16277 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16278 sizeof(struct lpfc_sli4_cfg_mhdr));
16279 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16280 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16281 length, LPFC_SLI4_MBX_EMBED);
16282 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16283 wq->queue_id);
16284 mbox->vport = wq->phba->pport;
16285 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16286 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16287 shdr = (union lpfc_sli4_cfg_shdr *)
16288 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16289 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16290 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16291 if (shdr_status || shdr_add_status || rc) {
16292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16293 "2508 WQ_DESTROY mailbox failed with "
16294 "status x%x add_status x%x, mbx status x%x\n",
16295 shdr_status, shdr_add_status, rc);
16296 status = -ENXIO;
16297 }
16298 /* Remove wq from any list */
16299 list_del_init(&wq->list);
16300 kfree(wq->pring);
16301 wq->pring = NULL;
8fa38513 16302 mempool_free(mbox, wq->phba->mbox_mem_pool);
16303 return status;
16304}
16305
16306/**
16307 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 16308 * @hrq: The header receive queue of the receive queue pair to destroy.
 16309 * @drq: The data receive queue of the receive queue pair to destroy.
 16310 *
 16311 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
 16312 * by sending a mailbox command, specific to the type of queue, to the HBA.
 16313 * The @hrq and @drq structs are used to get the queue IDs of the queues.
16314 *
16315 * On success this function will return a zero. If the queue destroy mailbox
d439d286 16316 * command fails this function will return -ENXIO.
4f774513 16317 **/
a2fc4aef 16318int
16319lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16320 struct lpfc_queue *drq)
16321{
16322 LPFC_MBOXQ_t *mbox;
16323 int rc, length, status = 0;
16324 uint32_t shdr_status, shdr_add_status;
16325 union lpfc_sli4_cfg_shdr *shdr;
16326
2e90f4b5 16327 /* sanity check on queue memory */
16328 if (!hrq || !drq)
16329 return -ENODEV;
16330 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16331 if (!mbox)
16332 return -ENOMEM;
16333 length = (sizeof(struct lpfc_mbx_rq_destroy) -
fedd3b7b 16334 sizeof(struct lpfc_sli4_cfg_mhdr));
16335 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16336 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16337 length, LPFC_SLI4_MBX_EMBED);
16338 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16339 hrq->queue_id);
16340 mbox->vport = hrq->phba->pport;
16341 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16342 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16343 /* The IOCTL status is embedded in the mailbox subheader. */
16344 shdr = (union lpfc_sli4_cfg_shdr *)
16345 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16346 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16347 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16348 if (shdr_status || shdr_add_status || rc) {
16349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16350 "2509 RQ_DESTROY mailbox failed with "
16351 "status x%x add_status x%x, mbx status x%x\n",
16352 shdr_status, shdr_add_status, rc);
16353 if (rc != MBX_TIMEOUT)
16354 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16355 return -ENXIO;
16356 }
16357 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16358 drq->queue_id);
16359 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16360 shdr = (union lpfc_sli4_cfg_shdr *)
16361 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16362 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16363 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16364 if (shdr_status || shdr_add_status || rc) {
16365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16366 "2510 RQ_DESTROY mailbox failed with "
16367 "status x%x add_status x%x, mbx status x%x\n",
16368 shdr_status, shdr_add_status, rc);
16369 status = -ENXIO;
16370 }
16371 list_del_init(&hrq->list);
16372 list_del_init(&drq->list);
8fa38513 16373 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16374 return status;
16375}
16376
16377/**
16378 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 16379 * @phba: pointer to lpfc hba data structure.
16380 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16381 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16382 * @xritag: the xritag that ties this io to the SGL pages.
16383 *
16384 * This routine will post the sgl pages for the IO that has the xritag
16385 * that is in the iocbq structure. The xritag is assigned during iocbq
16386 * creation and persists for as long as the driver is loaded.
 16387 * If the caller has fewer than 256 scatter gather segments to map, then
 16388 * pdma_phys_addr1 should be 0.
 16389 * If the caller needs to map more than 256 scatter gather segments, then
 16390 * pdma_phys_addr1 should be a valid physical address.
 16391 * Physical addresses for SGLs must be 64-byte aligned.
 16392 * If two SGLs are mapped, the first one must have 256 entries and the
 16393 * second can have between 1 and 256 entries.
16394 *
16395 * Return codes:
16396 * 0 - Success
16397 * -ENXIO, -ENOMEM - Failure
16398 **/
16399int
16400lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16401 dma_addr_t pdma_phys_addr0,
16402 dma_addr_t pdma_phys_addr1,
16403 uint16_t xritag)
16404{
16405 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16406 LPFC_MBOXQ_t *mbox;
16407 int rc;
16408 uint32_t shdr_status, shdr_add_status;
6d368e53 16409 uint32_t mbox_tmo;
16410 union lpfc_sli4_cfg_shdr *shdr;
16411
16412 if (xritag == NO_XRI) {
16413 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16414 "0364 Invalid param:\n");
16415 return -EINVAL;
16416 }
16417
16418 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16419 if (!mbox)
16420 return -ENOMEM;
16421
16422 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16423 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16424 sizeof(struct lpfc_mbx_post_sgl_pages) -
fedd3b7b 16425 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16426
16427 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16428 &mbox->u.mqe.un.post_sgl_pages;
16429 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16430 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16431
16432 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16433 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16434 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16435 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16436
16437 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16438 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16439 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16440 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16441 if (!phba->sli4_hba.intr_enable)
16442 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6d368e53 16443 else {
a183a15f 16444 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16445 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16446 }
16447 /* The IOCTL status is embedded in the mailbox subheader. */
16448 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16449 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16450 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16451 if (rc != MBX_TIMEOUT)
16452 mempool_free(mbox, phba->mbox_mem_pool);
 16453 	if (shdr_status || shdr_add_status || rc) {
 16454 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 16455 				"2511 POST_SGL mailbox failed with "
 16456 				"status x%x add_status x%x, mbx status x%x\n",
 16457 				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;	/* match the documented failure return */
 16458 	}
 16459 	return rc;
16460}
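
/*
 * Editor's sketch, not driver code: posting the SGL for a single xri whose
 * scatter list fits in one DMA page, so the second page address is 0 per
 * the rules above. "lpfc_ncmd" is a hypothetical buffer of the kind used
 * later in this file:
 *
 *	rc = lpfc_sli4_post_sgl(phba, lpfc_ncmd->dma_phys_sgl, 0,
 *				lpfc_ncmd->cur_iocbq.sli4_xritag);
 *	if (rc)
 *		(mark the buffer unusable and free it)
 */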
4f774513 16461
6d368e53 16462/**
88a2cfbb 16463 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 16464 * @phba: pointer to lpfc hba data structure.
 16465 *
 16466 * This routine is invoked to allocate the next available xri from the
 16467 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
 16468 * index is logical, so the search starts from bit 0 on every call.
 16469 *
 16470 * Returns
 16471 * An available xri in the range 0 <= xri < max_xri if successful,
 16472 * NO_XRI if no xris are available.
16474 **/
5d8b8167 16475static uint16_t
16476lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16477{
16478 unsigned long xri;
16479
16480 /*
16481 * Fetch the next logical xri. Because this index is logical,
16482 * the driver starts at 0 each time.
16483 */
16484 spin_lock_irq(&phba->hbalock);
16485 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16486 phba->sli4_hba.max_cfg_param.max_xri, 0);
16487 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16488 spin_unlock_irq(&phba->hbalock);
16489 return NO_XRI;
16490 } else {
16491 set_bit(xri, phba->sli4_hba.xri_bmask);
16492 phba->sli4_hba.max_cfg_param.xri_used++;
6d368e53 16493 }
16494 spin_unlock_irq(&phba->hbalock);
16495 return xri;
16496}
16497
16498/**
 16499 * __lpfc_sli4_free_xri - Release an xri for reuse (hbalock held).
 16500 * @phba: pointer to lpfc hba data structure.
 16501 *
 16502 * This routine is invoked to release an xri to the pool of
 16503 * available xris maintained by the driver; the caller holds hbalock.
16504 **/
5d8b8167 16505static void
16506__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16507{
16508 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16509 phba->sli4_hba.max_cfg_param.xri_used--;
16510 }
16511}
16512
16513/**
16514 * lpfc_sli4_free_xri - Release an xri for reuse.
16515 * @phba: pointer to lpfc hba data structure.
16516 *
16517 * This routine is invoked to release an xri to the pool of
 16518 * available xris maintained by the driver.
16519 **/
16520void
16521lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16522{
16523 spin_lock_irq(&phba->hbalock);
16524 __lpfc_sli4_free_xri(phba, xri);
16525 spin_unlock_irq(&phba->hbalock);
16526}
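
/*
 * Editor's note: the split above follows the driver's usual locking
 * convention; the double-underscore variant assumes the caller already
 * holds phba->hbalock, while the public wrapper takes the lock itself.
 * A caller already under the lock would use, for example:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	__lpfc_sli4_free_xri(phba, xri);
 *	spin_unlock_irq(&phba->hbalock);
 */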
16527
16528/**
16529 * lpfc_sli4_next_xritag - Get an xritag for the io
16530 * @phba: Pointer to HBA context object.
16531 *
 16532 * This function gets an xritag for the iocb. If there is no unused xritag
 16533 * it will return NO_XRI (0xffff).
 16534 * The function returns the allocated xritag if successful, else it returns
 16535 * NO_XRI; NO_XRI is not a valid xritag.
16536 * The caller is not required to hold any lock.
16537 **/
16538uint16_t
16539lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16540{
6d368e53 16541 uint16_t xri_index;
4f774513 16542
6d368e53 16543 xri_index = lpfc_sli4_alloc_xri(phba);
16544 if (xri_index == NO_XRI)
16545 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16546 "2004 Failed to allocate XRI.last XRITAG is %d"
16547 " Max XRI is %d, Used XRI is %d\n",
16548 xri_index,
16549 phba->sli4_hba.max_cfg_param.max_xri,
16550 phba->sli4_hba.max_cfg_param.xri_used);
16551 return xri_index;
16552}
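
/*
 * Editor's sketch, not driver code: the allocator above must be paired
 * with a NO_XRI check before the tag is used, and with lpfc_sli4_free_xri()
 * when the io completes:
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_xri(phba, xritag);
 */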
16553
16554/**
895427bd 16555 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
4f774513 16556 * @phba: pointer to lpfc hba data structure.
16557 * @post_sgl_list: pointer to els sgl entry list.
16558 * @count: number of els sgl entries on the list.
16559 *
 16560 * This routine is invoked to post a block of the driver's sgl pages to the
 16561 * HBA using a non-embedded mailbox command. No lock is held. This routine
 16562 * is only called when the driver is loading and after all IO has been
 16563 * stopped.
16564 **/
8a9d2e80 16565static int
895427bd 16566lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16567 struct list_head *post_sgl_list,
16568 int post_cnt)
4f774513 16569{
8a9d2e80 16570 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16571 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16572 struct sgl_page_pairs *sgl_pg_pairs;
16573 void *viraddr;
16574 LPFC_MBOXQ_t *mbox;
16575 uint32_t reqlen, alloclen, pg_pairs;
16576 uint32_t mbox_tmo;
16577 uint16_t xritag_start = 0;
16578 int rc = 0;
16579 uint32_t shdr_status, shdr_add_status;
16580 union lpfc_sli4_cfg_shdr *shdr;
16581
895427bd 16582 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
4f774513 16583 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16584 if (reqlen > SLI4_PAGE_SIZE) {
895427bd 16585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 16586 			"2559 Block sgl registration required DMA "
 16587 			"size (%d) greater than a page\n", reqlen);
16588 return -ENOMEM;
16589 }
895427bd 16590
4f774513 16591 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6d368e53 16592 if (!mbox)
4f774513 16593 return -ENOMEM;
16594
16595 /* Allocate DMA memory and set up the non-embedded mailbox command */
16596 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16597 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16598 LPFC_SLI4_MBX_NEMBED);
16599
16600 if (alloclen < reqlen) {
16601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16602 "0285 Allocated DMA memory size (%d) is "
16603 "less than the requested DMA memory "
16604 "size (%d)\n", alloclen, reqlen);
16605 lpfc_sli4_mbox_cmd_free(phba, mbox);
16606 return -ENOMEM;
16607 }
4f774513 16608 /* Set up the SGL pages in the non-embedded DMA pages */
6d368e53 16609 viraddr = mbox->sge_array->addr[0];
16610 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16611 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16612
16613 pg_pairs = 0;
16614 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16615 /* Set up the sge entry */
16616 sgl_pg_pairs->sgl_pg0_addr_lo =
16617 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16618 sgl_pg_pairs->sgl_pg0_addr_hi =
16619 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16620 sgl_pg_pairs->sgl_pg1_addr_lo =
16621 cpu_to_le32(putPaddrLow(0));
16622 sgl_pg_pairs->sgl_pg1_addr_hi =
16623 cpu_to_le32(putPaddrHigh(0));
6d368e53 16624
16625 /* Keep the first xritag on the list */
16626 if (pg_pairs == 0)
16627 xritag_start = sglq_entry->sli4_xritag;
16628 sgl_pg_pairs++;
8a9d2e80 16629 pg_pairs++;
4f774513 16630 }
16631
16632 /* Complete initialization and perform endian conversion. */
4f774513 16633 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
895427bd 16634 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
4f774513 16635 sgl->word0 = cpu_to_le32(sgl->word0);
895427bd 16636
16637 if (!phba->sli4_hba.intr_enable)
16638 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16639 else {
a183a15f 16640 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16641 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16642 }
16643 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16644 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16645 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16646 if (rc != MBX_TIMEOUT)
16647 lpfc_sli4_mbox_cmd_free(phba, mbox);
16648 if (shdr_status || shdr_add_status || rc) {
16649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16650 "2513 POST_SGL_BLOCK mailbox command failed "
16651 "status x%x add_status x%x mbx status x%x\n",
16652 shdr_status, shdr_add_status, rc);
16653 rc = -ENXIO;
16654 }
16655 return rc;
16656}
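
/*
 * Editor's sketch, not driver code: block-posting a local list of ELS
 * sglqs. Because only the first xritag is carried in the command, the
 * entries on "sglq_list" (a hypothetical local list_head) must hold
 * consecutive sli4_xritag values:
 *
 *	rc = lpfc_sli4_post_sgl_list(phba, &sglq_list, cnt);
 *	if (rc)
 *		(the port rejected the block; -ENOMEM or -ENXIO)
 */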
16657
16658/**
0794d601 16659 * lpfc_sli4_post_common_sgl_block - post a block of nvme sgls to firmware
4f774513 16660 * @phba: pointer to lpfc hba data structure.
0794d601 16661 * @nblist: pointer to nvme buffer list.
 16662 * @count: number of nvme buffers on the list.
 16663 *
 16664 * This routine is invoked to post a block of @count nvme sgl pages from an
0794d601 16665 * NVME buffer list @nblist to the HBA using a non-embedded mailbox command.
16666 * No Lock is held.
16667 *
16668 **/
16669static int
16670lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba,
16671 struct list_head *nblist,
16672 int count)
4f774513 16673{
0794d601 16674 struct lpfc_nvme_buf *lpfc_ncmd;
16675 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16676 struct sgl_page_pairs *sgl_pg_pairs;
16677 void *viraddr;
16678 LPFC_MBOXQ_t *mbox;
16679 uint32_t reqlen, alloclen, pg_pairs;
16680 uint32_t mbox_tmo;
16681 uint16_t xritag_start = 0;
16682 int rc = 0;
16683 uint32_t shdr_status, shdr_add_status;
16684 dma_addr_t pdma_phys_bpl1;
16685 union lpfc_sli4_cfg_shdr *shdr;
16686
16687 /* Calculate the requested length of the dma memory */
8a9d2e80 16688 reqlen = count * sizeof(struct sgl_page_pairs) +
4f774513 16689 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
49198b37 16690 if (reqlen > SLI4_PAGE_SIZE) {
4f774513 16691 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
0794d601 16692 "6118 Block sgl registration required DMA "
 16693 			"size (%d) greater than a page\n", reqlen);
16694 return -ENOMEM;
16695 }
16696 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16697 if (!mbox) {
16698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0794d601 16699 "6119 Failed to allocate mbox cmd memory\n");
16700 return -ENOMEM;
16701 }
16702
16703 /* Allocate DMA memory and set up the non-embedded mailbox command */
16704 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16705 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16706 reqlen, LPFC_SLI4_MBX_NEMBED);
16707
16708 if (alloclen < reqlen) {
16709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0794d601 16710 "6120 Allocated DMA memory size (%d) is "
16711 "less than the requested DMA memory "
16712 "size (%d)\n", alloclen, reqlen);
16713 lpfc_sli4_mbox_cmd_free(phba, mbox);
16714 return -ENOMEM;
16715 }
6d368e53 16716
4f774513 16717 /* Get the first SGE entry from the non-embedded DMA memory */
16718 viraddr = mbox->sge_array->addr[0];
16719
16720 /* Set up the SGL pages in the non-embedded DMA pages */
16721 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16722 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16723
16724 pg_pairs = 0;
0794d601 16725 list_for_each_entry(lpfc_ncmd, nblist, list) {
16726 /* Set up the sge entry */
16727 sgl_pg_pairs->sgl_pg0_addr_lo =
0794d601 16728 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
4f774513 16729 sgl_pg_pairs->sgl_pg0_addr_hi =
0794d601 16730 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
4f774513 16731 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16732 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16733 SGL_PAGE_SIZE;
16734 else
16735 pdma_phys_bpl1 = 0;
16736 sgl_pg_pairs->sgl_pg1_addr_lo =
16737 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16738 sgl_pg_pairs->sgl_pg1_addr_hi =
16739 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16740 /* Keep the first xritag on the list */
16741 if (pg_pairs == 0)
0794d601 16742 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16743 sgl_pg_pairs++;
16744 pg_pairs++;
16745 }
16746 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16747 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16748 /* Perform endian conversion if necessary */
16749 sgl->word0 = cpu_to_le32(sgl->word0);
16750
0794d601 16751 if (!phba->sli4_hba.intr_enable) {
4f774513 16752 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
0794d601 16753 } else {
a183a15f 16754 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16755 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16756 }
0794d601 16757 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16758 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16759 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16760 if (rc != MBX_TIMEOUT)
16761 lpfc_sli4_mbox_cmd_free(phba, mbox);
16762 if (shdr_status || shdr_add_status || rc) {
16763 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
0794d601 16764 "6125 POST_SGL_BLOCK mailbox command failed "
16765 "status x%x add_status x%x mbx status x%x\n",
16766 shdr_status, shdr_add_status, rc);
16767 rc = -ENXIO;
16768 }
16769 return rc;
16770}
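
/*
 * Editor's note: the SLI4_PAGE_SIZE check above is what bounds a block
 * post. Each sgl page pair costs sizeof(struct sgl_page_pairs) (16 bytes),
 * so with a 4KB mailbox page roughly
 *	(SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) - 4) / 16
 * pairs fit in one command, which is the limit the list-walking caller
 * below enforces via LPFC_NEMBED_MBOX_SGL_CNT.
 */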
16771
16772/**
16773 * lpfc_sli4_post_common_sgl_list - Post blocks of nvme buffer sgls from a list
16774 * @phba: pointer to lpfc hba data structure.
16775 * @post_nblist: pointer to the nvme buffer list.
16776 *
 16777 * This routine walks a list of nvme buffers that was passed in. It attempts
 16778 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 16779 * uses the non-embedded SGL block post mailbox commands to post to the port.
 16780 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
 16781 * the embedded SGL post mailbox command for posting. The @post_nblist passed
 16782 * in must be a local list, thus no lock is needed when manipulating the list.
16783 *
16784 * Returns: 0 = failure, non-zero number of successfully posted buffers.
16785 **/
16786int
16787lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
16788 struct list_head *post_nblist, int sb_count)
16789{
16790 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
16791 int status, sgl_size;
16792 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16793 dma_addr_t pdma_phys_sgl1;
16794 int last_xritag = NO_XRI;
16795 int cur_xritag;
16796 unsigned long iflag;
16797 LIST_HEAD(prep_nblist);
16798 LIST_HEAD(blck_nblist);
16799 LIST_HEAD(nvme_nblist);
16800
16801 /* sanity check */
16802 if (sb_count <= 0)
16803 return -EINVAL;
16804
16805 sgl_size = phba->cfg_sg_dma_buf_size;
16806 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16807 list_del_init(&lpfc_ncmd->list);
16808 block_cnt++;
16809 if ((last_xritag != NO_XRI) &&
16810 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16811 /* a hole in xri block, form a sgl posting block */
16812 list_splice_init(&prep_nblist, &blck_nblist);
16813 post_cnt = block_cnt - 1;
16814 /* prepare list for next posting block */
16815 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16816 block_cnt = 1;
16817 } else {
16818 /* prepare list for next posting block */
16819 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16820 /* enough sgls for non-embed sgl mbox command */
16821 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16822 list_splice_init(&prep_nblist, &blck_nblist);
16823 post_cnt = block_cnt;
16824 block_cnt = 0;
16825 }
16826 }
16827 num_posting++;
16828 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16829
16830 /* end of repost sgl list condition for NVME buffers */
16831 if (num_posting == sb_count) {
16832 if (post_cnt == 0) {
16833 /* last sgl posting block */
16834 list_splice_init(&prep_nblist, &blck_nblist);
16835 post_cnt = block_cnt;
16836 } else if (block_cnt == 1) {
16837 /* last single sgl with non-contiguous xri */
16838 if (sgl_size > SGL_PAGE_SIZE)
16839 pdma_phys_sgl1 =
16840 lpfc_ncmd->dma_phys_sgl +
16841 SGL_PAGE_SIZE;
16842 else
16843 pdma_phys_sgl1 = 0;
16844 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16845 status = lpfc_sli4_post_sgl(
16846 phba, lpfc_ncmd->dma_phys_sgl,
16847 pdma_phys_sgl1, cur_xritag);
16848 if (status) {
16849 /* failure, put on abort nvme list */
16850 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
16851 } else {
16852 /* success, put on NVME buffer list */
16853 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
16854 lpfc_ncmd->status = IOSTAT_SUCCESS;
16855 num_posted++;
16856 }
16857 /* success, put on NVME buffer sgl list */
16858 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16859 }
16860 }
16861
16862 /* continue until a nembed page worth of sgls */
16863 if (post_cnt == 0)
16864 continue;
16865
16866 /* post block of NVME buffer list sgls */
16867 status = lpfc_sli4_post_common_sgl_block(phba, &blck_nblist,
16868 post_cnt);
16869
16870 /* don't reset xritag due to hole in xri block */
16871 if (block_cnt == 0)
16872 last_xritag = NO_XRI;
16873
16874 /* reset NVME buffer post count for next round of posting */
16875 post_cnt = 0;
16876
16877 /* move the posted buffers from the block list to the NVME buffer sgl list */
16878 while (!list_empty(&blck_nblist)) {
16879 list_remove_head(&blck_nblist, lpfc_ncmd,
16880 struct lpfc_nvme_buf, list);
16881 if (status) {
16882 /* failure, put on abort nvme list */
16883 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
16884 } else {
16885 /* success, put on NVME buffer list */
16886 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
16887 lpfc_ncmd->status = IOSTAT_SUCCESS;
16888 num_posted++;
16889 }
16890 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16891 }
16892 }
16893 /* Push NVME buffers with sgl posted to the available list */
16894 while (!list_empty(&nvme_nblist)) {
16895 list_remove_head(&nvme_nblist, lpfc_ncmd,
16896 struct lpfc_nvme_buf, list);
16897 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
16898 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
16899 spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
16900 list_add_tail(&lpfc_ncmd->list,
16901 &phba->lpfc_common_buf_list_put);
16902 phba->put_common_bufs++;
16903 spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
16904 }
16905 return num_posted;
16906}
16907
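/*
 * Editor's illustrative sketch (standalone C, hypothetical names): how
 * lpfc_sli4_post_common_sgl_list() above splits a stream of buffers
 * into posting blocks of contiguous XRIs. A block is closed on a hole
 * in the XRI sequence, on a full block, or at the end of the input.
 */
#include <stdio.h>

#define DEMO_BLOCK_MAX 8	/* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

static void demo_post_block(const int *xris, int cnt)
{
	printf("post %d xris starting at %d\n", cnt, xris[0]);
}

static void demo_group_xris(const int *xris, int n)
{
	int start = 0;

	for (int i = 1; i <= n; i++) {
		if (i == n ||				/* end of input */
		    xris[i] != xris[i - 1] + 1 ||	/* hole in xris */
		    i - start == DEMO_BLOCK_MAX) {	/* block is full */
			demo_post_block(&xris[start], i - start);
			start = i;
		}
	}
}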
4f774513
JS
16908/**
16909 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16910 * @phba: pointer to lpfc_hba struct that the frame was received on
16911 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16912 *
16913 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16914 * valid type of frame that the LPFC driver will handle. It returns zero if
16915 * the frame is valid and a non-zero value when the frame does not pass the
16916 * check.
16917 **/
16918static int
16919lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16920{
474ffb74 16921 /* make rctl_names static to save stack space */
4f774513 16922 struct fc_vft_header *fc_vft_hdr;
546fc854 16923 uint32_t *header = (uint32_t *) fc_hdr;
4f774513
JS
16924
16925 switch (fc_hdr->fh_r_ctl) {
16926 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16927 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16928 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16929 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16930 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16931 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16932 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16933 case FC_RCTL_DD_CMD_STATUS: /* command status */
16934 case FC_RCTL_ELS_REQ: /* extended link services request */
16935 case FC_RCTL_ELS_REP: /* extended link services reply */
16936 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16937 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16938 case FC_RCTL_BA_NOP: /* basic link service NOP */
16939 case FC_RCTL_BA_ABTS: /* basic link service abort */
16940 case FC_RCTL_BA_RMC: /* remove connection */
16941 case FC_RCTL_BA_ACC: /* basic accept */
16942 case FC_RCTL_BA_RJT: /* basic reject */
16943 case FC_RCTL_BA_PRMT:
16944 case FC_RCTL_ACK_1: /* acknowledge_1 */
16945 case FC_RCTL_ACK_0: /* acknowledge_0 */
16946 case FC_RCTL_P_RJT: /* port reject */
16947 case FC_RCTL_F_RJT: /* fabric reject */
16948 case FC_RCTL_P_BSY: /* port busy */
16949 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16950 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16951 case FC_RCTL_LCR: /* link credit reset */
ae9e28f3 16952 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
4f774513
JS
16953 case FC_RCTL_END: /* end */
16954 break;
16955 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16956 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16957 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16958 return lpfc_fc_frame_check(phba, fc_hdr);
16959 default:
16960 goto drop;
16961 }
ae9e28f3 16962
4f774513
JS
16963 switch (fc_hdr->fh_type) {
16964 case FC_TYPE_BLS:
16965 case FC_TYPE_ELS:
16966 case FC_TYPE_FCP:
16967 case FC_TYPE_CT:
895427bd 16968 case FC_TYPE_NVME:
4f774513
JS
16969 break;
16970 case FC_TYPE_IP:
16971 case FC_TYPE_ILS:
16972 default:
16973 goto drop;
16974 }
546fc854 16975
4f774513 16976 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
78e1d200 16977 "2538 Received frame rctl:x%x, type:x%x, "
88f43a08 16978 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
78e1d200
JS
16979 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16980 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16981 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16982 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16983 be32_to_cpu(header[6]));
4f774513
JS
16984 return 0;
16985drop:
16986 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
78e1d200
JS
16987 "2539 Dropped frame rctl:x%x type:x%x\n",
16988 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
4f774513
JS
16989 return 1;
16990}
16991
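/*
 * Editor's illustrative sketch: the two-level accept/drop filter of
 * lpfc_fc_frame_check() reduced to plain C. The case values shown are
 * examples only, not the full FC-FS sets handled above.
 */
static int demo_frame_ok(unsigned char r_ctl, unsigned char type)
{
	switch (r_ctl) {
	case 0x00:	/* e.g. FC_RCTL_DD_UNCAT */
	case 0x22:	/* e.g. FC_RCTL_ELS_REQ */
		break;		/* r_ctl accepted, go check type */
	default:
		return 0;	/* drop */
	}
	switch (type) {
	case 0x01:	/* e.g. FC_TYPE_ELS */
	case 0x08:	/* e.g. FC_TYPE_FCP */
		return 1;	/* accept */
	default:
		return 0;	/* drop */
	}
}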
16992/**
16993 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16994 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16995 *
16996 * This function processes the FC header to retrieve the VFI from the VF
16997 * header, if one exists. It returns the VFI if a Virtual Fabric Tagging
16998 * Header is present, or 0 otherwise.
16999 **/
17000static uint32_t
17001lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17002{
17003 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17004
17005 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17006 return 0;
17007 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17008}
17009
17010/**
17011 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17012 * @phba: Pointer to the HBA structure to search for the vport on
17013 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17014 * @fcfi: The FC Fabric ID that the frame came from
17015 *
17016 * This function searches the @phba for a vport that matches the content of the
17017 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17018 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17019 * returns the matching vport pointer or NULL if unable to match frame to a
17020 * vport.
17021 **/
17022static struct lpfc_vport *
17023lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
895427bd 17024 uint16_t fcfi, uint32_t did)
4f774513
JS
17025{
17026 struct lpfc_vport **vports;
17027 struct lpfc_vport *vport = NULL;
17028 int i;
939723a4 17029
bf08611b
JS
17030 if (did == Fabric_DID)
17031 return phba->pport;
939723a4
JS
17032 if ((phba->pport->fc_flag & FC_PT2PT) &&
17033 !(phba->link_state == LPFC_HBA_READY))
17034 return phba->pport;
17035
4f774513 17036 vports = lpfc_create_vport_work_array(phba);
895427bd 17037 if (vports != NULL) {
4f774513
JS
17038 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17039 if (phba->fcf.fcfi == fcfi &&
17040 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17041 vports[i]->fc_myDID == did) {
17042 vport = vports[i];
17043 break;
17044 }
17045 }
895427bd 17046 }
4f774513
JS
17047 lpfc_destroy_vport_work_array(phba, vports);
17048 return vport;
17049}
17050
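/*
 * Editor's illustrative sketch: the vport match above as a linear
 * search. The struct is a hypothetical reduction of lpfc_vport; the
 * FCFI comparison is elided for brevity.
 */
#include <stddef.h>

struct demo_vport {
	unsigned short vfi;	/* virtual fabric index */
	unsigned int my_did;	/* this vport's D_ID */
};

static struct demo_vport *demo_find_vport(struct demo_vport *v, int n,
					  unsigned short frame_vfi,
					  unsigned int did)
{
	for (int i = 0; i < n; i++)
		if (v[i].vfi == frame_vfi && v[i].my_did == did)
			return &v[i];
	return NULL;	/* no owner: the caller drops the frame */
}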
45ed1190
JS
17051/**
17052 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17053 * @vport: The vport to work on.
17054 *
17055 * This function updates the receive sequence time stamp for this vport. The
17056 * receive sequence time stamp indicates the time at which the last frame of
17057 * the sequence that has been idle for the longest amount of time was received.
17058 * The driver uses this time stamp to determine if any received sequences have
17059 * timed out.
17060 **/
5d8b8167 17061static void
45ed1190
JS
17062lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17063{
17064 struct lpfc_dmabuf *h_buf;
17065 struct hbq_dmabuf *dmabuf = NULL;
17066
17067 /* get the oldest sequence on the rcv list */
17068 h_buf = list_get_first(&vport->rcv_buffer_list,
17069 struct lpfc_dmabuf, list);
17070 if (!h_buf)
17071 return;
17072 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17073 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17074}
17075
17076/**
17077 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17078 * @vport: The vport that the received sequences were sent to.
17079 *
17080 * This function cleans up all outstanding received sequences. This is called
17081 * by the driver when a link event or user action invalidates all the received
17082 * sequences.
17083 **/
17084void
17085lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17086{
17087 struct lpfc_dmabuf *h_buf, *hnext;
17088 struct lpfc_dmabuf *d_buf, *dnext;
17089 struct hbq_dmabuf *dmabuf = NULL;
17090
17091 /* start with the oldest sequence on the rcv list */
17092 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17093 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17094 list_del_init(&dmabuf->hbuf.list);
17095 list_for_each_entry_safe(d_buf, dnext,
17096 &dmabuf->dbuf.list, list) {
17097 list_del_init(&d_buf->list);
17098 lpfc_in_buf_free(vport->phba, d_buf);
17099 }
17100 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17101 }
17102}
17103
17104/**
17105 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17106 * @vport: The vport that the received sequences were sent to.
17107 *
17108 * This function determines whether any received sequences have timed out by
17109 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17110 * indicates that there is at least one timed out sequence this routine will
17111 * go through the received sequences one at a time from most inactive to most
17112 * active to determine which ones need to be cleaned up. Once it has determined
17113 * that a sequence needs to be cleaned up it will simply free up the resources
17114 * without sending an abort.
17115 **/
17116void
17117lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17118{
17119 struct lpfc_dmabuf *h_buf, *hnext;
17120 struct lpfc_dmabuf *d_buf, *dnext;
17121 struct hbq_dmabuf *dmabuf = NULL;
17122 unsigned long timeout;
17123 int abort_count = 0;
17124
17125 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17126 vport->rcv_buffer_time_stamp);
17127 if (list_empty(&vport->rcv_buffer_list) ||
17128 time_before(jiffies, timeout))
17129 return;
17130 /* start with the oldest sequence on the rcv list */
17131 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17132 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17133 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17134 dmabuf->time_stamp);
17135 if (time_before(jiffies, timeout))
17136 break;
17137 abort_count++;
17138 list_del_init(&dmabuf->hbuf.list);
17139 list_for_each_entry_safe(d_buf, dnext,
17140 &dmabuf->dbuf.list, list) {
17141 list_del_init(&d_buf->list);
17142 lpfc_in_buf_free(vport->phba, d_buf);
17143 }
17144 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17145 }
17146 if (abort_count)
17147 lpfc_update_rcv_time_stamp(vport);
17148}
17149
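/*
 * Editor's illustrative sketch: the wrap-safe timeout test behind the
 * time_before() calls above, with a hypothetical tick counter in place
 * of jiffies.
 */
static int demo_timed_out(unsigned long now, unsigned long stamp,
			  unsigned long edtov_ticks)
{
	unsigned long deadline = stamp + edtov_ticks;

	/* The signed difference tolerates counter wraparound; this is
	 * the same trick the kernel's time_before()/time_after() use. */
	return (long)(now - deadline) >= 0;
}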
4f774513
JS
17150/**
17151 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17152 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17153 *
17154 * This function searches through the existing incomplete sequences that have
17155 * been sent to this @vport. If the frame matches one of the incomplete
17156 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17157 * make up that sequence. If no sequence is found that matches this frame then
17158 * the function adds the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17159 * This function returns a pointer to the first dmabuf in the sequence list that
17160 * the frame was linked to.
17161 **/
17162static struct hbq_dmabuf *
17163lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17164{
17165 struct fc_frame_header *new_hdr;
17166 struct fc_frame_header *temp_hdr;
17167 struct lpfc_dmabuf *d_buf;
17168 struct lpfc_dmabuf *h_buf;
17169 struct hbq_dmabuf *seq_dmabuf = NULL;
17170 struct hbq_dmabuf *temp_dmabuf = NULL;
4360ca9c 17171 uint8_t found = 0;
4f774513 17172
4d9ab994 17173 INIT_LIST_HEAD(&dmabuf->dbuf.list);
45ed1190 17174 dmabuf->time_stamp = jiffies;
4f774513 17175 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
4360ca9c 17176
4f774513
JS
17177 /* Use the hdr_buf to find the sequence that this frame belongs to */
17178 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17179 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17180 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17181 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17182 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17183 continue;
17184 /* found a pending sequence that matches this frame */
17185 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17186 break;
17187 }
17188 if (!seq_dmabuf) {
17189 /*
17190 * This indicates first frame received for this sequence.
17191 * Queue the buffer on the vport's rcv_buffer_list.
17192 */
17193 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
45ed1190 17194 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17195 return dmabuf;
17196 }
17197 temp_hdr = seq_dmabuf->hbuf.virt;
eeead811
JS
17198 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17199 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4d9ab994
JS
17200 list_del_init(&seq_dmabuf->hbuf.list);
17201 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17202 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
45ed1190 17203 lpfc_update_rcv_time_stamp(vport);
4f774513
JS
17204 return dmabuf;
17205 }
45ed1190
JS
17206 /* move this sequence to the tail to indicate a young sequence */
17207 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17208 seq_dmabuf->time_stamp = jiffies;
17209 lpfc_update_rcv_time_stamp(vport);
eeead811
JS
17210 if (list_empty(&seq_dmabuf->dbuf.list)) {
17211 temp_hdr = dmabuf->hbuf.virt;
17212 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17213 return seq_dmabuf;
17214 }
4f774513 17215 /* find the correct place in the sequence to insert this frame */
4360ca9c
JS
17216 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17217 while (!found) {
4f774513
JS
17218 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17219 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17220 /*
17221 * If the frame's sequence count is greater than the frame on
17222 * the list then insert the frame right after this frame
17223 */
eeead811
JS
17224 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17225 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
4f774513 17226 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
4360ca9c
JS
17227 found = 1;
17228 break;
4f774513 17229 }
4360ca9c
JS
17230
17231 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17232 break;
17233 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
4f774513 17234 }
4360ca9c
JS
17235
17236 if (found)
17237 return seq_dmabuf;
4f774513
JS
17238 return NULL;
17239}
17240
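/*
 * Editor's illustrative sketch: inserting a frame into a sequence kept
 * ordered by SEQ_CNT. lpfc_fc_frame_add() above scans from the tail of
 * a doubly linked list; this stand-in scans a minimal singly linked
 * list from the head, which yields the same ordering.
 */
struct demo_frame {
	unsigned short seq_cnt;
	struct demo_frame *next;
};

static struct demo_frame *demo_seq_insert(struct demo_frame *head,
					  struct demo_frame *nf)
{
	struct demo_frame **pp = &head;

	/* Walk until the first frame with a seq_cnt >= the new frame's,
	 * then splice the new frame in before it. */
	while (*pp && (*pp)->seq_cnt < nf->seq_cnt)
		pp = &(*pp)->next;
	nf->next = *pp;
	*pp = nf;
	return head;
}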
6669f9bb
JS
17241/**
17242 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17243 * @vport: pointer to a virtual port
17244 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17245 *
17246 * This function tries to abort the partially assembled sequence described
17247 * by the information from the basic abort @dmabuf. It checks whether such a
17248 * partially assembled sequence is held by the driver. If so, it frees up all
17249 * the frames from the partially assembled sequence.
17250 *
17251 * Return
17252 * true -- if there is matching partially assembled sequence present and all
17253 * the frames freed with the sequence;
17254 * false -- if there is no matching partially assembled sequence present so
17255 * nothing got aborted in the lower layer driver
17256 **/
17257static bool
17258lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17259 struct hbq_dmabuf *dmabuf)
17260{
17261 struct fc_frame_header *new_hdr;
17262 struct fc_frame_header *temp_hdr;
17263 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17264 struct hbq_dmabuf *seq_dmabuf = NULL;
17265
17266 /* Use the hdr_buf to find the sequence that matches this frame */
17267 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17268 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17269 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17270 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17271 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17272 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17273 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17274 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17275 continue;
17276 /* found a pending sequence that matches this frame */
17277 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17278 break;
17279 }
17280
17281 /* Free up all the frames from the partially assembled sequence */
17282 if (seq_dmabuf) {
17283 list_for_each_entry_safe(d_buf, n_buf,
17284 &seq_dmabuf->dbuf.list, list) {
17285 list_del_init(&d_buf->list);
17286 lpfc_in_buf_free(vport->phba, d_buf);
17287 }
17288 return true;
17289 }
17290 return false;
17291}
17292
6dd9e31c
JS
17293/**
17294 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17295 * @vport: pointer to a virtual port
17296 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17297 *
17298 * This function tries to abort the sequence assembled at the upper level
17299 * protocol, described by the information from the basic abort @dmabuf. It
17300 * checks whether such a pending context exists at the upper level protocol.
17301 * If so, it cleans up the pending context.
17302 *
17303 * Return
17304 * true -- if there is matching pending context of the sequence cleaned
17305 * at ulp;
17306 * false -- if there is no matching pending context of the sequence present
17307 * at ulp.
17308 **/
17309static bool
17310lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17311{
17312 struct lpfc_hba *phba = vport->phba;
17313 int handled;
17314
17315 /* Accepting abort at ulp with SLI4 only */
17316 if (phba->sli_rev < LPFC_SLI_REV4)
17317 return false;
17318
17319 /* Register all caring upper level protocols to attend abort */
17320 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17321 if (handled)
17322 return true;
17323
17324 return false;
17325}
17326
6669f9bb 17327/**
546fc854 17328 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
6669f9bb
JS
17329 * @phba: Pointer to HBA context object.
17330 * @cmd_iocbq: pointer to the command iocbq structure.
17331 * @rsp_iocbq: pointer to the response iocbq structure.
17332 *
546fc854 17333 * This function handles the sequence abort response iocb command complete
6669f9bb
JS
17334 * event. It properly releases the memory allocated to the sequence abort
17335 * accept iocb.
17336 **/
17337static void
546fc854 17338lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
6669f9bb
JS
17339 struct lpfc_iocbq *cmd_iocbq,
17340 struct lpfc_iocbq *rsp_iocbq)
17341{
6dd9e31c
JS
17342 struct lpfc_nodelist *ndlp;
17343
17344 if (cmd_iocbq) {
17345 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17346 lpfc_nlp_put(ndlp);
17347 lpfc_nlp_not_used(ndlp);
6669f9bb 17348 lpfc_sli_release_iocbq(phba, cmd_iocbq);
6dd9e31c 17349 }
6b5151fd
JS
17350
17351 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17352 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17353 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17354 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17355 rsp_iocbq->iocb.ulpStatus,
17356 rsp_iocbq->iocb.un.ulpWord[4]);
6669f9bb
JS
17357}
17358
6d368e53
JS
17359/**
17360 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17361 * @phba: Pointer to HBA context object.
17362 * @xri: xri id in transaction.
17363 *
17364 * This function validates that the xri maps to the known range of XRIs
17365 * allocated and used by the driver.
17366 **/
7851fe2c 17367uint16_t
6d368e53
JS
17368lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17369 uint16_t xri)
17370{
a2fc4aef 17371 uint16_t i;
6d368e53
JS
17372
17373 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17374 if (xri == phba->sli4_hba.xri_ids[i])
17375 return i;
17376 }
17377 return NO_XRI;
17378}
17379
6669f9bb 17380/**
546fc854 17381 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
6669f9bb
JS
17382 * @phba: Pointer to HBA context object.
17383 * @fc_hdr: pointer to a FC frame header.
17384 *
546fc854 17385 * This function sends a basic response to a previous unsol sequence abort
6669f9bb
JS
17386 * event after aborting the sequence handling.
17387 **/
86c67379 17388void
6dd9e31c
JS
17389lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17390 struct fc_frame_header *fc_hdr, bool aborted)
6669f9bb 17391{
6dd9e31c 17392 struct lpfc_hba *phba = vport->phba;
6669f9bb
JS
17393 struct lpfc_iocbq *ctiocb = NULL;
17394 struct lpfc_nodelist *ndlp;
ee0f4fe1 17395 uint16_t oxid, rxid, xri, lxri;
5ffc266e 17396 uint32_t sid, fctl;
6669f9bb 17397 IOCB_t *icmd;
546fc854 17398 int rc;
6669f9bb
JS
17399
17400 if (!lpfc_is_link_up(phba))
17401 return;
17402
17403 sid = sli4_sid_from_fc_hdr(fc_hdr);
17404 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
5ffc266e 17405 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
6669f9bb 17406
6dd9e31c 17407 ndlp = lpfc_findnode_did(vport, sid);
6669f9bb 17408 if (!ndlp) {
9d3d340d 17409 ndlp = lpfc_nlp_init(vport, sid);
6dd9e31c
JS
17410 if (!ndlp) {
17411 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17412 "1268 Failed to allocate ndlp for "
17413 "oxid:x%x SID:x%x\n", oxid, sid);
17414 return;
17415 }
6dd9e31c
JS
17416 /* Put ndlp onto pport node list */
17417 lpfc_enqueue_node(vport, ndlp);
17418 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17419 /* re-setup ndlp without removing from node list */
17420 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17421 if (!ndlp) {
17422 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17423 "3275 Failed to active ndlp found "
17424 "for oxid:x%x SID:x%x\n", oxid, sid);
17425 return;
17426 }
6669f9bb
JS
17427 }
17428
546fc854 17429 /* Allocate buffer for rsp iocb */
6669f9bb
JS
17430 ctiocb = lpfc_sli_get_iocbq(phba);
17431 if (!ctiocb)
17432 return;
17433
5ffc266e
JS
17434 /* Extract the F_CTL field from FC_HDR */
17435 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17436
6669f9bb 17437 icmd = &ctiocb->iocb;
6669f9bb 17438 icmd->un.xseq64.bdl.bdeSize = 0;
5ffc266e 17439 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
6669f9bb
JS
17440 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17441 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17442 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17443
17444 /* Fill in the rest of iocb fields */
17445 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17446 icmd->ulpBdeCount = 0;
17447 icmd->ulpLe = 1;
17448 icmd->ulpClass = CLASS3;
6d368e53 17449 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6dd9e31c 17450 ctiocb->context1 = lpfc_nlp_get(ndlp);
6669f9bb 17451
6669f9bb
JS
17452 ctiocb->iocb_cmpl = NULL;
17453 ctiocb->vport = phba->pport;
546fc854 17454 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
6d368e53 17455 ctiocb->sli4_lxritag = NO_XRI;
546fc854
JS
17456 ctiocb->sli4_xritag = NO_XRI;
17457
ee0f4fe1
JS
17458 if (fctl & FC_FC_EX_CTX)
17459 /* Exchange responder sent the abort so we
17460 * own the oxid.
17461 */
17462 xri = oxid;
17463 else
17464 xri = rxid;
17465 lxri = lpfc_sli4_xri_inrange(phba, xri);
17466 if (lxri != NO_XRI)
17467 lpfc_set_rrq_active(phba, ndlp, lxri,
17468 (xri == oxid) ? rxid : oxid, 0);
6dd9e31c
JS
17469 /* For BA_ABTS from exchange responder, if the logical xri with
17470 * the oxid maps to the FCP XRI range, the port no longer has
17471 * that exchange context, send a BLS_RJT. Override the IOCB for
17472 * a BA_RJT.
17473 */
17474 if ((fctl & FC_FC_EX_CTX) &&
895427bd 17475 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
6dd9e31c
JS
17476 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17477 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17478 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17479 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17480 }
17481
17482 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17483 * the driver no longer has that exchange, send a BLS_RJT. Override
17484 * the IOCB for a BA_RJT.
546fc854 17485 */
6dd9e31c 17486 if (aborted == false) {
546fc854
JS
17487 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17488 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17489 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17490 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17491 }
6669f9bb 17492
5ffc266e
JS
17493 if (fctl & FC_FC_EX_CTX) {
17494 /* ABTS sent by responder to CT exchange, construction
17495 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17496 * field and RX_ID from ABTS for RX_ID field.
17497 */
546fc854 17498 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
5ffc266e
JS
17499 } else {
17500 /* ABTS sent by initiator to CT exchange, construction
17501 * of BA_ACC will need to allocate a new XRI for the
f09c3acc 17502 * XRI_TAG field.
5ffc266e 17503 */
546fc854 17504 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
5ffc266e 17505 }
f09c3acc 17506 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
546fc854 17507 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
5ffc266e 17508
546fc854 17509 /* Xmit CT abts response on exchange <xid> */
6dd9e31c
JS
17510 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17511 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17512 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
546fc854
JS
17513
17514 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17515 if (rc == IOCB_ERROR) {
6dd9e31c
JS
17516 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17517 "2925 Failed to issue CT ABTS RSP x%x on "
17518 "xri x%x, Data x%x\n",
17519 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17520 phba->link_state);
17521 lpfc_nlp_put(ndlp);
17522 ctiocb->context1 = NULL;
546fc854
JS
17523 lpfc_sli_release_iocbq(phba, ctiocb);
17524 }
6669f9bb
JS
17525}
17526
17527/**
17528 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17529 * @vport: Pointer to the vport on which this sequence was received
17530 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17531 *
17532 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17533 * receive sequence is only partially assembled by the driver, it aborts
17534 * the partially assembled frames for the sequence. Otherwise, if the
17535 * unsolicited receive sequence has been completely assembled and passed to
17536 * the Upper Layer Protocol (ULP), it marks the per-oxid status of the
17537 * unsolicited sequence as aborted. After that, it issues a basic
17538 * accept to accept the abort.
17539 **/
5d8b8167 17540static void
6669f9bb
JS
17541lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17542 struct hbq_dmabuf *dmabuf)
17543{
17544 struct lpfc_hba *phba = vport->phba;
17545 struct fc_frame_header fc_hdr;
5ffc266e 17546 uint32_t fctl;
6dd9e31c 17547 bool aborted;
6669f9bb 17548
6669f9bb
JS
17549 /* Make a copy of fc_hdr before the dmabuf being released */
17550 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
5ffc266e 17551 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
6669f9bb 17552
5ffc266e 17553 if (fctl & FC_FC_EX_CTX) {
6dd9e31c
JS
17554 /* ABTS by responder to exchange, no cleanup needed */
17555 aborted = true;
5ffc266e 17556 } else {
6dd9e31c
JS
17557 /* ABTS by initiator to exchange, need to do cleanup */
17558 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17559 if (aborted == false)
17560 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
5ffc266e 17561 }
6dd9e31c
JS
17562 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17563
86c67379
JS
17564 if (phba->nvmet_support) {
17565 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17566 return;
17567 }
17568
6dd9e31c
JS
17569 /* Respond with BA_ACC or BA_RJT accordingly */
17570 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
6669f9bb
JS
17571}
17572
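/*
 * Editor's illustrative sketch: the BA_ACC/BA_RJT decision made above.
 * If the exchange responder sent the ABTS there is nothing to clean
 * up; otherwise the answer depends on whether a matching partial
 * sequence (or a pending ULP context) was actually freed.
 */
#include <stdbool.h>

#define DEMO_FC_EX_CTX 0x800000	/* F_CTL exchange-context bit (FC_FC_EX_CTX) */

static bool demo_abts_send_ba_acc(unsigned int fctl, bool partial_freed,
				  bool ulp_cleaned)
{
	if (fctl & DEMO_FC_EX_CTX)
		return true;	/* responder's ABTS: accept outright */
	return partial_freed || ulp_cleaned;	/* else accept only if cleaned */
}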
4f774513
JS
17573/**
17574 * lpfc_seq_complete - Indicates if a sequence is complete
17575 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17576 *
17577 * This function checks the sequence, starting with the frame described by
17578 * @dmabuf, to see if all the frames associated with this sequence are present.
17579 * The frames associated with this sequence are linked to the @dmabuf using the
17580 * dbuf list. This function checks three things: 1) that the first frame
17581 * has a sequence count of zero; 2) that there is a frame with the last frame
17582 * of sequence bit set; 3) that there are no holes in the sequence count. The
17583 * function returns 1 when the sequence is complete, otherwise it returns 0.
17584 **/
17585static int
17586lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17587{
17588 struct fc_frame_header *hdr;
17589 struct lpfc_dmabuf *d_buf;
17590 struct hbq_dmabuf *seq_dmabuf;
17591 uint32_t fctl;
17592 int seq_count = 0;
17593
17594 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17595 /* make sure first frame of sequence has a sequence count of zero */
17596 if (hdr->fh_seq_cnt != seq_count)
17597 return 0;
17598 fctl = (hdr->fh_f_ctl[0] << 16 |
17599 hdr->fh_f_ctl[1] << 8 |
17600 hdr->fh_f_ctl[2]);
17601 /* If last frame of sequence we can return success. */
17602 if (fctl & FC_FC_END_SEQ)
17603 return 1;
17604 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17605 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17606 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17607 /* If there is a hole in the sequence count then fail. */
eeead811 17608 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
4f774513
JS
17609 return 0;
17610 fctl = (hdr->fh_f_ctl[0] << 16 |
17611 hdr->fh_f_ctl[1] << 8 |
17612 hdr->fh_f_ctl[2]);
17613 /* If last frame of sequence we can return success. */
17614 if (fctl & FC_FC_END_SEQ)
17615 return 1;
17616 }
17617 return 0;
17618}
17619
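/*
 * Editor's illustrative sketch: the completeness test of
 * lpfc_seq_complete() over a plain array. A sequence is complete when
 * the counts start at zero, have no holes, and some frame carries the
 * end-of-sequence bit.
 */
#include <stdbool.h>

struct demo_seq_frame {
	unsigned short seq_cnt;
	bool end_seq;	/* stand-in for F_CTL & FC_FC_END_SEQ */
};

static bool demo_seq_is_complete(const struct demo_seq_frame *f, int n)
{
	for (int i = 0; i < n; i++) {
		if (f[i].seq_cnt != i)	/* bad first count or a hole */
			return false;
		if (f[i].end_seq)	/* last frame present: complete */
			return true;
	}
	return false;	/* no end-of-sequence frame received yet */
}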
17620/**
17621 * lpfc_prep_seq - Prep sequence for ULP processing
17622 * @vport: Pointer to the vport on which this sequence was received
17623 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17624 *
17625 * This function takes a sequence, described by a list of frames, and creates
17626 * a list of iocbq structures to describe the sequence. This iocbq list will be
17627 * used to issue to the generic unsolicited sequence handler. This routine
17628 * returns a pointer to the first iocbq in the list. If the function is unable
17629 * to allocate an iocbq then it throws out the received frames that could not
17630 * be described and returns a pointer to the first iocbq. If it is unable to
17631 * allocate any iocbqs (including the first) this function returns NULL.
17632 **/
17633static struct lpfc_iocbq *
17634lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17635{
7851fe2c 17636 struct hbq_dmabuf *hbq_buf;
4f774513
JS
17637 struct lpfc_dmabuf *d_buf, *n_buf;
17638 struct lpfc_iocbq *first_iocbq, *iocbq;
17639 struct fc_frame_header *fc_hdr;
17640 uint32_t sid;
7851fe2c 17641 uint32_t len, tot_len;
eeead811 17642 struct ulp_bde64 *pbde;
4f774513
JS
17643
17644 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17645 /* remove from receive buffer list */
17646 list_del_init(&seq_dmabuf->hbuf.list);
45ed1190 17647 lpfc_update_rcv_time_stamp(vport);
4f774513 17648 /* get the Remote Port's SID */
6669f9bb 17649 sid = sli4_sid_from_fc_hdr(fc_hdr);
7851fe2c 17650 tot_len = 0;
4f774513
JS
17651 /* Get an iocbq struct to fill in. */
17652 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17653 if (first_iocbq) {
17654 /* Initialize the first IOCB. */
8fa38513 17655 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
4f774513 17656 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
895427bd 17657 first_iocbq->vport = vport;
939723a4
JS
17658
17659 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17660 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17661 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17662 first_iocbq->iocb.un.rcvels.parmRo =
17663 sli4_did_from_fc_hdr(fc_hdr);
17664 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17665 } else
17666 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
7851fe2c
JS
17667 first_iocbq->iocb.ulpContext = NO_XRI;
17668 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17669 be16_to_cpu(fc_hdr->fh_ox_id);
17670 /* iocbq is prepped for internal consumption. Physical vpi. */
17671 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17672 vport->phba->vpi_ids[vport->vpi];
4f774513 17673 /* put the first buffer into the first IOCBq */
48a5a664
JS
17674 tot_len = bf_get(lpfc_rcqe_length,
17675 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17676
4f774513
JS
17677 first_iocbq->context2 = &seq_dmabuf->dbuf;
17678 first_iocbq->context3 = NULL;
17679 first_iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17680 if (tot_len > LPFC_DATA_BUF_SIZE)
17681 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17682 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17683 else
17684 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17685
4f774513 17686 first_iocbq->iocb.un.rcvels.remoteID = sid;
48a5a664 17687
7851fe2c 17688 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
4f774513
JS
17689 }
17690 iocbq = first_iocbq;
17691 /*
17692 * Each IOCBq can have two Buffers assigned, so go through the list
17693 * of buffers for this sequence and save two buffers in each IOCBq
17694 */
17695 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17696 if (!iocbq) {
17697 lpfc_in_buf_free(vport->phba, d_buf);
17698 continue;
17699 }
17700 if (!iocbq->context3) {
17701 iocbq->context3 = d_buf;
17702 iocbq->iocb.ulpBdeCount++;
7851fe2c
JS
17703 /* We need to get the size out of the right CQE */
17704 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17705 len = bf_get(lpfc_rcqe_length,
17706 &hbq_buf->cq_event.cqe.rcqe_cmpl);
48a5a664
JS
17707 pbde = (struct ulp_bde64 *)
17708 &iocbq->iocb.unsli3.sli3Words[4];
17709 if (len > LPFC_DATA_BUF_SIZE)
17710 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17711 else
17712 pbde->tus.f.bdeSize = len;
17713
7851fe2c
JS
17714 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17715 tot_len += len;
4f774513
JS
17716 } else {
17717 iocbq = lpfc_sli_get_iocbq(vport->phba);
17718 if (!iocbq) {
17719 if (first_iocbq) {
17720 first_iocbq->iocb.ulpStatus =
17721 IOSTAT_FCP_RSP_ERROR;
17722 first_iocbq->iocb.un.ulpWord[4] =
17723 IOERR_NO_RESOURCES;
17724 }
17725 lpfc_in_buf_free(vport->phba, d_buf);
17726 continue;
17727 }
48a5a664
JS
17728 /* We need to get the size out of the right CQE */
17729 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17730 len = bf_get(lpfc_rcqe_length,
17731 &hbq_buf->cq_event.cqe.rcqe_cmpl);
4f774513
JS
17732 iocbq->context2 = d_buf;
17733 iocbq->context3 = NULL;
17734 iocbq->iocb.ulpBdeCount = 1;
48a5a664
JS
17735 if (len > LPFC_DATA_BUF_SIZE)
17736 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
4f774513 17737 LPFC_DATA_BUF_SIZE;
48a5a664
JS
17738 else
17739 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
7851fe2c 17740
7851fe2c
JS
17741 tot_len += len;
17742 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17743
4f774513
JS
17744 iocbq->iocb.un.rcvels.remoteID = sid;
17745 list_add_tail(&iocbq->list, &first_iocbq->list);
17746 }
17747 }
17748 return first_iocbq;
17749}
17750
6669f9bb
JS
17751static void
17752lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17753 struct hbq_dmabuf *seq_dmabuf)
17754{
17755 struct fc_frame_header *fc_hdr;
17756 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17757 struct lpfc_hba *phba = vport->phba;
17758
17759 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17760 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17761 if (!iocbq) {
17762 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17763 "2707 Ring %d handler: Failed to allocate "
17764 "iocb Rctl x%x Type x%x received\n",
17765 LPFC_ELS_RING,
17766 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17767 return;
17768 }
17769 if (!lpfc_complete_unsol_iocb(phba,
895427bd 17770 phba->sli4_hba.els_wq->pring,
6669f9bb
JS
17771 iocbq, fc_hdr->fh_r_ctl,
17772 fc_hdr->fh_type))
6d368e53 17773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6669f9bb
JS
17774 "2540 Ring %d handler: unexpected Rctl "
17775 "x%x Type x%x received\n",
17776 LPFC_ELS_RING,
17777 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17778
17779 /* Free iocb created in lpfc_prep_seq */
17780 list_for_each_entry_safe(curr_iocb, next_iocb,
17781 &iocbq->list, list) {
17782 list_del_init(&curr_iocb->list);
17783 lpfc_sli_release_iocbq(phba, curr_iocb);
17784 }
17785 lpfc_sli_release_iocbq(phba, iocbq);
17786}
17787
ae9e28f3
JS
17788static void
17789lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17790 struct lpfc_iocbq *rspiocb)
17791{
17792 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17793
17794 if (pcmd && pcmd->virt)
771db5c0 17795 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3
JS
17796 kfree(pcmd);
17797 lpfc_sli_release_iocbq(phba, cmdiocb);
e817e5d7 17798 lpfc_drain_txq(phba);
ae9e28f3
JS
17799}
17800
17801static void
17802lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17803 struct hbq_dmabuf *dmabuf)
17804{
17805 struct fc_frame_header *fc_hdr;
17806 struct lpfc_hba *phba = vport->phba;
17807 struct lpfc_iocbq *iocbq = NULL;
17808 union lpfc_wqe *wqe;
17809 struct lpfc_dmabuf *pcmd = NULL;
17810 uint32_t frame_len;
17811 int rc;
e817e5d7 17812 unsigned long iflags;
ae9e28f3
JS
17813
17814 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17815 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17816
17817 /* Send the received frame back */
17818 iocbq = lpfc_sli_get_iocbq(phba);
e817e5d7
JS
17819 if (!iocbq) {
17820 /* Queue cq event and wakeup worker thread to process it */
17821 spin_lock_irqsave(&phba->hbalock, iflags);
17822 list_add_tail(&dmabuf->cq_event.list,
17823 &phba->sli4_hba.sp_queue_event);
17824 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17825 spin_unlock_irqrestore(&phba->hbalock, iflags);
17826 lpfc_worker_wake_up(phba);
17827 return;
17828 }
ae9e28f3
JS
17829
17830 /* Allocate buffer for command payload */
17831 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17832 if (pcmd)
771db5c0 17833 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
ae9e28f3
JS
17834 &pcmd->phys);
17835 if (!pcmd || !pcmd->virt)
17836 goto exit;
17837
17838 INIT_LIST_HEAD(&pcmd->list);
17839
17840 /* copyin the payload */
17841 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17842
17843 /* fill in BDE's for command */
17844 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17845 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17846 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17847 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17848
17849 iocbq->context2 = pcmd;
17850 iocbq->vport = vport;
17851 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17852 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17853
17854 /*
17855 * Setup rest of the iocb as though it were a WQE
17856 * Build the SEND_FRAME WQE
17857 */
17858 wqe = (union lpfc_wqe *)&iocbq->iocb;
17859
17860 wqe->send_frame.frame_len = frame_len;
17861 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17862 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17863 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17864 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17865 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17866 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17867
17868 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17869 iocbq->iocb.ulpLe = 1;
17870 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17871 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17872 if (rc == IOCB_ERROR)
17873 goto exit;
17874
17875 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17876 return;
17877
17878exit:
17879 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17880 "2023 Unable to process MDS loopback frame\n");
17881 if (pcmd && pcmd->virt)
771db5c0 17882 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
ae9e28f3 17883 kfree(pcmd);
401bb416
DK
17884 if (iocbq)
17885 lpfc_sli_release_iocbq(phba, iocbq);
ae9e28f3
JS
17886 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17887}
17888
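/*
 * Editor's illustrative sketch: copying the six big-endian FC header
 * words into CPU order for the SEND_FRAME WQE, as done above. A
 * portable stand-in for the be32_to_cpu() conversions.
 */
#include <stdint.h>

static uint32_t demo_be32_to_cpu(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void demo_copy_fc_hdr(uint32_t dst[6], const uint8_t *fc_hdr)
{
	/* An FC frame header is 24 bytes: six 32-bit big-endian words. */
	for (int i = 0; i < 6; i++)
		dst[i] = demo_be32_to_cpu(fc_hdr + 4 * i);
}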
4f774513
JS
17889/**
17890 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17891 * @phba: Pointer to HBA context object.
17892 *
17893 * This function is called with no lock held. This function processes all
17894 * the received buffers and gives it to upper layers when a received buffer
17895 * indicates that it is the final frame in the sequence. The interrupt
895427bd 17896 * service routine processes received buffers in interrupt context.
4f774513
JS
17897 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17898 * appropriate receive function when the final frame in a sequence is received.
17899 **/
4d9ab994
JS
17900void
17901lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17902 struct hbq_dmabuf *dmabuf)
4f774513 17903{
4d9ab994 17904 struct hbq_dmabuf *seq_dmabuf;
4f774513
JS
17905 struct fc_frame_header *fc_hdr;
17906 struct lpfc_vport *vport;
17907 uint32_t fcfi;
939723a4 17908 uint32_t did;
4f774513 17909
4f774513 17910 /* Process each received buffer */
4d9ab994 17911 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
2ea259ee 17912
e817e5d7
JS
17913 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17914 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17915 vport = phba->pport;
17916 /* Handle MDS Loopback frames */
17917 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17918 return;
17919 }
17920
4d9ab994
JS
17921 /* check to see if this is a valid type of frame */
17922 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17923 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17924 return;
17925 }
2ea259ee 17926
7851fe2c
JS
17927 if ((bf_get(lpfc_cqe_code,
17928 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17929 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17930 &dmabuf->cq_event.cqe.rcqe_cmpl);
17931 else
17932 fcfi = bf_get(lpfc_rcqe_fcf_id,
17933 &dmabuf->cq_event.cqe.rcqe_cmpl);
939723a4 17934
895427bd
JS
17935 /* d_id this frame is directed to */
17936 did = sli4_did_from_fc_hdr(fc_hdr);
17937
17938 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
939723a4 17939 if (!vport) {
4d9ab994
JS
17940 /* throw out the frame */
17941 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17942 return;
17943 }
939723a4 17944
939723a4
JS
17945 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17946 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17947 (did != Fabric_DID)) {
17948 /*
17949 * Throw out the frame if we are not pt2pt.
17950 * The pt2pt protocol allows for discovery frames
17951 * to be received without a registered VPI.
17952 */
17953 if (!(vport->fc_flag & FC_PT2PT) ||
17954 (phba->link_state == LPFC_HBA_READY)) {
17955 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17956 return;
17957 }
17958 }
17959
6669f9bb
JS
17960 /* Handle the basic abort sequence (BA_ABTS) event */
17961 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17962 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17963 return;
17964 }
17965
4d9ab994
JS
17966 /* Link this frame */
17967 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17968 if (!seq_dmabuf) {
17969 /* unable to add frame to vport - throw it out */
17970 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17971 return;
17972 }
17973 /* If not last frame in sequence continue processing frames. */
def9c7a9 17974 if (!lpfc_seq_complete(seq_dmabuf))
4d9ab994 17975 return;
def9c7a9 17976
6669f9bb
JS
17977 /* Send the complete sequence to the upper layer protocol */
17978 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
4f774513 17979}
6fb120a7
JS
17980
17981/**
17982 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17983 * @phba: pointer to lpfc hba data structure.
17984 *
17985 * This routine is invoked to post rpi header templates to the
17986 * HBA consistent with the SLI-4 interface spec. This routine
49198b37
JS
17987 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17988 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
6fb120a7
JS
17989 *
17990 * This routine does not require any locks. Its usage is expected
17991 * to be at driver load or reset recovery, when driver execution is
17992 * sequential.
17993 *
17994 * Return codes
af901ca1 17995 * 0 - successful
d439d286 17996 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
17997 * When this error occurs, the driver is not guaranteed
17998 * to have any rpi regions posted to the device and
17999 * must either attempt to repost the regions or take a
18000 * fatal error.
18001 **/
18002int
18003lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18004{
18005 struct lpfc_rpi_hdr *rpi_page;
18006 uint32_t rc = 0;
6d368e53
JS
18007 uint16_t lrpi = 0;
18008
18009 /* SLI4 ports that support extents do not require RPI headers. */
18010 if (!phba->sli4_hba.rpi_hdrs_in_use)
18011 goto exit;
18012 if (phba->sli4_hba.extents_in_use)
18013 return -EIO;
6fb120a7 18014
6fb120a7 18015 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6d368e53
JS
18016 /*
18017 * Assign the rpi headers a physical rpi only if the driver
18018 * has not initialized those resources. A port reset only
18019 * needs the headers posted.
18020 */
18021 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18022 LPFC_RPI_RSRC_RDY)
18023 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18024
6fb120a7
JS
18025 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18026 if (rc != MBX_SUCCESS) {
18027 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18028 "2008 Error %d posting all rpi "
18029 "headers\n", rc);
18030 rc = -EIO;
18031 break;
18032 }
18033 }
18034
6d368e53
JS
18035 exit:
18036 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18037 LPFC_RPI_RSRC_RDY);
6fb120a7
JS
18038 return rc;
18039}
18040
18041/**
18042 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18043 * @phba: pointer to lpfc hba data structure.
18044 * @rpi_page: pointer to the rpi memory region.
18045 *
18046 * This routine is invoked to post a single rpi header to the
18047 * HBA consistent with the SLI-4 interface spec. This memory region
18048 * maps up to 64 rpi context regions.
18049 *
18050 * Return codes
af901ca1 18051 * 0 - successful
d439d286
JS
18052 * -ENOMEM - No available memory
18053 * -EIO - The mailbox failed to complete successfully.
6fb120a7
JS
18054 **/
18055int
18056lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18057{
18058 LPFC_MBOXQ_t *mboxq;
18059 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18060 uint32_t rc = 0;
6fb120a7
JS
18061 uint32_t shdr_status, shdr_add_status;
18062 union lpfc_sli4_cfg_shdr *shdr;
18063
6d368e53
JS
18064 /* SLI4 ports that support extents do not require RPI headers. */
18065 if (!phba->sli4_hba.rpi_hdrs_in_use)
18066 return rc;
18067 if (phba->sli4_hba.extents_in_use)
18068 return -EIO;
18069
6fb120a7
JS
18070 /* The port is notified of the header region via a mailbox command. */
18071 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18072 if (!mboxq) {
18073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18074 "2001 Unable to allocate memory for issuing "
18075 "SLI_CONFIG_SPECIAL mailbox command\n");
18076 return -ENOMEM;
18077 }
18078
18079 /* Post all rpi memory regions to the port. */
18080 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
6fb120a7
JS
18081 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18082 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18083 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
fedd3b7b
JS
18084 sizeof(struct lpfc_sli4_cfg_mhdr),
18085 LPFC_SLI4_MBX_EMBED);
6d368e53
JS
18086
18087
18088 /* Post the physical rpi to the port for this rpi header. */
6fb120a7
JS
18089 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18090 rpi_page->start_rpi);
6d368e53
JS
18091 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18092 hdr_tmpl, rpi_page->page_count);
18093
6fb120a7
JS
18094 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18095 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
f1126688 18096 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6fb120a7
JS
18097 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18098 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18099 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18100 if (rc != MBX_TIMEOUT)
18101 mempool_free(mboxq, phba->mbox_mem_pool);
18102 if (shdr_status || shdr_add_status || rc) {
18103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18104 "2514 POST_RPI_HDR mailbox failed with "
18105 "status x%x add_status x%x, mbx status x%x\n",
18106 shdr_status, shdr_add_status, rc);
18107 rc = -ENXIO;
845d9e8d
JS
18108 } else {
18109 /*
18110 * The next_rpi stores the next logical modulo-64 rpi value used
18111 * to post physical rpis in subsequent rpi postings.
18112 */
18113 spin_lock_irq(&phba->hbalock);
18114 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18115 spin_unlock_irq(&phba->hbalock);
6fb120a7
JS
18116 }
18117 return rc;
18118}
18119
18120/**
18121 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18122 * @phba: pointer to lpfc hba data structure.
18123 *
18124 * This routine is invoked to allocate the next available rpi from the
18125 * driver's rpi bitmask. When the pool of rpis runs low, it also
49198b37
JS
18126 * grows the pool by creating and posting another rpi header page to
18127 * the port before returning.
6fb120a7
JS
18128 *
18129 * Returns
af901ca1 18130 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
6fb120a7
JS
18131 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18132 **/
18133int
18134lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18135{
6d368e53
JS
18136 unsigned long rpi;
18137 uint16_t max_rpi, rpi_limit;
18138 uint16_t rpi_remaining, lrpi = 0;
6fb120a7 18139 struct lpfc_rpi_hdr *rpi_hdr;
4902b381 18140 unsigned long iflag;
6fb120a7 18141
6fb120a7 18142 /*
6d368e53
JS
18143 * Fetch the next logical rpi. Because this index is logical,
18144 * the driver starts at 0 each time.
6fb120a7 18145 */
4902b381 18146 spin_lock_irqsave(&phba->hbalock, iflag);
be6bb941
JS
18147 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18148 rpi_limit = phba->sli4_hba.next_rpi;
18149
6d368e53
JS
18150 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18151 if (rpi >= rpi_limit)
6fb120a7
JS
18152 rpi = LPFC_RPI_ALLOC_ERROR;
18153 else {
18154 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18155 phba->sli4_hba.max_cfg_param.rpi_used++;
18156 phba->sli4_hba.rpi_count++;
18157 }
be6bb941
JS
18158 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18159 "0001 rpi:%x max:%x lim:%x\n",
18160 (int) rpi, max_rpi, rpi_limit);
6fb120a7
JS
18161
18162 /*
18163 * Don't try to allocate more rpi header regions if the device limit
6d368e53 18164 * has been exhausted.
6fb120a7
JS
18165 */
18166 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18167 (phba->sli4_hba.rpi_count >= max_rpi)) {
4902b381 18168 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18169 return rpi;
18170 }
18171
6d368e53
JS
18172 /*
18173 * RPI header postings are not required for SLI4 ports capable of
18174 * extents.
18175 */
18176 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4902b381 18177 spin_unlock_irqrestore(&phba->hbalock, iflag);
6d368e53
JS
18178 return rpi;
18179 }
18180
6fb120a7
JS
18181 /*
18182 * If the driver is running low on rpi resources, allocate another
18183 * page now. Note that the next_rpi value is used because
18184 * it represents how many are actually in use whereas max_rpi notes
18185 * how many are supported max by the device.
18186 */
6d368e53 18187 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
4902b381 18188 spin_unlock_irqrestore(&phba->hbalock, iflag);
6fb120a7
JS
18189 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18190 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18191 if (!rpi_hdr) {
18192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18193 "2002 Error Could not grow rpi "
18194 "count\n");
18195 } else {
6d368e53
JS
18196 lrpi = rpi_hdr->start_rpi;
18197 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
6fb120a7
JS
18198 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18199 }
18200 }
18201
18202 return rpi;
18203}
18204
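/*
 * Editor's illustrative sketch: logical-index allocation from a
 * bitmap, the core of lpfc_sli4_alloc_rpi() above, written in plain C
 * instead of the kernel's find_next_zero_bit()/set_bit().
 */
#include <limits.h>
#include <stddef.h>

#define DEMO_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static int demo_alloc_index(unsigned long *bmask, int limit)
{
	for (int i = 0; i < limit; i++) {
		size_t word = i / DEMO_BITS_PER_LONG;
		unsigned long mask = 1UL << (i % DEMO_BITS_PER_LONG);

		if (!(bmask[word] & mask)) {	/* first clear bit is free */
			bmask[word] |= mask;
			return i;
		}
	}
	return -1;	/* exhausted: cf. LPFC_RPI_ALLOC_ERROR */
}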
d7c47992
JS
18205/**
18206 * __lpfc_sli4_free_rpi - Release an rpi for reuse (hbalock held by caller).
18207 * @phba: pointer to lpfc hba data structure.
18208 *
18209 * This routine is invoked to release an rpi to the pool of
18210 * available rpis maintained by the driver.
18211 **/
5d8b8167 18212static void
d7c47992
JS
18213__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18214{
18215 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18216 phba->sli4_hba.rpi_count--;
18217 phba->sli4_hba.max_cfg_param.rpi_used--;
18218 }
18219}
18220
6fb120a7
JS
18221/**
18222 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18223 * @phba: pointer to lpfc hba data structure.
18224 *
18225 * This routine is invoked to release an rpi to the pool of
18226 * available rpis maintained by the driver.
18227 **/
18228void
18229lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18230{
18231 spin_lock_irq(&phba->hbalock);
d7c47992 18232 __lpfc_sli4_free_rpi(phba, rpi);
6fb120a7
JS
18233 spin_unlock_irq(&phba->hbalock);
18234}
18235
18236/**
18237 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18238 * @phba: pointer to lpfc hba data structure.
18239 *
18240 * This routine is invoked to remove the memory region that
18241 * provided rpi via a bitmask.
18242 **/
18243void
18244lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18245{
18246 kfree(phba->sli4_hba.rpi_bmask);
6d368e53
JS
18247 kfree(phba->sli4_hba.rpi_ids);
18248 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6fb120a7
JS
18249}
18250
18251/**
18252 * lpfc_sli4_resume_rpi - Resume an rpi with the port
18253 * @ndlp: pointer to the node whose rpi is being resumed.
18254 *
18255 * This routine is invoked to issue a RESUME_RPI mailbox command to
18256 * the port for the rpi associated with @ndlp.
18257 **/
18258int
6b5151fd
JS
18259lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18260 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
6fb120a7
JS
18261{
18262 LPFC_MBOXQ_t *mboxq;
18263 struct lpfc_hba *phba = ndlp->phba;
18264 int rc;
18265
18266 /* The port is notified of the header region via a mailbox command. */
18267 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18268 if (!mboxq)
18269 return -ENOMEM;
18270
18271 /* Post all rpi memory regions to the port. */
18272 lpfc_resume_rpi(mboxq, ndlp);
6b5151fd
JS
18273 if (cmpl) {
18274 mboxq->mbox_cmpl = cmpl;
3e1f0718
JS
18275 mboxq->ctx_buf = arg;
18276 mboxq->ctx_ndlp = ndlp;
72859909
JS
18277 } else
18278 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6b5151fd 18279 mboxq->vport = ndlp->vport;
6fb120a7
JS
18280 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18281 if (rc == MBX_NOT_FINISHED) {
18282 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18283 "2010 Resume RPI Mailbox failed "
18284 "status %d, mbxStatus x%x\n", rc,
18285 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18286 mempool_free(mboxq, phba->mbox_mem_pool);
18287 return -EIO;
18288 }
18289 return 0;
18290}
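
/*
 * Illustrative sketch: resuming an rpi with a private completion handler.
 * The handler reads the caller context back out of ctx_buf (where the
 * arg is stored above) and owns freeing the mailbox, mirroring what the
 * default lpfc_sli_def_mbox_cmpl handler does in the NULL-cmpl case.
 * The lpfc_example_* names are hypothetical.
 */
static inline void
lpfc_example_resume_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *arg = mboxq->ctx_buf;	/* the 'arg' passed below */

	(void)arg;	/* ... act on the resumed rpi using 'arg' ... */
	mempool_free(mboxq, phba->mbox_mem_pool);
}

static inline int
lpfc_example_resume(struct lpfc_nodelist *ndlp, void *arg)
{
	/* 0 on success, -ENOMEM/-EIO per the routine above */
	return lpfc_sli4_resume_rpi(ndlp, lpfc_example_resume_cmpl, arg);
}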
18291
18292/**
18293 * lpfc_sli4_init_vpi - Initialize a vpi with the port
76a95d75 18294 * @vport: Pointer to the vport for which the vpi is being initialized
6fb120a7 18295 *
76a95d75 18296 * This routine is invoked to activate a vpi with the port.
6fb120a7
JS
18297 *
18298 * Returns:
18299 * 0 success
18300 * -ENOMEM or -EIO otherwise
18301 **/
18302int
76a95d75 18303lpfc_sli4_init_vpi(struct lpfc_vport *vport)
6fb120a7
JS
18304{
18305 LPFC_MBOXQ_t *mboxq;
18306 int rc = 0;
6a9c52cf 18307 int retval = MBX_SUCCESS;
6fb120a7 18308 uint32_t mbox_tmo;
76a95d75 18309 struct lpfc_hba *phba = vport->phba;
6fb120a7
JS
18310 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18311 if (!mboxq)
18312 return -ENOMEM;
76a95d75 18313 lpfc_init_vpi(phba, mboxq, vport->vpi);
a183a15f 18314 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
6fb120a7 18315 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6fb120a7 18316 if (rc != MBX_SUCCESS) {
76a95d75 18317 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
6fb120a7
JS
18318 "2022 INIT VPI Mailbox failed "
18319 "status %d, mbxStatus x%x\n", rc,
18320 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6a9c52cf 18321 retval = -EIO;
6fb120a7 18322 }
6a9c52cf 18323 if (rc != MBX_TIMEOUT)
76a95d75 18324 mempool_free(mboxq, vport->phba->mbox_mem_pool);
6a9c52cf
JS
18325
18326 return retval;
6fb120a7
JS
18327}
18328
18329/**
18330 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18331 * @phba: pointer to lpfc hba data structure.
18332 * @mboxq: Pointer to mailbox object.
18333 *
18334 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
18335 * command issued by lpfc_sli4_add_fcf_record() below. It checks the
18336 * mailbox status and frees the nonembedded mailbox resources.
18337 **/
18338static void
18339lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18340{
18341 void *virt_addr;
18342 union lpfc_sli4_cfg_shdr *shdr;
18343 uint32_t shdr_status, shdr_add_status;
18344
18345 virt_addr = mboxq->sge_array->addr[0];
18346 /* The IOCTL status is embedded in the mailbox subheader. */
18347 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18348 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18349 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18350
18351 if ((shdr_status || shdr_add_status) &&
18352 (shdr_status != STATUS_FCF_IN_USE))
18353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18354 "2558 ADD_FCF_RECORD mailbox failed with "
18355 "status x%x add_status x%x\n",
18356 shdr_status, shdr_add_status);
18357
18358 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18359}
18360
18361/**
18362 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18363 * @phba: pointer to lpfc hba data structure.
18364 * @fcf_record: pointer to the initialized fcf record to add.
18365 *
18366 * This routine is invoked to manually add a single FCF record. The caller
18367 * must pass a completely initialized FCF_Record. This routine takes
18368 * care of the nonembedded mailbox operations.
18369 **/
18370int
18371lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18372{
18373 int rc = 0;
18374 LPFC_MBOXQ_t *mboxq;
18375 uint8_t *bytep;
18376 void *virt_addr;
6fb120a7
JS
18377 struct lpfc_mbx_sge sge;
18378 uint32_t alloc_len, req_len;
18379 uint32_t fcfindex;
18380
18381 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18382 if (!mboxq) {
18383 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18384 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18385 return -ENOMEM;
18386 }
18387
18388 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18389 sizeof(uint32_t);
18390
18391 /* Allocate DMA memory and set up the non-embedded mailbox command */
18392 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18393 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18394 req_len, LPFC_SLI4_MBX_NEMBED);
18395 if (alloc_len < req_len) {
18396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18397 "2523 Allocated DMA memory size (x%x) is "
18398 "less than the requested DMA memory "
18399 "size (x%x)\n", alloc_len, req_len);
18400 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18401 return -ENOMEM;
18402 }
18403
18404 /*
18405 * Get the first SGE entry from the non-embedded DMA memory. This
18406 * routine only uses a single SGE.
18407 */
18408 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
6fb120a7
JS
18409 virt_addr = mboxq->sge_array->addr[0];
18410 /*
18411 * Configure the FCF record for FCFI 0. This is the driver's
18412 * hardcoded default and gets used in nonFIP mode.
18413 */
18414 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18415 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18416 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18417
18418 /*
18419 * Copy the fcf_index and the FCF Record Data. The data starts after
18420 * the FCoE header plus word10. The data copy needs to be endian
18421 * correct.
18422 */
18423 bytep += sizeof(uint32_t);
18424 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18425 mboxq->vport = phba->pport;
18426 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18427 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18428 if (rc == MBX_NOT_FINISHED) {
18429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18430 "2515 ADD_FCF_RECORD mailbox failed with "
18431 "status 0x%x\n", rc);
18432 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18433 rc = -EIO;
18434 } else
18435 rc = 0;
18436
18437 return rc;
18438}
18439
18440/**
18441 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18442 * @phba: pointer to lpfc hba data structure.
18443 * @fcf_record: pointer to the fcf record to write the default data.
18444 * @fcf_index: FCF table entry index.
18445 *
18446 * This routine is invoked to build the driver's default FCF record. The
18447 * values used are hardcoded. This routine handles memory initialization.
18448 *
18449 **/
18450void
18451lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18452 struct fcf_record *fcf_record,
18453 uint16_t fcf_index)
18454{
18455 memset(fcf_record, 0, sizeof(struct fcf_record));
18456 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18457 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18458 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18459 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18460 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18461 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18462 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18463 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18464 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18465 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18466 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18467 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18468 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
0c287589 18469 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
6fb120a7
JS
18470 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18471 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18472 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18473 /* Set the VLAN bit map */
18474 if (phba->valid_vlan) {
18475 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18476 = 1 << (phba->vlan_id % 8);
18477 }
18478}
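
/*
 * Illustrative sketch: the pairing the two routines above were written
 * for - build the hardcoded default FCF record (FCFI 0, nonFIP mode)
 * and push it to the port.  Freeing the record right after the call is
 * safe because lpfc_sli4_add_fcf_record() copies it into the mailbox
 * DMA buffer before issuing.  lpfc_example_* is hypothetical.
 */
static inline int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record *fcf_record;
	int rc;

	fcf_record = kzalloc(sizeof(*fcf_record), GFP_KERNEL);
	if (!fcf_record)
		return -ENOMEM;

	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 0);
	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);

	kfree(fcf_record);
	return rc;	/* 0, -ENOMEM or -EIO */
}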
18479
18480/**
0c9ab6f5 18481 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
6fb120a7
JS
18482 * @phba: pointer to lpfc hba data structure.
18483 * @fcf_index: FCF table entry offset.
18484 *
0c9ab6f5
JS
18485 * This routine is invoked to scan the entire FCF table by reading FCF
18486 * records and processing them one at a time starting from the @fcf_index
18487 * for initial FCF discovery or fast FCF failover rediscovery.
18488 *
25985edc 18489 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5 18490 * otherwise.
6fb120a7
JS
18491 **/
18492int
0c9ab6f5 18493lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
6fb120a7
JS
18494{
18495 int rc = 0, error;
18496 LPFC_MBOXQ_t *mboxq;
6fb120a7 18497
32b9793f 18498 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
80c17849 18499 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
6fb120a7
JS
18500 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18501 if (!mboxq) {
18502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18503 "2000 Failed to allocate mbox for "
18504 "READ_FCF cmd\n");
4d9ab994 18505 error = -ENOMEM;
0c9ab6f5 18506 goto fail_fcf_scan;
6fb120a7 18507 }
ecfd03c6 18508 /* Construct the read FCF record mailbox command */
0c9ab6f5 18509 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
ecfd03c6
JS
18510 if (rc) {
18511 error = -EINVAL;
0c9ab6f5 18512 goto fail_fcf_scan;
6fb120a7 18513 }
ecfd03c6 18514 /* Issue the mailbox command asynchronously */
6fb120a7 18515 mboxq->vport = phba->pport;
0c9ab6f5 18516 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
a93ff37a
JS
18517
18518 spin_lock_irq(&phba->hbalock);
18519 phba->hba_flag |= FCF_TS_INPROG;
18520 spin_unlock_irq(&phba->hbalock);
18521
6fb120a7 18522 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
ecfd03c6 18523 if (rc == MBX_NOT_FINISHED)
6fb120a7 18524 error = -EIO;
ecfd03c6 18525 else {
38b92ef8
JS
18526 /* Reset eligible FCF count for new scan */
18527 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
999d813f 18528 phba->fcf.eligible_fcf_cnt = 0;
6fb120a7 18529 error = 0;
32b9793f 18530 }
0c9ab6f5 18531fail_fcf_scan:
4d9ab994
JS
18532 if (error) {
18533 if (mboxq)
18534 lpfc_sli4_mbox_cmd_free(phba, mboxq);
a93ff37a 18535 /* FCF scan failed, clear FCF_TS_INPROG flag */
4d9ab994 18536 spin_lock_irq(&phba->hbalock);
a93ff37a 18537 phba->hba_flag &= ~FCF_TS_INPROG;
4d9ab994
JS
18538 spin_unlock_irq(&phba->hbalock);
18539 }
6fb120a7
JS
18540 return error;
18541}
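
/*
 * Illustrative sketch: kicking off an initial scan of the whole FCF
 * table.  LPFC_FCOE_FCF_GET_FIRST restarts the read from the top; the
 * completion handler (lpfc_mbx_cmpl_fcf_scan_read_fcf_rec, set above)
 * is what walks the remaining entries.  lpfc_example_* is hypothetical.
 */
static inline int lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
	/* 0 = mailbox submitted; -ENOMEM/-EINVAL/-EIO otherwise */
	return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}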
a0c87cbd 18542
0c9ab6f5 18543/**
a93ff37a 18544 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
0c9ab6f5
JS
18545 * @phba: pointer to lpfc hba data structure.
18546 * @fcf_index: FCF table entry offset.
18547 *
18548 * This routine is invoked to read an FCF record indicated by @fcf_index
a93ff37a 18549 * and to use it for FLOGI roundrobin FCF failover.
0c9ab6f5 18550 *
25985edc 18551 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5
JS
18552 * otherwise.
18553 **/
18554int
18555lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18556{
18557 int rc = 0, error;
18558 LPFC_MBOXQ_t *mboxq;
18559
18560 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18561 if (!mboxq) {
18562 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18563 "2763 Failed to allocate mbox for "
18564 "READ_FCF cmd\n");
18565 error = -ENOMEM;
18566 goto fail_fcf_read;
18567 }
18568 /* Construct the read FCF record mailbox command */
18569 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18570 if (rc) {
18571 error = -EINVAL;
18572 goto fail_fcf_read;
18573 }
18574 /* Issue the mailbox command asynchronously */
18575 mboxq->vport = phba->pport;
18576 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18577 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18578 if (rc == MBX_NOT_FINISHED)
18579 error = -EIO;
18580 else
18581 error = 0;
18582
18583fail_fcf_read:
18584 if (error && mboxq)
18585 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18586 return error;
18587}
18588
18589/**
18590 * lpfc_sli4_read_fcf_rec - Read hba fcf record for updating the eligible fcf bmask.
18591 * @phba: pointer to lpfc hba data structure.
18592 * @fcf_index: FCF table entry offset.
18593 *
18594 * This routine is invoked to read an FCF record indicated by @fcf_index to
a93ff37a 18595 * determine whether it's eligible for the FLOGI roundrobin failover list.
0c9ab6f5 18596 *
25985edc 18597 * Return 0 if the mailbox command is submitted successfully, non-zero
0c9ab6f5
JS
18598 * otherwise.
18599 **/
18600int
18601lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18602{
18603 int rc = 0, error;
18604 LPFC_MBOXQ_t *mboxq;
18605
18606 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18607 if (!mboxq) {
18608 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18609 "2758 Failed to allocate mbox for "
18610 "READ_FCF cmd\n");
18611 error = -ENOMEM;
18612 goto fail_fcf_read;
18613 }
18614 /* Construct the read FCF record mailbox command */
18615 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18616 if (rc) {
18617 error = -EINVAL;
18618 goto fail_fcf_read;
18619 }
18620 /* Issue the mailbox command asynchronously */
18621 mboxq->vport = phba->pport;
18622 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18623 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18624 if (rc == MBX_NOT_FINISHED)
18625 error = -EIO;
18626 else
18627 error = 0;
18628
18629fail_fcf_read:
18630 if (error && mboxq)
18631 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18632 return error;
18633}
18634
7d791df7 18635/**
f5cb5304 18636 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask at the next priority
7d791df7
JS
18637 * @phba: pointer to the lpfc_hba struct for this port.
18638 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18639 * routine when the rr_bmask is empty. The FCF indices are put into the
18640 * rr_bmask based on their priority level, starting from the highest priority
18641 * down to the lowest. The most likely FCF candidate will be in the highest
18642 * priority group. When this routine is called it searches the fcf_pri list for
18643 * the next lowest priority group and repopulates the rr_bmask with only those
18644 * fcf_indexes.
18645 * returns:
18646 * 1=success 0=failure
18647 **/
5d8b8167 18648static int
7d791df7
JS
18649lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18650{
18651 uint16_t next_fcf_pri;
18652 uint16_t last_index;
18653 struct lpfc_fcf_pri *fcf_pri;
18654 int rc;
18655 int ret = 0;
18656
18657 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18658 LPFC_SLI4_FCF_TBL_INDX_MAX);
18659 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18660 "3060 Last IDX %d\n", last_index);
2562669c
JS
18661
18662 /* Verify the priority list has 2 or more entries */
18663 spin_lock_irq(&phba->hbalock);
18664 if (list_empty(&phba->fcf.fcf_pri_list) ||
18665 list_is_singular(&phba->fcf.fcf_pri_list)) {
18666 spin_unlock_irq(&phba->hbalock);
7d791df7
JS
18667 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18668 "3061 Last IDX %d\n", last_index);
18669 return 0; /* Empty rr list */
18670 }
2562669c
JS
18671 spin_unlock_irq(&phba->hbalock);
18672
7d791df7
JS
18673 next_fcf_pri = 0;
18674 /*
18675 * Clear the rr_bmask and set all of the bits that are at this
18676 * priority.
18677 */
18678 memset(phba->fcf.fcf_rr_bmask, 0,
18679 sizeof(*phba->fcf.fcf_rr_bmask));
18680 spin_lock_irq(&phba->hbalock);
18681 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18682 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18683 continue;
18684 /*
18685 * The first priority that has not had a FLOGI failure
18686 * will be the highest.
18687 */
18688 if (!next_fcf_pri)
18689 next_fcf_pri = fcf_pri->fcf_rec.priority;
18690 spin_unlock_irq(&phba->hbalock);
18691 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18692 rc = lpfc_sli4_fcf_rr_index_set(phba,
18693 fcf_pri->fcf_rec.fcf_index);
18694 if (rc)
18695 return 0;
18696 }
18697 spin_lock_irq(&phba->hbalock);
18698 }
18699 /*
18700 * If next_fcf_pri was not set above and the list is not empty then
18701 * FLOGI has failed on all of them. So clear the FLOGI-failed flags
4907cb7b 18702 * and start over at the beginning.
7d791df7
JS
18703 */
18704 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18705 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18706 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18707 /*
18708 * The first priority that has not had a FLOGI failure
18709 * will be the highest.
18710 */
18711 if (!next_fcf_pri)
18712 next_fcf_pri = fcf_pri->fcf_rec.priority;
18713 spin_unlock_irq(&phba->hbalock);
18714 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18715 rc = lpfc_sli4_fcf_rr_index_set(phba,
18716 fcf_pri->fcf_rec.fcf_index);
18717 if (rc)
18718 return 0;
18719 }
18720 spin_lock_irq(&phba->hbalock);
18721 }
18722 } else
18723 ret = 1;
18724 spin_unlock_irq(&phba->hbalock);
18725
18726 return ret;
18727}
0c9ab6f5
JS
18728/**
18729 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18730 * @phba: pointer to lpfc hba data structure.
18731 *
18732 * This routine is to get the next eligible FCF record index in a round
18733 * robin fashion. If the next eligible FCF record index equals the
a93ff37a 18734 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
0c9ab6f5
JS
18735 * shall be returned, otherwise, the next eligible FCF record's index
18736 * shall be returned.
18737 **/
18738uint16_t
18739lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18740{
18741 uint16_t next_fcf_index;
18742
421c6622 18743initial_priority:
3804dc84 18744 /* Search start from next bit of currently registered FCF index */
421c6622
JS
18745 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18746
7d791df7 18747next_priority:
421c6622
JS
18748 /* Determine the next fcf index to check */
18749 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
0c9ab6f5
JS
18750 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18751 LPFC_SLI4_FCF_TBL_INDX_MAX,
3804dc84
JS
18752 next_fcf_index);
18753
0c9ab6f5 18754 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
7d791df7
JS
18755 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18756 /*
18757 * If we have wrapped then we need to clear the bits that
18758 * have been tested so that we can detect when we should
18759 * change the priority level.
18760 */
0c9ab6f5
JS
18761 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18762 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
7d791df7
JS
18763 }
18764
3804dc84
JS
18765
18766 /* Check roundrobin failover list empty condition */
7d791df7
JS
18767 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18768 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18769 /*
18770 * If next fcf index is not found check if there are lower
18771 * Priority level fcf's in the fcf_priority list.
18772 * Set up the rr_bmask with all of the available fcf bits
18773 * at that level and continue the selection process.
18774 */
18775 if (lpfc_check_next_fcf_pri_level(phba))
421c6622 18776 goto initial_priority;
3804dc84
JS
18777 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18778 "2844 No roundrobin failover FCF available\n");
036cad1f
JS
18779
18780 return LPFC_FCOE_FCF_NEXT_NONE;
3804dc84
JS
18781 }
18782
7d791df7
JS
18783 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18784 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
f5cb5304
JS
18785 LPFC_FCF_FLOGI_FAILED) {
18786 if (list_is_singular(&phba->fcf.fcf_pri_list))
18787 return LPFC_FCOE_FCF_NEXT_NONE;
18788
7d791df7 18789 goto next_priority;
f5cb5304 18790 }
7d791df7 18791
3804dc84 18792 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a
JS
18793 "2845 Get next roundrobin failover FCF (x%x)\n",
18794 next_fcf_index);
18795
0c9ab6f5
JS
18796 return next_fcf_index;
18797}
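
/*
 * Illustrative sketch: consuming the roundrobin bmask the way an FLOGI
 * failover path would - ask for the next eligible index and stop when
 * the LPFC_FCOE_FCF_NEXT_NONE sentinel says the list is exhausted.
 * lpfc_example_* is hypothetical.
 */
static inline int lpfc_example_pick_failover_fcf(struct lpfc_hba *phba,
						 uint16_t *out_index)
{
	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
		return -ENOENT;		/* no eligible FCF left */

	*out_index = fcf_index;
	/* a real caller would now verify the record, e.g. via
	 * lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index)
	 */
	return 0;
}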
18798
18799/**
18800 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18801 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to the fcf record to set in the bmask.
18802 *
18803 * This routine sets the FCF record index into the eligible bmask for
a93ff37a 18804 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
18805 * does not go beyond the range of the driver allocated bmask dimension
18806 * before setting the bit.
18807 *
18808 * Returns 0 if the index bit is successfully set, otherwise, it returns
18809 * -EINVAL.
18810 **/
18811int
18812lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18813{
18814 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18815 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
18816 "2610 FCF (x%x) reached driver's book "
18817 "keeping dimension:x%x\n",
0c9ab6f5
JS
18818 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18819 return -EINVAL;
18820 }
18821 /* Set the eligible FCF record index bmask */
18822 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18823
3804dc84 18824 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18825 "2790 Set FCF (x%x) to roundrobin FCF failover "
3804dc84
JS
18826 "bmask\n", fcf_index);
18827
0c9ab6f5
JS
18828 return 0;
18829}
18830
18831/**
3804dc84 18832 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
0c9ab6f5
JS
18833 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to the fcf record to clear from the bmask.
18834 *
18835 * This routine clears the FCF record index from the eligible bmask for
a93ff37a 18836 * roundrobin failover search. It checks to make sure that the index
0c9ab6f5
JS
18837 * does not go beyond the range of the driver allocated bmask dimension
18838 * before clearing the bit.
18839 **/
18840void
18841lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18842{
9a803a74 18843 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
0c9ab6f5
JS
18844 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18845 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
a93ff37a
JS
18846 "2762 FCF (x%x) reached driver's book "
18847 "keeping dimension:x%x\n",
0c9ab6f5
JS
18848 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18849 return;
18850 }
18851 /* Clear the eligible FCF record index bmask */
7d791df7 18852 spin_lock_irq(&phba->hbalock);
9a803a74
JS
18853 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18854 list) {
7d791df7
JS
18855 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18856 list_del_init(&fcf_pri->list);
18857 break;
18858 }
18859 }
18860 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 18861 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
3804dc84
JS
18862
18863 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18864 "2791 Clear FCF (x%x) from roundrobin failover "
3804dc84 18865 "bmask\n", fcf_index);
0c9ab6f5
JS
18866}
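
/*
 * Illustrative sketch: how the two bmask helpers above pair up over an
 * FCF's lifetime - set the bit when a read record proves eligible,
 * clear it when FLOGI through that FCF fails.  lpfc_example_* is
 * hypothetical.
 */
static inline void lpfc_example_track_fcf(struct lpfc_hba *phba,
					  uint16_t fcf_index, bool eligible)
{
	if (eligible) {
		if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
			return;	/* -EINVAL: index out of bmask range */
	} else {
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
	}
}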
18867
ecfd03c6
JS
18868/**
18869 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18870 * @phba: pointer to lpfc hba data structure.
18871 *
18872 * This routine is the completion routine for the rediscover FCF table mailbox
18873 * command. If the mailbox command failed, it falls back to retrying the
18874 * current FCF or to link-down handling; otherwise it starts the FCF
 * rediscovery wait timer.
18875 **/
5d8b8167 18876static void
ecfd03c6
JS
18877lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18878{
18879 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18880 uint32_t shdr_status, shdr_add_status;
18881
18882 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18883
18884 shdr_status = bf_get(lpfc_mbox_hdr_status,
18885 &redisc_fcf->header.cfg_shdr.response);
18886 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18887 &redisc_fcf->header.cfg_shdr.response);
18888 if (shdr_status || shdr_add_status) {
0c9ab6f5 18889 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
ecfd03c6
JS
18890 "2746 Requesting for FCF rediscovery failed "
18891 "status x%x add_status x%x\n",
18892 shdr_status, shdr_add_status);
0c9ab6f5 18893 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
fc2b989b 18894 spin_lock_irq(&phba->hbalock);
0c9ab6f5 18895 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b
JS
18896 spin_unlock_irq(&phba->hbalock);
18897 /*
18898 * CVL event triggered FCF rediscover request failed,
18899 * last resort to re-try current registered FCF entry.
18900 */
18901 lpfc_retry_pport_discovery(phba);
18902 } else {
18903 spin_lock_irq(&phba->hbalock);
0c9ab6f5 18904 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
fc2b989b
JS
18905 spin_unlock_irq(&phba->hbalock);
18906 /*
18907 * DEAD FCF event triggered FCF rediscover request
18908 * failed, last resort to fail over as a link down
18909 * to FCF registration.
18910 */
18911 lpfc_sli4_fcf_dead_failthrough(phba);
18912 }
0c9ab6f5
JS
18913 } else {
18914 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 18915 "2775 Start FCF rediscover quiescent timer\n");
ecfd03c6
JS
18916 /*
18917 * Start FCF rediscovery wait timer for pending FCF
18918 * before rescan FCF record table.
18919 */
18920 lpfc_fcf_redisc_wait_start_timer(phba);
0c9ab6f5 18921 }
ecfd03c6
JS
18922
18923 mempool_free(mbox, phba->mbox_mem_pool);
18924}
18925
18926/**
3804dc84 18927 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
ecfd03c6
JS
18928 * @phba: pointer to lpfc hba data structure.
18929 *
18930 * This routine is invoked to request rediscovery of the entire FCF table
18931 * by the port.
18932 **/
18933int
18934lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18935{
18936 LPFC_MBOXQ_t *mbox;
18937 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18938 int rc, length;
18939
0c9ab6f5
JS
18940 /* Cancel retry delay timers to all vports before FCF rediscover */
18941 lpfc_cancel_all_vport_retry_delay_timer(phba);
18942
ecfd03c6
JS
18943 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18944 if (!mbox) {
18945 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18946 "2745 Failed to allocate mbox for "
18947 "requesting FCF rediscover.\n");
18948 return -ENOMEM;
18949 }
18950
18951 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18952 sizeof(struct lpfc_sli4_cfg_mhdr));
18953 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18954 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18955 length, LPFC_SLI4_MBX_EMBED);
18956
18957 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18958 /* Set count to 0 for invalidating the entire FCF database */
18959 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18960
18961 /* Issue the mailbox command asynchronously */
18962 mbox->vport = phba->pport;
18963 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18964 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18965
18966 if (rc == MBX_NOT_FINISHED) {
18967 mempool_free(mbox, phba->mbox_mem_pool);
18968 return -EIO;
18969 }
18970 return 0;
18971}
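
/*
 * Illustrative sketch: requesting a full-table FCF rediscovery, e.g. in
 * response to an FCF DEAD or Clear Virtual Link event, and falling back
 * through lpfc_sli4_fcf_dead_failthrough() (below) when the request
 * cannot even be posted.  lpfc_example_* is hypothetical.
 */
static inline void lpfc_example_redisc_fcf(struct lpfc_hba *phba)
{
	if (lpfc_sli4_redisc_fcf_table(phba))
		/* -ENOMEM/-EIO: REDISCOVER_FCF could not be submitted */
		lpfc_sli4_fcf_dead_failthrough(phba);
}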
18972
fc2b989b
JS
18973/**
18974 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18975 * @phba: pointer to lpfc hba data structure.
18976 *
18977 * This function is the failover routine as a last resort to the FCF DEAD
18978 * event when driver failed to perform fast FCF failover.
18979 **/
18980void
18981lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18982{
18983 uint32_t link_state;
18984
18985 /*
18986 * Last resort as FCF DEAD event failover will treat this as
18987 * a link down, but save the link state because we don't want
18988 * it to be changed to Link Down unless it is already down.
18989 */
18990 link_state = phba->link_state;
18991 lpfc_linkdown(phba);
18992 phba->link_state = link_state;
18993
18994 /* Unregister FCF if no devices connected to it */
18995 lpfc_unregister_unused_fcf(phba);
18996}
18997
a0c87cbd 18998/**
026abb87 18999 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
a0c87cbd 19000 * @phba: pointer to lpfc hba data structure.
026abb87 19001 * @rgn23_data: pointer to configure region 23 data.
a0c87cbd 19002 *
026abb87
JS
19003 * This function gets SLI3 port configuration region 23 data through a memory
19004 * dump mailbox command. When it successfully retrieves data, the size of the data
19005 * will be returned, otherwise, 0 will be returned.
a0c87cbd 19006 **/
026abb87
JS
19007static uint32_t
19008lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
a0c87cbd
JS
19009{
19010 LPFC_MBOXQ_t *pmb = NULL;
19011 MAILBOX_t *mb;
026abb87 19012 uint32_t offset = 0;
a0c87cbd
JS
19013 int rc;
19014
026abb87
JS
19015 if (!rgn23_data)
19016 return 0;
19017
a0c87cbd
JS
19018 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19019 if (!pmb) {
19020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
026abb87
JS
19021 "2600 failed to allocate mailbox memory\n");
19022 return 0;
a0c87cbd
JS
19023 }
19024 mb = &pmb->u.mb;
19025
a0c87cbd
JS
19026 do {
19027 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19028 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19029
19030 if (rc != MBX_SUCCESS) {
19031 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
026abb87
JS
19032 "2601 failed to read config "
19033 "region 23, rc 0x%x Status 0x%x\n",
19034 rc, mb->mbxStatus);
a0c87cbd
JS
19035 mb->un.varDmp.word_cnt = 0;
19036 }
19037 /*
19038 * dump mem may return a word count of zero when finished, or we
19039 * may have hit a mailbox error; either way we are done.
19040 */
19041 if (mb->un.varDmp.word_cnt == 0)
19042 break;
19043 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19044 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19045
19046 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
026abb87
JS
19047 rgn23_data + offset,
19048 mb->un.varDmp.word_cnt);
a0c87cbd
JS
19049 offset += mb->un.varDmp.word_cnt;
19050 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19051
026abb87
JS
19052 mempool_free(pmb, phba->mbox_mem_pool);
19053 return offset;
19054}
19055
19056/**
19057 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19058 * @phba: pointer to lpfc hba data structure.
19059 * @rgn23_data: pointer to configure region 23 data.
19060 *
19061 * This function gets SLI4 port configuration region 23 data through a memory
19062 * dump mailbox command. When it successfully retrieves data, the size of the data
19063 * will be returned, otherwise, 0 will be returned.
19064 **/
19065static uint32_t
19066lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19067{
19068 LPFC_MBOXQ_t *mboxq = NULL;
19069 struct lpfc_dmabuf *mp = NULL;
19070 struct lpfc_mqe *mqe;
19071 uint32_t data_length = 0;
19072 int rc;
19073
19074 if (!rgn23_data)
19075 return 0;
19076
19077 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19078 if (!mboxq) {
19079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19080 "3105 failed to allocate mailbox memory\n");
19081 return 0;
19082 }
19083
19084 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19085 goto out;
19086 mqe = &mboxq->u.mqe;
3e1f0718 19087 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
026abb87
JS
19088 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19089 if (rc)
19090 goto out;
19091 data_length = mqe->un.mb_words[5];
19092 if (data_length == 0)
19093 goto out;
19094 if (data_length > DMP_RGN23_SIZE) {
19095 data_length = 0;
19096 goto out;
19097 }
19098 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19099out:
19100 mempool_free(mboxq, phba->mbox_mem_pool);
19101 if (mp) {
19102 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19103 kfree(mp);
19104 }
19105 return data_length;
19106}
19107
19108/**
19109 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19110 * @phba: pointer to lpfc hba data structure.
19111 *
19112 * This function reads region 23 and parses the TLVs for the port state to
19113 * decide if the user disabled the port. If the TLV indicates the
19114 * port is disabled, the hba_flag is set accordingly.
19115 **/
19116void
19117lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19118{
19119 uint8_t *rgn23_data = NULL;
19120 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19121 uint32_t offset = 0;
19122
19123 /* Get adapter Region 23 data */
19124 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19125 if (!rgn23_data)
19126 goto out;
19127
19128 if (phba->sli_rev < LPFC_SLI_REV4)
19129 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19130 else {
19131 if_type = bf_get(lpfc_sli_intf_if_type,
19132 &phba->sli4_hba.sli_intf);
19133 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19134 goto out;
19135 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19136 }
a0c87cbd
JS
19137
19138 if (!data_size)
19139 goto out;
19140
19141 /* Check the region signature first */
19142 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19144 "2619 Config region 23 has bad signature\n");
19145 goto out;
19146 }
19147 offset += 4;
19148
19149 /* Check the data structure version */
19150 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19152 "2620 Config region 23 has bad version\n");
19153 goto out;
19154 }
19155 offset += 4;
19156
19157 /* Parse TLV entries in the region */
19158 while (offset < data_size) {
19159 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19160 break;
19161 /*
19162 * If the TLV is not driver specific TLV or driver id is
19163 * not linux driver id, skip the record.
19164 */
19165 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19166 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19167 (rgn23_data[offset + 3] != 0)) {
19168 offset += rgn23_data[offset + 1] * 4 + 4;
19169 continue;
19170 }
19171
19172 /* Driver found a driver specific TLV in the config region */
19173 sub_tlv_len = rgn23_data[offset + 1] * 4;
19174 offset += 4;
19175 tlv_offset = 0;
19176
19177 /*
19178 * Search for configured port state sub-TLV.
19179 */
19180 while ((offset < data_size) &&
19181 (tlv_offset < sub_tlv_len)) {
19182 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19183 offset += 4;
19184 tlv_offset += 4;
19185 break;
19186 }
19187 if (rgn23_data[offset] != PORT_STE_TYPE) {
/* bump tlv_offset before offset so both use this record's length */
19188 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19189 offset += rgn23_data[offset + 1] * 4 + 4;
19190 continue;
19191 }
19192
19193 /* This HBA contains PORT_STE configured */
19194 if (!rgn23_data[offset + 2])
19195 phba->hba_flag |= LINK_DISABLED;
19196
19197 goto out;
19198 }
19199 }
026abb87 19200
a0c87cbd 19201out:
a0c87cbd
JS
19202 kfree(rgn23_data);
19203 return;
19204}
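
/*
 * Illustrative sketch of the region 23 TLV arithmetic used above: each
 * record is a 4-byte header (type byte, length byte in words, two
 * type-specific bytes) followed by length * 4 payload bytes, so a whole
 * record occupies rgn23_data[offset + 1] * 4 + 4 bytes.  This walker
 * just counts records; lpfc_example_* is hypothetical.
 */
static inline uint32_t lpfc_example_count_rgn23_tlvs(uint8_t *rgn23_data,
						     uint32_t data_size)
{
	uint32_t offset = 8;	/* skip signature (4) + version (4) */
	uint32_t count = 0;

	while (offset < data_size &&
	       rgn23_data[offset] != LPFC_REGION23_LAST_REC) {
		count++;
		offset += rgn23_data[offset + 1] * 4 + 4;
	}
	return count;
}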
695a814e 19205
52d52440
JS
19206/**
19207 * lpfc_wr_object - write an object to the firmware
19208 * @phba: HBA structure that indicates port to create a queue on.
19209 * @dmabuf_list: list of dmabufs to write to the port.
19210 * @size: the total byte value of the objects to write to the port.
19211 * @offset: the current offset to be used to start the transfer.
19212 *
19213 * This routine will create a wr_object mailbox command to send to the port.
19214 * the mailbox command will be constructed using the dma buffers described in
19215 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19216 * BDEs as the embedded mailbox can support. The @offset variable will be
19217 * used to indicate the starting offset of the transfer and will also return
19218 * the offset after the write object mailbox has completed. @size is used to
19219 * determine the end of the object and whether the eof bit should be set.
19220 *
19221 * Return 0 if successful and offset will contain the new offset to use
19222 * for the next write.
19223 * Return negative value for error cases.
19224 **/
19225int
19226lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19227 uint32_t size, uint32_t *offset)
19228{
19229 struct lpfc_mbx_wr_object *wr_object;
19230 LPFC_MBOXQ_t *mbox;
19231 int rc = 0, i = 0;
5021267a 19232 uint32_t shdr_status, shdr_add_status, shdr_change_status;
52d52440 19233 uint32_t mbox_tmo;
52d52440
JS
19234 struct lpfc_dmabuf *dmabuf;
19235 uint32_t written = 0;
5021267a 19236 bool check_change_status = false;
52d52440
JS
19237
19238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19239 if (!mbox)
19240 return -ENOMEM;
19241
19242 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19243 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19244 sizeof(struct lpfc_mbx_wr_object) -
19245 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19246
19247 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19248 wr_object->u.request.write_offset = *offset;
19249 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19250 wr_object->u.request.object_name[0] =
19251 cpu_to_le32(wr_object->u.request.object_name[0]);
19252 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19253 list_for_each_entry(dmabuf, dmabuf_list, list) {
19254 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19255 break;
19256 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19257 wr_object->u.request.bde[i].addrHigh =
19258 putPaddrHigh(dmabuf->phys);
19259 if (written + SLI4_PAGE_SIZE >= size) {
19260 wr_object->u.request.bde[i].tus.f.bdeSize =
19261 (size - written);
19262 written += (size - written);
19263 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
5021267a
JS
19264 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19265 check_change_status = true;
52d52440
JS
19266 } else {
19267 wr_object->u.request.bde[i].tus.f.bdeSize =
19268 SLI4_PAGE_SIZE;
19269 written += SLI4_PAGE_SIZE;
19270 }
19271 i++;
19272 }
19273 wr_object->u.request.bde_count = i;
19274 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19275 if (!phba->sli4_hba.intr_enable)
19276 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19277 else {
a183a15f 19278 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
52d52440
JS
19279 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19280 }
19281 /* The IOCTL status is embedded in the mailbox subheader. */
5021267a
JS
19282 shdr_status = bf_get(lpfc_mbox_hdr_status,
19283 &wr_object->header.cfg_shdr.response);
19284 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19285 &wr_object->header.cfg_shdr.response);
19286 if (check_change_status) {
19287 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19288 &wr_object->u.response);
19289 switch (shdr_change_status) {
19290 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19292 "3198 Firmware write complete: System "
19293 "reboot required to instantiate\n");
19294 break;
19295 case (LPFC_CHANGE_STATUS_FW_RESET):
19296 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19297 "3199 Firmware write complete: Firmware"
19298 " reset required to instantiate\n");
19299 break;
19300 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19301 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19302 "3200 Firmware write complete: Port "
19303 "Migration or PCI Reset required to "
19304 "instantiate\n");
19305 break;
19306 case (LPFC_CHANGE_STATUS_PCI_RESET):
19307 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19308 "3201 Firmware write complete: PCI "
19309 "Reset required to instantiate\n");
19310 break;
19311 default:
19312 break;
19313 }
19314 }
52d52440
JS
19315 if (rc != MBX_TIMEOUT)
19316 mempool_free(mbox, phba->mbox_mem_pool);
19317 if (shdr_status || shdr_add_status || rc) {
19318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19319 "3025 Write Object mailbox failed with "
19320 "status x%x add_status x%x, mbx status x%x\n",
19321 shdr_status, shdr_add_status, rc);
19322 rc = -ENXIO;
1feb8204 19323 *offset = shdr_add_status;
52d52440
JS
19324 } else
19325 *offset += wr_object->u.response.actual_write_length;
19326 return rc;
19327}
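
/*
 * Illustrative sketch: how a firmware-download loop drives
 * lpfc_wr_object().  Each pass writes as many BDEs as one embedded
 * mailbox carries and lpfc_wr_object() advances *offset past the bytes
 * the port accepted; the per-pass refill of @dmabuf_list with the image
 * bytes at 'offset' is elided.  lpfc_example_* is hypothetical.
 */
static inline int lpfc_example_download(struct lpfc_hba *phba,
					struct list_head *dmabuf_list,
					uint32_t image_size)
{
	uint32_t offset = 0;
	int rc;

	while (offset < image_size) {
		/* ... copy image bytes at 'offset' into dmabuf_list ... */
		rc = lpfc_wr_object(phba, dmabuf_list,
				    image_size - offset, &offset);
		if (rc)
			return rc;	/* -ENOMEM or -ENXIO */
	}
	return 0;
}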
19328
695a814e
JS
19329/**
19330 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19331 * @vport: pointer to vport data structure.
19332 *
19333 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19334 * and REG_VPI mailbox commands associated with the vport. This function
19335 * is called when the driver wants to restart discovery of the vport due to
19336 * a Clear Virtual Link event.
19337 **/
19338void
19339lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19340{
19341 struct lpfc_hba *phba = vport->phba;
19342 LPFC_MBOXQ_t *mb, *nextmb;
19343 struct lpfc_dmabuf *mp;
78730cfe 19344 struct lpfc_nodelist *ndlp;
d439d286 19345 struct lpfc_nodelist *act_mbx_ndlp = NULL;
589a52d6 19346 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
d439d286 19347 LIST_HEAD(mbox_cmd_list);
63e801ce 19348 uint8_t restart_loop;
695a814e 19349
d439d286 19350 /* Clean up internally queued mailbox commands with the vport */
695a814e
JS
19351 spin_lock_irq(&phba->hbalock);
19352 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19353 if (mb->vport != vport)
19354 continue;
19355
19356 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19357 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19358 continue;
19359
d439d286
JS
19360 list_del(&mb->list);
19361 list_add_tail(&mb->list, &mbox_cmd_list);
19362 }
19363 /* Clean up active mailbox command with the vport */
19364 mb = phba->sli.mbox_active;
19365 if (mb && (mb->vport == vport)) {
19366 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19367 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19368 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19369 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19370 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
d439d286
JS
19371 /* Put reference count for delayed processing */
19372 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19373 /* Unregister the RPI when mailbox complete */
19374 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19375 }
19376 }
63e801ce
JS
19377 /* Cleanup any mailbox completions which are not yet processed */
19378 do {
19379 restart_loop = 0;
19380 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19381 /*
19382 * If this mailbox has already been processed or it is
19383 * for another vport, ignore it.
19384 */
19385 if ((mb->vport != vport) ||
19386 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19387 continue;
19388
19389 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19390 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19391 continue;
19392
19393 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19394 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19395 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
63e801ce
JS
19396 /* Unregister the RPI when mailbox complete */
19397 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19398 restart_loop = 1;
19399 spin_unlock_irq(&phba->hbalock);
19400 spin_lock(shost->host_lock);
19401 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19402 spin_unlock(shost->host_lock);
19403 spin_lock_irq(&phba->hbalock);
19404 break;
19405 }
19406 }
19407 } while (restart_loop);
19408
d439d286
JS
19409 spin_unlock_irq(&phba->hbalock);
19410
19411 /* Release the cleaned-up mailbox commands */
19412 while (!list_empty(&mbox_cmd_list)) {
19413 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
695a814e 19414 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
3e1f0718 19415 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
695a814e
JS
19416 if (mp) {
19417 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19418 kfree(mp);
19419 }
3e1f0718
JS
19420 mb->ctx_buf = NULL;
19421 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19422 mb->ctx_ndlp = NULL;
78730cfe 19423 if (ndlp) {
ec21b3b0 19424 spin_lock(shost->host_lock);
589a52d6 19425 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
ec21b3b0 19426 spin_unlock(shost->host_lock);
78730cfe 19427 lpfc_nlp_put(ndlp);
78730cfe 19428 }
695a814e 19429 }
695a814e
JS
19430 mempool_free(mb, phba->mbox_mem_pool);
19431 }
d439d286
JS
19432
19433 /* Release the ndlp with the cleaned-up active mailbox command */
19434 if (act_mbx_ndlp) {
19435 spin_lock(shost->host_lock);
19436 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19437 spin_unlock(shost->host_lock);
19438 lpfc_nlp_put(act_mbx_ndlp);
695a814e 19439 }
695a814e
JS
19440}
19441
2a9bf3d0
JS
19442/**
19443 * lpfc_drain_txq - Drain the txq
19444 * @phba: Pointer to HBA context object.
19445 *
19446 * This function attempts to submit IOCBs on the txq
19447 * to the adapter. For SLI4 adapters, the txq contains
19448 * ELS IOCBs that have been deferred because there
19449 * are no available SGLs. This congestion can occur with large
19450 * vport counts during node discovery.
19451 **/
19452
19453uint32_t
19454lpfc_drain_txq(struct lpfc_hba *phba)
19455{
19456 LIST_HEAD(completions);
895427bd 19457 struct lpfc_sli_ring *pring;
2e706377 19458 struct lpfc_iocbq *piocbq = NULL;
2a9bf3d0
JS
19459 unsigned long iflags = 0;
19460 char *fail_msg = NULL;
19461 struct lpfc_sglq *sglq;
205e8240 19462 union lpfc_wqe128 wqe;
a2fc4aef 19463 uint32_t txq_cnt = 0;
dc19e3b4 19464 struct lpfc_queue *wq;
2a9bf3d0 19465
dc19e3b4
JS
19466 if (phba->link_flag & LS_MDS_LOOPBACK) {
19467 /* MDS WQE are posted only to first WQ*/
cdb42bec 19468 wq = phba->sli4_hba.hdwq[0].fcp_wq;
dc19e3b4
JS
19469 if (unlikely(!wq))
19470 return 0;
19471 pring = wq->pring;
19472 } else {
19473 wq = phba->sli4_hba.els_wq;
19474 if (unlikely(!wq))
19475 return 0;
19476 pring = lpfc_phba_elsring(phba);
19477 }
19478
19479 if (unlikely(!pring) || list_empty(&pring->txq))
1234a6d5 19480 return 0;
895427bd 19481
398d81c9 19482 spin_lock_irqsave(&pring->ring_lock, iflags);
0e9bb8d7
JS
19483 list_for_each_entry(piocbq, &pring->txq, list) {
19484 txq_cnt++;
19485 }
19486
19487 if (txq_cnt > pring->txq_max)
19488 pring->txq_max = txq_cnt;
2a9bf3d0 19489
398d81c9 19490 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19491
0e9bb8d7 19492 while (!list_empty(&pring->txq)) {
398d81c9 19493 spin_lock_irqsave(&pring->ring_lock, iflags);
2a9bf3d0 19494
19ca7609 19495 piocbq = lpfc_sli_ringtx_get(phba, pring);
a629852a 19496 if (!piocbq) {
398d81c9 19497 spin_unlock_irqrestore(&pring->ring_lock, iflags);
a629852a
JS
19498 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19499 "2823 txq empty and txq_cnt is %d\n ",
0e9bb8d7 19500 txq_cnt);
a629852a
JS
19501 break;
19502 }
895427bd 19503 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
2a9bf3d0 19504 if (!sglq) {
19ca7609 19505 __lpfc_sli_ringtx_put(phba, pring, piocbq);
398d81c9 19506 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0 19507 break;
2a9bf3d0 19508 }
0e9bb8d7 19509 txq_cnt--;
2a9bf3d0
JS
19510
19511 /* The xri and iocb resources secured,
19512 * attempt to issue request
19513 */
6d368e53 19514 piocbq->sli4_lxritag = sglq->sli4_lxritag;
2a9bf3d0
JS
19515 piocbq->sli4_xritag = sglq->sli4_xritag;
19516 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19517 fail_msg = "to convert bpl to sgl";
205e8240 19518 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
2a9bf3d0 19519 fail_msg = "to convert iocb to wqe";
dc19e3b4 19520 else if (lpfc_sli4_wq_put(wq, &wqe))
2a9bf3d0
JS
19521 fail_msg = " - Wq is full";
19522 else
19523 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19524
19525 if (fail_msg) {
19526 /* Failed means we can't issue and need to cancel */
19527 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19528 "2822 IOCB failed %s iotag 0x%x "
19529 "xri 0x%x\n",
19530 fail_msg,
19531 piocbq->iotag, piocbq->sli4_xritag);
19532 list_add_tail(&piocbq->list, &completions);
19533 }
398d81c9 19534 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2a9bf3d0
JS
19535 }
19536
2a9bf3d0
JS
19537 /* Cancel all the IOCBs that cannot be issued */
19538 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19539 IOERR_SLI_ABORTED);
19540
0e9bb8d7 19541 return txq_cnt;
2a9bf3d0 19542}
895427bd
JS
19543
19544/**
19545 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19546 * @phba: Pointer to HBA context object.
19547 * @pwqeq: Pointer to the command iocbq carrying the WQE.
19548 * @sglq: Pointer to the scatter gather queue object.
19549 *
19550 * This routine converts the bpl or bde that is in the WQE
19551 * to a sgl list for the sli4 hardware. The physical address
19552 * of the bpl/bde is converted back to a virtual address.
19553 * If the WQE contains a BPL then the list of BDEs is
19554 * converted to sli4_sges. If the WQE contains a single
19555 * BDE then it is converted to a single sli_sge.
19556 * The WQE is still in cpu endianness so the contents of
19557 * the bpl can be used without byte swapping.
19558 *
19559 * Returns valid XRI = Success, NO_XRI = Failure.
19560 */
19561static uint16_t
19562lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19563 struct lpfc_sglq *sglq)
19564{
19565 uint16_t xritag = NO_XRI;
19566 struct ulp_bde64 *bpl = NULL;
19567 struct ulp_bde64 bde;
19568 struct sli4_sge *sgl = NULL;
19569 struct lpfc_dmabuf *dmabuf;
205e8240 19570 union lpfc_wqe128 *wqe;
895427bd
JS
19571 int numBdes = 0;
19572 int i = 0;
19573 uint32_t offset = 0; /* accumulated offset in the sg request list */
19574 int inbound = 0; /* number of sg reply entries inbound from firmware */
19575 uint32_t cmd;
19576
19577 if (!pwqeq || !sglq)
19578 return xritag;
19579
19580 sgl = (struct sli4_sge *)sglq->sgl;
19581 wqe = &pwqeq->wqe;
19582 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19583
19584 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19585 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19586 return sglq->sli4_xritag;
19587 numBdes = pwqeq->rsvd2;
19588 if (numBdes) {
19589 /* The addrHigh and addrLow fields within the WQE
19590 * have not been byteswapped yet so there is no
19591 * need to swap them back.
19592 */
19593 if (pwqeq->context3)
19594 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19595 else
19596 return xritag;
19597
19598 bpl = (struct ulp_bde64 *)dmabuf->virt;
19599 if (!bpl)
19600 return xritag;
19601
19602 for (i = 0; i < numBdes; i++) {
19603 /* Should already be byte swapped. */
19604 sgl->addr_hi = bpl->addrHigh;
19605 sgl->addr_lo = bpl->addrLow;
19606
19607 sgl->word2 = le32_to_cpu(sgl->word2);
19608 if ((i+1) == numBdes)
19609 bf_set(lpfc_sli4_sge_last, sgl, 1);
19610 else
19611 bf_set(lpfc_sli4_sge_last, sgl, 0);
19612 /* swap the size field back to the cpu so we
19613 * can assign it to the sgl.
19614 */
19615 bde.tus.w = le32_to_cpu(bpl->tus.w);
19616 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19617 /* The offsets in the sgl need to be accumulated
19618 * separately for the request and reply lists.
19619 * The request is always first, the reply follows.
19620 */
19621 switch (cmd) {
19622 case CMD_GEN_REQUEST64_WQE:
19623 /* add up the reply sg entries */
19624 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19625 inbound++;
19626 /* first inbound? reset the offset */
19627 if (inbound == 1)
19628 offset = 0;
19629 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19630 bf_set(lpfc_sli4_sge_type, sgl,
19631 LPFC_SGE_TYPE_DATA);
19632 offset += bde.tus.f.bdeSize;
19633 break;
19634 case CMD_FCP_TRSP64_WQE:
19635 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19636 bf_set(lpfc_sli4_sge_type, sgl,
19637 LPFC_SGE_TYPE_DATA);
19638 break;
19639 case CMD_FCP_TSEND64_WQE:
19640 case CMD_FCP_TRECEIVE64_WQE:
19641 bf_set(lpfc_sli4_sge_type, sgl,
19642 bpl->tus.f.bdeFlags);
19643 if (i < 3)
19644 offset = 0;
19645 else
19646 offset += bde.tus.f.bdeSize;
19647 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19648 break;
19649 }
19650 sgl->word2 = cpu_to_le32(sgl->word2);
19651 bpl++;
19652 sgl++;
19653 }
19654 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19655 /* The addrHigh and addrLow fields of the BDE have not
19656 * been byteswapped yet so they need to be swapped
19657 * before putting them in the sgl.
19658 */
19659 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19660 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19661 sgl->word2 = le32_to_cpu(sgl->word2);
19662 bf_set(lpfc_sli4_sge_last, sgl, 1);
19663 sgl->word2 = cpu_to_le32(sgl->word2);
19664 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19665 }
19666 return sglq->sli4_xritag;
19667}
19668
19669/**
19670 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19671 * @phba: Pointer to HBA context object.
19672 * @ring_number: Base sli ring number
19673 * @pwqe: Pointer to command WQE.
 *
 * This routine posts @pwqe to the proper work queue based on the I/O
 * type flag (NVME LS, NVME, or NVMET). Returns 0 on success, WQE_BUSY
 * or WQE_ERROR otherwise.
19674 **/
19675int
19676lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
19677 struct lpfc_iocbq *pwqe)
19678{
205e8240 19679 union lpfc_wqe128 *wqe = &pwqe->wqe;
f358dd0c 19680 struct lpfc_nvmet_rcv_ctx *ctxp;
895427bd
JS
19681 struct lpfc_queue *wq;
19682 struct lpfc_sglq *sglq;
19683 struct lpfc_sli_ring *pring;
19684 unsigned long iflags;
cd22d605 19685 uint32_t ret = 0;
895427bd
JS
19686
19687 /* NVME_LS and NVME_LS ABTS requests. */
19688 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19689 pring = phba->sli4_hba.nvmels_wq->pring;
19690 spin_lock_irqsave(&pring->ring_lock, iflags);
19691 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19692 if (!sglq) {
19693 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19694 return WQE_BUSY;
19695 }
19696 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19697 pwqe->sli4_xritag = sglq->sli4_xritag;
19698 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19699 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19700 return WQE_ERROR;
19701 }
19702 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19703 pwqe->sli4_xritag);
cd22d605
DK
19704 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19705 if (ret) {
895427bd 19706 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19707 return ret;
895427bd 19708 }
cd22d605 19709
895427bd
JS
19710 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19711 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19712 return 0;
19713 }
19714
19715 /* NVME_FCREQ and NVME_ABTS requests */
19716 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19717 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
cdb42bec 19718 pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;
895427bd
JS
19719
19720 spin_lock_irqsave(&pring->ring_lock, iflags);
cdb42bec 19721 wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
895427bd 19722 bf_set(wqe_cqid, &wqe->generic.wqe_com,
cdb42bec 19723 phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
cd22d605
DK
19724 ret = lpfc_sli4_wq_put(wq, wqe);
19725 if (ret) {
895427bd 19726 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19727 return ret;
895427bd
JS
19728 }
19729 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19730 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19731 return 0;
19732 }
19733
f358dd0c
JS
19734 /* NVMET requests */
19735 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19736 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
cdb42bec 19737 pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;
f358dd0c
JS
19738
19739 spin_lock_irqsave(&pring->ring_lock, iflags);
19740 ctxp = pwqe->context2;
6c621a22 19741 sglq = ctxp->ctxbuf->sglq;
f358dd0c
JS
19742 if (pwqe->sli4_xritag == NO_XRI) {
19743 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19744 pwqe->sli4_xritag = sglq->sli4_xritag;
19745 }
19746 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19747 pwqe->sli4_xritag);
cdb42bec 19748 wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
f358dd0c 19749 bf_set(wqe_cqid, &wqe->generic.wqe_com,
cdb42bec 19750 phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
cd22d605
DK
19751 ret = lpfc_sli4_wq_put(wq, wqe);
19752 if (ret) {
f358dd0c 19753 spin_unlock_irqrestore(&pring->ring_lock, iflags);
cd22d605 19754 return ret;
f358dd0c
JS
19755 }
19756 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19757 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19758 return 0;
19759 }
895427bd
JS
19760 return WQE_ERROR;
19761}
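
/*
 * Illustrative sketch: what a caller sets up before handing a WQE to
 * lpfc_sli4_issue_wqe().  The iocb_flag selects the nvmels_wq path
 * above (ring_number is informational for this routine); the WQE and
 * sgl preparation is elided.  lpfc_example_* is hypothetical.
 */
static inline int lpfc_example_send_nvme_ls(struct lpfc_hba *phba,
					    struct lpfc_iocbq *pwqe)
{
	int rc;

	pwqe->iocb_flag |= LPFC_IO_NVME_LS;	/* route to nvmels_wq */
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, pwqe);
	if (rc == WQE_BUSY)
		return -EBUSY;	/* no sglq or WQ space; retry later */
	return rc ? -EIO : 0;
}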