scsi: lpfc: Add push-to-adapter support to sli4

drivers/scsi/lpfc/lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/bitops.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

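	/*
	 * Note: on low-cost (LC) HBAs the READ_NVPARM mailbox below carries
	 * the GPL-use key; the key is byte-swapped to big-endian exactly
	 * once (guarded by init_key) before being copied into the command.
	 */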
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			    sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
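	/*
	 * Pull the VPD region out of adapter memory one DUMP_MEMORY mailbox
	 * at a time, advancing the buffer offset by the count returned,
	 * until the adapter reports no more data (word_cnt == 0) or the
	 * DMP_VPD_SIZE buffer is full.
	 */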
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";
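	/*
	 * Illustration (not from the spec): the decoded string is
	 * "<ver>.<rev><lev>" with an optional "<dist letter><num>" suffix,
	 * so ver 5, rev 0, lev 2, dist 1 ('a'), num 3 renders as "5.02a3";
	 * dist 3 with num 0 is rendered with no suffix.
	 */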

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

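	/*
	 * Port name selection, in order of precedence: adopt the service
	 * parameter port name when the current name is still empty, when a
	 * soft WWPN is configured, or when the fabric supplied a vendor
	 * version FAWWPN key (or FAWWPN_SET is already latched); otherwise
	 * keep the existing fc_portname and push it back into the service
	 * parameters.
	 */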
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
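	/*
	 * Each of the first six WWNN bytes yields two serial-number
	 * characters: nibbles 0-9 map to '0'-'9' and nibbles 0xa-0xf
	 * map to 'a'-'f' (0x61 is ASCII 'a').
	 */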
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
				 &aborts);
		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 &nvme_aborts);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	}

	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		cnt = 0;
		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		phba->put_nvme_bufs += cnt;
		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer that fired, embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer that fired, embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);
	struct lpfc_queue *qp;
	unsigned long time_elapsed;
	uint32_t tick_cqe, max_cqe, val;
	uint64_t tot, data1, data2, data3;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_register reg_data;
	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->cfg_auto_imax) {
		if (!phba->last_eqdelay_time) {
			phba->last_eqdelay_time = jiffies;
			goto skip_eqdelay;
		}
		time_elapsed = jiffies - phba->last_eqdelay_time;
		phba->last_eqdelay_time = jiffies;

		tot = 0xffff;
		/* Check outstanding IO count */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support) {
				tgtp = phba->targetport->private;
				/* Calculate outstanding IOs */
				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
				tot += atomic_read(&tgtp->xmt_fcp_release);
				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
			} else {
				tot = atomic_read(&phba->fc4NvmeIoCmpls);
				data1 = atomic_read(
					&phba->fc4NvmeInputRequests);
				data2 = atomic_read(
					&phba->fc4NvmeOutputRequests);
				data3 = atomic_read(
					&phba->fc4NvmeControlRequests);
				tot = (data1 + data2 + data3) - tot;
			}
		}

		/* Interrupts per sec per EQ */
		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */

		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
		max_cqe = time_elapsed * tick_cqe;
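
		/*
		 * Illustrative numbers (not driver defaults): with
		 * cfg_fcp_imax = 32000 interrupts/sec spread over
		 * io_channel_irqs = 4 EQs, val = 8000 per EQ; at HZ = 250
		 * that gives tick_cqe = 32 CQEs per tick, so an EQ that
		 * stayed at or below 32 * time_elapsed CQEs is considered
		 * lightly loaded and gets its delay (coalescing) turned
		 * off in the loop below.
		 */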

		for (i = 0; i < phba->io_channel_irqs; i++) {
			/* Fast-path EQ */
			qp = phba->sli4_hba.hba_eq[i];
			if (!qp)
				continue;

			/* Use no EQ delay if we don't have many outstanding
			 * IOs, or if we are only processing 1 CQE/ISR or less.
			 * Otherwise, assume we can process up to lpfc_fcp_imax
			 * interrupts per HBA.
			 */
			if (tot < LPFC_NODELAY_MAX_IO ||
			    qp->EQ_cqe_cnt <= max_cqe)
				val = 0;
			else
				val = phba->cfg_fcp_imax;

			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
				/* Use EQ Delay Register method */

				/* Convert for EQ Delay register */
				if (val) {
					/* First, interrupts per sec per EQ */
					val = phba->cfg_fcp_imax /
						phba->io_channel_irqs;

					/* us delay between each interrupt */
					val = LPFC_SEC_TO_USEC / val;
				}
				if (val != qp->q_mode) {
					reg_data.word0 = 0;
					bf_set(lpfc_sliport_eqdelay_id,
					       &reg_data, qp->queue_id);
					bf_set(lpfc_sliport_eqdelay_delay,
					       &reg_data, val);
					writel(reg_data.word0, eqdreg);
				}
			} else {
				/* Use mbox command method */
				if (val != qp->q_mode)
					lpfc_modify_hba_eq_delay(phba, i,
								 1, val);
			}

			/*
			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
			 * between interrupts for EQDR.
			 */
			qp->q_mode = val;
			qp->EQ_cqe_cnt = 0;
		}
	}

skip_eqdelay:
	spin_lock_irq(&phba->pport->work_port_lock);

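	/*
	 * Any I/O completion within the last heartbeat interval counts as
	 * proof of life: skip issuing a heartbeat mailbox and simply rearm
	 * the timer. The heartbeat mailbox is only sent on an otherwise
	 * idle adapter.
	 */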
	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

a257bf90
JS
1525/**
1526 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1527 * @phba: pointer to lpfc hba data structure.
1528 *
1529 * This routine is invoked to handle the deferred HBA hardware error
1530 * conditions. This type of error is indicated by HBA by setting ER1
1531 * and another ER bit in the host status register. The driver will
1532 * wait until the ER1 bit clears before handling the error condition.
1533 **/
1534static void
1535lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1536{
1537 uint32_t old_host_status = phba->work_hs;
a257bf90
JS
1538 struct lpfc_sli *psli = &phba->sli;
1539
f4b4c68f
JS
1540 /* If the pci channel is offline, ignore possible errors,
1541 * since we cannot communicate with the pci card anyway.
1542 */
1543 if (pci_channel_offline(phba->pcidev)) {
1544 spin_lock_irq(&phba->hbalock);
1545 phba->hba_flag &= ~DEFER_ERATT;
1546 spin_unlock_irq(&phba->hbalock);
1547 return;
1548 }
1549
a257bf90
JS
1550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1551 "0479 Deferred Adapter Hardware Error "
1552 "Data: x%x x%x x%x\n",
1553 phba->work_hs,
1554 phba->work_status[0], phba->work_status[1]);
1555
1556 spin_lock_irq(&phba->hbalock);
f4b4c68f 1557 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
a257bf90
JS
1558 spin_unlock_irq(&phba->hbalock);
1559
1560
1561 /*
1562 * Firmware stops when it triggers erratt. That could cause the
1563 * firmware to drop I/Os. Error out the iocbs (I/Os) on the txcmplq
1564 * and let the SCSI layer retry them after re-establishing the link.
1565 */
db55fba8 1566 lpfc_sli_abort_fcp_rings(phba);
a257bf90
JS
1567
1568 /*
1569 * There was a firmware error. Take the hba offline and then
1570 * attempt to restart it.
1571 */
618a5230 1572 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
a257bf90
JS
1573 lpfc_offline(phba);
1574
1575 /* Wait for the ER1 bit to clear. */
1576 while (phba->work_hs & HS_FFER1) {
1577 msleep(100);
9940b97b
JS
1578 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1579 phba->work_hs = UNPLUG_ERR;
1580 break;
1581 }
a257bf90
JS
1582 /* If driver is unloading let the worker thread continue */
1583 if (phba->pport->load_flag & FC_UNLOADING) {
1584 phba->work_hs = 0;
1585 break;
1586 }
1587 }
1588
1589 /*
1590 * This is to protect against a race condition in which the
1591 * first write to the host attention register clears the
1592 * host status register.
1593 */
1594 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1595 phba->work_hs = old_host_status & ~HS_FFER1;
1596
3772a991 1597 spin_lock_irq(&phba->hbalock);
a257bf90 1598 phba->hba_flag &= ~DEFER_ERATT;
3772a991 1599 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
1600 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1601 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1602}
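/*
 * Illustrative sketch (example only, never built): the deferred-error
 * handler above polls the host status register until HS_FFER1 clears,
 * bailing out on a failed register read or a driver unload. A minimal,
 * self-contained version of that poll-with-bailout pattern:
 */
#if 0
static int example_wait_er1_clear(struct lpfc_hba *phba)
{
	uint32_t hs;

	for (;;) {
		msleep(100);
		/* A failed read means the device is effectively unplugged */
		if (lpfc_readl(phba->HSregaddr, &hs))
			return -EIO;
		if (!(hs & HS_FFER1))
			return 0;
		/* Let the worker thread continue if the driver is unloading */
		if (phba->pport->load_flag & FC_UNLOADING)
			return -EBUSY;
	}
}
#endif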
1603
3772a991
JS
1604static void
1605lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1606{
1607 struct lpfc_board_event_header board_event;
1608 struct Scsi_Host *shost;
1609
1610 board_event.event_type = FC_REG_BOARD_EVENT;
1611 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1612 shost = lpfc_shost_from_vport(phba->pport);
1613 fc_host_post_vendor_event(shost, fc_get_event_number(),
1614 sizeof(board_event),
1615 (char *) &board_event,
1616 LPFC_NL_VENDOR_ID);
1617}
1618
e59058c4 1619/**
3772a991 1620 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
e59058c4
JS
1621 * @phba: pointer to lpfc hba data structure.
1622 *
1623 * This routine is invoked to handle the following HBA hardware error
1624 * conditions:
1625 * 1 - HBA error attention interrupt
1626 * 2 - DMA ring index out of range
1627 * 3 - Mailbox command came back as unknown
1628 **/
3772a991
JS
1629static void
1630lpfc_handle_eratt_s3(struct lpfc_hba *phba)
dea3101e 1631{
2e0fef85 1632 struct lpfc_vport *vport = phba->pport;
2e0fef85 1633 struct lpfc_sli *psli = &phba->sli;
d2873e4c 1634 uint32_t event_data;
57127f15
JS
1635 unsigned long temperature;
1636 struct temp_event temp_event_data;
92d7f7b0 1637 struct Scsi_Host *shost;
2e0fef85 1638
8d63f375 1639 /* If the pci channel is offline, ignore possible errors,
3772a991
JS
1640 * since we cannot communicate with the pci card anyway.
1641 */
1642 if (pci_channel_offline(phba->pcidev)) {
1643 spin_lock_irq(&phba->hbalock);
1644 phba->hba_flag &= ~DEFER_ERATT;
1645 spin_unlock_irq(&phba->hbalock);
8d63f375 1646 return;
3772a991
JS
1647 }
1648
13815c83
JS
1649 /* If resets are disabled then leave the HBA alone and return */
1650 if (!phba->cfg_enable_hba_reset)
1651 return;
dea3101e 1652
ea2151b4 1653 /* Send an internal error event to mgmt application */
3772a991 1654 lpfc_board_errevt_to_mgmt(phba);
ea2151b4 1655
a257bf90
JS
1656 if (phba->hba_flag & DEFER_ERATT)
1657 lpfc_handle_deferred_eratt(phba);
1658
dcf2a4e0
JS
1659 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1660 if (phba->work_hs & HS_FFER6)
1661 /* Re-establishing Link */
1662 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1663 "1301 Re-establishing Link "
1664 "Data: x%x x%x x%x\n",
1665 phba->work_hs, phba->work_status[0],
1666 phba->work_status[1]);
1667 if (phba->work_hs & HS_FFER8)
1668 /* Device Zeroization */
1669 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1670 "2861 Host Authentication device "
1671 "zeroization Data:x%x x%x x%x\n",
1672 phba->work_hs, phba->work_status[0],
1673 phba->work_status[1]);
58da1ffb 1674
92d7f7b0 1675 spin_lock_irq(&phba->hbalock);
f4b4c68f 1676 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
92d7f7b0 1677 spin_unlock_irq(&phba->hbalock);
dea3101e 1678
1679 /*
1680 * Firmware stops when it triggers erratt with HS_FFER6.
1681 * That could cause the firmware to drop I/Os.
1682 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI
1683 * layer retry them after re-establishing the link.
1684 */
db55fba8 1685 lpfc_sli_abort_fcp_rings(phba);
dea3101e 1686
dea3101e 1687 /*
1688 * There was a firmware error. Take the hba offline and then
1689 * attempt to restart it.
1690 */
618a5230 1691 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
dea3101e 1692 lpfc_offline(phba);
41415862 1693 lpfc_sli_brdrestart(phba);
dea3101e 1694 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
46fa311e 1695 lpfc_unblock_mgmt_io(phba);
dea3101e 1696 return;
1697 }
46fa311e 1698 lpfc_unblock_mgmt_io(phba);
57127f15
JS
1699 } else if (phba->work_hs & HS_CRIT_TEMP) {
1700 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1701 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1702 temp_event_data.event_code = LPFC_CRIT_TEMP;
1703 temp_event_data.data = (uint32_t)temperature;
1704
1705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 1706 "0406 Adapter maximum temperature exceeded "
57127f15
JS
1707 "(%ld), taking this port offline "
1708 "Data: x%x x%x x%x\n",
1709 temperature, phba->work_hs,
1710 phba->work_status[0], phba->work_status[1]);
1711
1712 shost = lpfc_shost_from_vport(phba->pport);
1713 fc_host_post_vendor_event(shost, fc_get_event_number(),
1714 sizeof(temp_event_data),
1715 (char *) &temp_event_data,
1716 SCSI_NL_VID_TYPE_PCI
1717 | PCI_VENDOR_ID_EMULEX);
1718
7af67051 1719 spin_lock_irq(&phba->hbalock);
7af67051
JS
1720 phba->over_temp_state = HBA_OVER_TEMP;
1721 spin_unlock_irq(&phba->hbalock);
09372820 1722 lpfc_offline_eratt(phba);
57127f15 1723
dea3101e 1724 } else {
1725 /* The if clause above forces this code path when the status
9399627f
JS
1726 * failure is a value other than FFER6. Do not take the port
1727 * offline twice. This is the adapter hardware error path.
dea3101e 1728 */
1729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1730 "0457 Adapter Hardware Error "
dea3101e 1731 "Data: x%x x%x x%x\n",
e8b62011 1732 phba->work_hs,
dea3101e 1733 phba->work_status[0], phba->work_status[1]);
1734
d2873e4c 1735 event_data = FC_REG_DUMP_EVENT;
92d7f7b0 1736 shost = lpfc_shost_from_vport(vport);
2e0fef85 1737 fc_host_post_vendor_event(shost, fc_get_event_number(),
d2873e4c
JS
1738 sizeof(event_data), (char *) &event_data,
1739 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1740
09372820 1741 lpfc_offline_eratt(phba);
dea3101e 1742 }
9399627f 1743 return;
dea3101e 1744}
1745
618a5230
JS
1746/**
1747 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1748 * @phba: pointer to lpfc hba data structure.
1749 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: when true, log a "Reset Needed" message before attempting recovery.
1750 *
1751 * This routine is invoked to perform an SLI4 port PCI function reset in
1752 * response to port status register polling attention. It waits for port
1753 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1754 * During this process, interrupt vectors are freed and later requested
1755 * for handling possible port resource change.
1756 **/
1757static int
e10b2022
JS
1758lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1759 bool en_rn_msg)
618a5230
JS
1760{
1761 int rc;
1762 uint32_t intr_mode;
1763
27d6ac0a 1764 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
65791f1f
JS
1765 LPFC_SLI_INTF_IF_TYPE_2) {
1766 /*
1767 * On an error status condition, the driver needs to wait for
1768 * the port to become ready before performing the reset.
1769 */
1770 rc = lpfc_sli4_pdev_status_reg_wait(phba);
0e916ee7 1771 if (rc)
65791f1f
JS
1772 return rc;
1773 }
0e916ee7 1774
65791f1f
JS
1775 /* Need reset: attempt port recovery */
1776 if (en_rn_msg)
1777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1778 "2887 Reset Needed: Attempting Port "
1779 "Recovery...\n");
1780 lpfc_offline_prep(phba, mbx_action);
1781 lpfc_offline(phba);
1782 /* release interrupt for possible resource change */
1783 lpfc_sli4_disable_intr(phba);
1784 lpfc_sli_brdrestart(phba);
1785 /* request and enable interrupt */
1786 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1787 if (intr_mode == LPFC_INTR_ERROR) {
1788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1789 "3175 Failed to enable interrupt\n");
1790 return -EIO;
618a5230 1791 }
65791f1f
JS
1792 phba->intr_mode = intr_mode;
1793 rc = lpfc_online(phba);
1794 if (rc == 0)
1795 lpfc_unblock_mgmt_io(phba);
1796
618a5230
JS
1797 return rc;
1798}
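/*
 * Usage note: both SLI4 error paths in this file invoke
 * lpfc_sli4_port_sta_fn_reset() with LPFC_MBX_NO_WAIT, so the recovery
 * sequence (offline prep, offline, free interrupts, board restart,
 * re-enable interrupts, online) does not block waiting for an active
 * mailbox command to drain.
 */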
1799
da0436e9
JS
1800/**
1801 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1802 * @phba: pointer to lpfc hba data structure.
1803 *
1804 * This routine is invoked to handle the SLI4 HBA hardware error attention
1805 * conditions.
1806 **/
1807static void
1808lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1809{
1810 struct lpfc_vport *vport = phba->pport;
1811 uint32_t event_data;
1812 struct Scsi_Host *shost;
2fcee4bf 1813 uint32_t if_type;
2e90f4b5
JS
1814 struct lpfc_register portstat_reg = {0};
1815 uint32_t reg_err1, reg_err2;
1816 uint32_t uerrlo_reg, uemasklo_reg;
65791f1f 1817 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
e10b2022 1818 bool en_rn_msg = true;
946727dc 1819 struct temp_event temp_event_data;
65791f1f
JS
1820 struct lpfc_register portsmphr_reg;
1821 int rc, i;
da0436e9
JS
1822
1823 /* If the pci channel is offline, ignore possible errors, since
1824 * we cannot communicate with the pci card anyway.
1825 */
1826 if (pci_channel_offline(phba->pcidev))
1827 return;
da0436e9 1828
65791f1f 1829 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2fcee4bf
JS
1830 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1831 switch (if_type) {
1832 case LPFC_SLI_INTF_IF_TYPE_0:
2e90f4b5
JS
1833 pci_rd_rc1 = lpfc_readl(
1834 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1835 &uerrlo_reg);
1836 pci_rd_rc2 = lpfc_readl(
1837 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1838 &uemasklo_reg);
1839 /* consider PCI bus read error as pci_channel_offline */
1840 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1841 return;
65791f1f
JS
1842 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1843 lpfc_sli4_offline_eratt(phba);
1844 return;
1845 }
1846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1847 "7623 Checking UE recoverable");
1848
1849 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1850 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1851 &portsmphr_reg.word0))
1852 continue;
1853
1854 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1855 &portsmphr_reg);
1856 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1857 LPFC_PORT_SEM_UE_RECOVERABLE)
1858 break;
1859 /* Sleep for 1 sec before checking the SEMAPHORE */
1860 msleep(1000);
1861 }
1862
1863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1864 "4827 smphr_port_status x%x : Waited %dSec",
1865 smphr_port_status, i);
1866
1867 /* Recoverable UE, reset the HBA device */
1868 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1869 LPFC_PORT_SEM_UE_RECOVERABLE) {
1870 for (i = 0; i < 20; i++) {
1871 msleep(1000);
1872 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1873 &portsmphr_reg.word0) &&
1874 (LPFC_POST_STAGE_PORT_READY ==
1875 bf_get(lpfc_port_smphr_port_status,
1876 &portsmphr_reg))) {
1877 rc = lpfc_sli4_port_sta_fn_reset(phba,
1878 LPFC_MBX_NO_WAIT, en_rn_msg);
1879 if (rc == 0)
1880 return;
1881 lpfc_printf_log(phba,
1882 KERN_ERR, LOG_INIT,
1883 "4215 Failed to recover UE");
1884 break;
1885 }
1886 }
1887 }
1888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1889 "7624 Firmware not ready: Failing UE recovery,"
1890 " waited %dSec", i);
2fcee4bf
JS
1891 lpfc_sli4_offline_eratt(phba);
1892 break;
946727dc 1893
2fcee4bf 1894 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 1895 case LPFC_SLI_INTF_IF_TYPE_6:
2e90f4b5
JS
1896 pci_rd_rc1 = lpfc_readl(
1897 phba->sli4_hba.u.if_type2.STATUSregaddr,
1898 &portstat_reg.word0);
1899 /* consider PCI bus read error as pci_channel_offline */
6b5151fd
JS
1900 if (pci_rd_rc1 == -EIO) {
1901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1902 "3151 PCI bus read access failure: x%x\n",
1903 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2e90f4b5 1904 return;
6b5151fd 1905 }
2e90f4b5
JS
1906 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1907 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2fcee4bf 1908 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2fcee4bf
JS
1909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1910 "2889 Port Overtemperature event, "
946727dc
JS
1911 "taking port offline Data: x%x x%x\n",
1912 reg_err1, reg_err2);
1913
310429ef 1914 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
946727dc
JS
1915 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1916 temp_event_data.event_code = LPFC_CRIT_TEMP;
1917 temp_event_data.data = 0xFFFFFFFF;
1918
1919 shost = lpfc_shost_from_vport(phba->pport);
1920 fc_host_post_vendor_event(shost, fc_get_event_number(),
1921 sizeof(temp_event_data),
1922 (char *)&temp_event_data,
1923 SCSI_NL_VID_TYPE_PCI
1924 | PCI_VENDOR_ID_EMULEX);
1925
2fcee4bf
JS
1926 spin_lock_irq(&phba->hbalock);
1927 phba->over_temp_state = HBA_OVER_TEMP;
1928 spin_unlock_irq(&phba->hbalock);
1929 lpfc_sli4_offline_eratt(phba);
946727dc 1930 return;
2fcee4bf 1931 }
2e90f4b5 1932 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
e10b2022 1933 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2e90f4b5 1934 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e10b2022
JS
1935 "3143 Port Down: Firmware Update "
1936 "Detected\n");
1937 en_rn_msg = false;
1938 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2e90f4b5
JS
1939 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1941 "3144 Port Down: Debug Dump\n");
1942 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1943 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1945 "3145 Port Down: Provisioning\n");
618a5230 1946
946727dc
JS
1947 /* If resets are disabled then leave the HBA alone and return */
1948 if (!phba->cfg_enable_hba_reset)
1949 return;
1950
618a5230 1951 /* Check port status register for function reset */
e10b2022
JS
1952 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1953 en_rn_msg);
618a5230
JS
1954 if (rc == 0) {
1955 /* don't report event on forced debug dump */
1956 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1957 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1958 return;
1959 else
1960 break;
2fcee4bf 1961 }
618a5230 1962 /* fall through for not able to recover */
6b5151fd
JS
1963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1964 "3152 Unrecoverable error, bring the port "
1965 "offline\n");
2fcee4bf
JS
1966 lpfc_sli4_offline_eratt(phba);
1967 break;
1968 case LPFC_SLI_INTF_IF_TYPE_1:
1969 default:
1970 break;
1971 }
2e90f4b5
JS
1972 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1973 "3123 Report dump event to upper layer\n");
1974 /* Send an internal error event to mgmt application */
1975 lpfc_board_errevt_to_mgmt(phba);
1976
1977 event_data = FC_REG_DUMP_EVENT;
1978 shost = lpfc_shost_from_vport(vport);
1979 fc_host_post_vendor_event(shost, fc_get_event_number(),
1980 sizeof(event_data), (char *) &event_data,
1981 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
da0436e9
JS
1982}
1983
1984/**
1985 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1986 * @phba: pointer to lpfc HBA data structure.
1987 *
1988 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1989 * routine, invoked through the API jump table function pointer in the lpfc_hba struct.
1994 **/
1995void
1996lpfc_handle_eratt(struct lpfc_hba *phba)
1997{
1998 (*phba->lpfc_handle_eratt)(phba);
1999}
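/*
 * Illustrative sketch (example only, never built): the indirect call
 * above assumes phba->lpfc_handle_eratt was bound when the per-device
 * API jump table was set up, conceptually along these lines:
 */
#if 0
static void example_bind_eratt_handler(struct lpfc_hba *phba)
{
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI3 LightPulse device */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		break;
	case LPFC_PCI_DEV_OC:	/* SLI4 device */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		break;
	default:
		break;
	}
}
#endif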
2000
e59058c4 2001/**
3621a710 2002 * lpfc_handle_latt - The HBA link event handler
e59058c4
JS
2003 * @phba: pointer to lpfc hba data structure.
2004 *
2005 * This routine is invoked from the worker thread to handle a HBA host
895427bd 2006 * attention link event. SLI3 only.
e59058c4 2007 **/
dea3101e 2008void
2e0fef85 2009lpfc_handle_latt(struct lpfc_hba *phba)
dea3101e 2010{
2e0fef85
JS
2011 struct lpfc_vport *vport = phba->pport;
2012 struct lpfc_sli *psli = &phba->sli;
dea3101e 2013 LPFC_MBOXQ_t *pmb;
2014 volatile uint32_t control;
2015 struct lpfc_dmabuf *mp;
09372820 2016 int rc = 0;
dea3101e 2017
2018 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
09372820
JS
2019 if (!pmb) {
2020 rc = 1;
dea3101e 2021 goto lpfc_handle_latt_err_exit;
09372820 2022 }
dea3101e 2023
2024 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
09372820
JS
2025 if (!mp) {
2026 rc = 2;
dea3101e 2027 goto lpfc_handle_latt_free_pmb;
09372820 2028 }
dea3101e 2029
2030 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
09372820
JS
2031 if (!mp->virt) {
2032 rc = 3;
dea3101e 2033 goto lpfc_handle_latt_free_mp;
09372820 2034 }
dea3101e 2035
6281bfe0 2036 /* Cleanup any outstanding ELS commands */
549e55cd 2037 lpfc_els_flush_all_cmd(phba);
dea3101e 2038
2039 psli->slistat.link_event++;
76a95d75
JS
2040 lpfc_read_topology(phba, pmb, mp);
2041 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2e0fef85 2042 pmb->vport = vport;
0d2b6b83 2043 /* Block ELS IOCBs until we have processed this mbox command */
895427bd 2044 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
0b727fea 2045 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
09372820
JS
2046 if (rc == MBX_NOT_FINISHED) {
2047 rc = 4;
14691150 2048 goto lpfc_handle_latt_free_mbuf;
09372820 2049 }
dea3101e 2050
2051 /* Clear Link Attention in HA REG */
2e0fef85 2052 spin_lock_irq(&phba->hbalock);
dea3101e 2053 writel(HA_LATT, phba->HAregaddr);
2054 readl(phba->HAregaddr); /* flush */
2e0fef85 2055 spin_unlock_irq(&phba->hbalock);
dea3101e 2056
2057 return;
2058
14691150 2059lpfc_handle_latt_free_mbuf:
895427bd 2060 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
14691150 2061 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 2062lpfc_handle_latt_free_mp:
2063 kfree(mp);
2064lpfc_handle_latt_free_pmb:
1dcb58e5 2065 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2066lpfc_handle_latt_err_exit:
2067 /* Enable Link attention interrupts */
2e0fef85 2068 spin_lock_irq(&phba->hbalock);
dea3101e 2069 psli->sli_flag |= LPFC_PROCESS_LA;
2070 control = readl(phba->HCregaddr);
2071 control |= HC_LAINT_ENA;
2072 writel(control, phba->HCregaddr);
2073 readl(phba->HCregaddr); /* flush */
2074
2075 /* Clear Link Attention in HA REG */
2076 writel(HA_LATT, phba->HAregaddr);
2077 readl(phba->HAregaddr); /* flush */
2e0fef85 2078 spin_unlock_irq(&phba->hbalock);
dea3101e 2079 lpfc_linkdown(phba);
2e0fef85 2080 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2081
09372820
JS
2082 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2083 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea3101e 2084
2085 return;
2086}
2087
e59058c4 2088/**
3621a710 2089 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
e59058c4
JS
2090 * @phba: pointer to lpfc hba data structure.
2091 * @vpd: pointer to the vital product data.
2092 * @len: length of the vital product data in bytes.
2093 *
2094 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2095 * an array of characters. In this routine, the ModelName, ProgramType,
2096 * ModelDesc, and other fields of the phba data structure will be populated.
2097 *
2098 * Return codes
2099 * 0 - pointer to the VPD passed in is NULL
2100 * 1 - success
2101 **/
3772a991 2102int
2e0fef85 2103lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea3101e 2104{
2105 uint8_t lenlo, lenhi;
07da60c1 2106 int Length;
dea3101e 2107 int i, j;
2108 int finished = 0;
2109 int index = 0;
2110
2111 if (!vpd)
2112 return 0;
2113
2114 /* Vital Product */
ed957684 2115 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 2116 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea3101e 2117 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2118 (uint32_t) vpd[3]);
74b72a59 2119 while (!finished && (index < (len - 4))) {
dea3101e 2120 switch (vpd[index]) {
2121 case 0x82:
74b72a59 2122 case 0x91:
dea3101e 2123 index += 1;
2124 lenlo = vpd[index];
2125 index += 1;
2126 lenhi = vpd[index];
2127 index += 1;
2128 i = ((((unsigned short)lenhi) << 8) + lenlo);
2129 index += i;
2130 break;
2131 case 0x90:
2132 index += 1;
2133 lenlo = vpd[index];
2134 index += 1;
2135 lenhi = vpd[index];
2136 index += 1;
2137 Length = ((((unsigned short)lenhi) << 8) + lenlo);
74b72a59
JW
2138 if (Length > len - index)
2139 Length = len - index;
dea3101e 2140 while (Length > 0) {
2141 /* Look for Serial Number */
2142 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2143 index += 2;
2144 i = vpd[index];
2145 index += 1;
2146 j = 0;
2147 Length -= (3+i);
2148 while(i--) {
2149 phba->SerialNumber[j++] = vpd[index++];
2150 if (j == 31)
2151 break;
2152 }
2153 phba->SerialNumber[j] = 0;
2154 continue;
2155 }
2156 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2157 phba->vpd_flag |= VPD_MODEL_DESC;
2158 index += 2;
2159 i = vpd[index];
2160 index += 1;
2161 j = 0;
2162 Length -= (3+i);
2163 while(i--) {
2164 phba->ModelDesc[j++] = vpd[index++];
2165 if (j == 255)
2166 break;
2167 }
2168 phba->ModelDesc[j] = 0;
2169 continue;
2170 }
2171 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2172 phba->vpd_flag |= VPD_MODEL_NAME;
2173 index += 2;
2174 i = vpd[index];
2175 index += 1;
2176 j = 0;
2177 Length -= (3+i);
2178 while(i--) {
2179 phba->ModelName[j++] = vpd[index++];
2180 if (j == 79)
2181 break;
2182 }
2183 phba->ModelName[j] = 0;
2184 continue;
2185 }
2186 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2187 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2188 index += 2;
2189 i = vpd[index];
2190 index += 1;
2191 j = 0;
2192 Length -= (3+i);
2193 while(i--) {
2194 phba->ProgramType[j++] = vpd[index++];
2195 if (j == 255)
2196 break;
2197 }
2198 phba->ProgramType[j] = 0;
2199 continue;
2200 }
2201 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2202 phba->vpd_flag |= VPD_PORT;
2203 index += 2;
2204 i = vpd[index];
2205 index += 1;
2206 j = 0;
2207 Length -= (3+i);
2208 while(i--) {
cd1c8301
JS
2209 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2210 (phba->sli4_hba.pport_name_sta ==
2211 LPFC_SLI4_PPNAME_GET)) {
2212 j++;
2213 index++;
2214 } else
2215 phba->Port[j++] = vpd[index++];
2216 if (j == 19)
2217 break;
dea3101e 2218 }
cd1c8301
JS
2219 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2220 (phba->sli4_hba.pport_name_sta ==
2221 LPFC_SLI4_PPNAME_NON))
2222 phba->Port[j] = 0;
dea3101e 2223 continue;
2224 }
2225 else {
2226 index += 2;
2227 i = vpd[index];
2228 index += 1;
2229 index += i;
2230 Length -= (3 + i);
2231 }
2232 }
2233 finished = 0;
2234 break;
2235 case 0x78:
2236 finished = 1;
2237 break;
2238 default:
2239 index++;
2240 break;
2241 }
74b72a59 2242 }
dea3101e 2243
2244 return 1;
2245}
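/*
 * Layout note (summarizing the standard PCI VPD encoding the parser above
 * assumes): the image is a sequence of resources, each introduced by a tag
 * byte and, for large resources, a little-endian 16-bit length:
 *
 *   0x82  identifier string (skipped)
 *   0x91  read/write VPD fields (skipped)
 *   0x90  read-only VPD fields of the form <keyword[2]> <len> <data>,
 *         where "SN" is the serial number and "V1".."V4" carry the
 *         ModelDesc, ModelName, ProgramType and Port strings
 *   0x78  end tag, which terminates the walk
 */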
2246
e59058c4 2247/**
3621a710 2248 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
2249 * @phba: pointer to lpfc hba data structure.
2250 * @mdp: pointer to the data structure to hold the derived model name.
2251 * @descp: pointer to the data structure to hold the derived description.
2252 *
2253 * This routine retrieves the HBA's description based on its registered PCI device
2254 * ID. The @descp passed into this function points to an array of 256 chars. It
2255 * shall be returned with the model name, maximum speed, and the host bus type.
2256 * The @mdp passed into this function points to an array of 80 chars. When the
2257 * function returns, the @mdp will be filled with the model name.
2258 **/
dea3101e 2259static void
2e0fef85 2260lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea3101e 2261{
2262 lpfc_vpd_t *vp;
fefcb2b6 2263 uint16_t dev_id = phba->pcidev->device;
74b72a59 2264 int max_speed;
84774a4d 2265 int GE = 0;
da0436e9 2266 int oneConnect = 0; /* default is not a oneConnect */
74b72a59 2267 struct {
a747c9ce
JS
2268 char *name;
2269 char *bus;
2270 char *function;
2271 } m = {"<Unknown>", "", ""};
74b72a59
JW
2272
2273 if (mdp && mdp[0] != '\0'
2274 && descp && descp[0] != '\0')
2275 return;
2276
d38dd52c
JS
2277 if (phba->lmt & LMT_32Gb)
2278 max_speed = 32;
2279 else if (phba->lmt & LMT_16Gb)
c0c11512
JS
2280 max_speed = 16;
2281 else if (phba->lmt & LMT_10Gb)
74b72a59
JW
2282 max_speed = 10;
2283 else if (phba->lmt & LMT_8Gb)
2284 max_speed = 8;
2285 else if (phba->lmt & LMT_4Gb)
2286 max_speed = 4;
2287 else if (phba->lmt & LMT_2Gb)
2288 max_speed = 2;
4169d868 2289 else if (phba->lmt & LMT_1Gb)
74b72a59 2290 max_speed = 1;
4169d868
JS
2291 else
2292 max_speed = 0;
dea3101e 2293
2294 vp = &phba->vpd;
dea3101e 2295
e4adb204 2296 switch (dev_id) {
06325e74 2297 case PCI_DEVICE_ID_FIREFLY:
12222f4f
JS
2298 m = (typeof(m)){"LP6000", "PCI",
2299 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2300 break;
dea3101e 2301 case PCI_DEVICE_ID_SUPERFLY:
2302 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
12222f4f 2303 m = (typeof(m)){"LP7000", "PCI", ""};
dea3101e 2304 else
12222f4f
JS
2305 m = (typeof(m)){"LP7000E", "PCI", ""};
2306 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e 2307 break;
2308 case PCI_DEVICE_ID_DRAGONFLY:
a747c9ce 2309 m = (typeof(m)){"LP8000", "PCI",
12222f4f 2310 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2311 break;
2312 case PCI_DEVICE_ID_CENTAUR:
2313 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
12222f4f 2314 m = (typeof(m)){"LP9002", "PCI", ""};
dea3101e 2315 else
12222f4f
JS
2316 m = (typeof(m)){"LP9000", "PCI", ""};
2317 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e 2318 break;
2319 case PCI_DEVICE_ID_RFLY:
a747c9ce 2320 m = (typeof(m)){"LP952", "PCI",
12222f4f 2321 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2322 break;
2323 case PCI_DEVICE_ID_PEGASUS:
a747c9ce 2324 m = (typeof(m)){"LP9802", "PCI-X",
12222f4f 2325 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2326 break;
2327 case PCI_DEVICE_ID_THOR:
a747c9ce 2328 m = (typeof(m)){"LP10000", "PCI-X",
12222f4f 2329 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2330 break;
2331 case PCI_DEVICE_ID_VIPER:
a747c9ce 2332 m = (typeof(m)){"LPX1000", "PCI-X",
12222f4f 2333 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2334 break;
2335 case PCI_DEVICE_ID_PFLY:
a747c9ce 2336 m = (typeof(m)){"LP982", "PCI-X",
12222f4f 2337 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2338 break;
2339 case PCI_DEVICE_ID_TFLY:
a747c9ce 2340 m = (typeof(m)){"LP1050", "PCI-X",
12222f4f 2341 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2342 break;
2343 case PCI_DEVICE_ID_HELIOS:
a747c9ce 2344 m = (typeof(m)){"LP11000", "PCI-X2",
12222f4f 2345 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2346 break;
e4adb204 2347 case PCI_DEVICE_ID_HELIOS_SCSP:
a747c9ce 2348 m = (typeof(m)){"LP11000-SP", "PCI-X2",
12222f4f 2349 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2350 break;
2351 case PCI_DEVICE_ID_HELIOS_DCSP:
a747c9ce 2352 m = (typeof(m)){"LP11002-SP", "PCI-X2",
12222f4f 2353 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2354 break;
2355 case PCI_DEVICE_ID_NEPTUNE:
12222f4f
JS
2356 m = (typeof(m)){"LPe1000", "PCIe",
2357 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2358 break;
2359 case PCI_DEVICE_ID_NEPTUNE_SCSP:
12222f4f
JS
2360 m = (typeof(m)){"LPe1000-SP", "PCIe",
2361 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2362 break;
2363 case PCI_DEVICE_ID_NEPTUNE_DCSP:
12222f4f
JS
2364 m = (typeof(m)){"LPe1002-SP", "PCIe",
2365 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204 2366 break;
dea3101e 2367 case PCI_DEVICE_ID_BMID:
a747c9ce 2368 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
dea3101e 2369 break;
2370 case PCI_DEVICE_ID_BSMB:
12222f4f
JS
2371 m = (typeof(m)){"LP111", "PCI-X2",
2372 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2373 break;
2374 case PCI_DEVICE_ID_ZEPHYR:
a747c9ce 2375 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
dea3101e 2376 break;
e4adb204 2377 case PCI_DEVICE_ID_ZEPHYR_SCSP:
a747c9ce 2378 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
2379 break;
2380 case PCI_DEVICE_ID_ZEPHYR_DCSP:
a747c9ce 2381 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
a257bf90 2382 GE = 1;
e4adb204 2383 break;
dea3101e 2384 case PCI_DEVICE_ID_ZMID:
a747c9ce 2385 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
dea3101e 2386 break;
2387 case PCI_DEVICE_ID_ZSMB:
a747c9ce 2388 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
dea3101e 2389 break;
2390 case PCI_DEVICE_ID_LP101:
12222f4f
JS
2391 m = (typeof(m)){"LP101", "PCI-X",
2392 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2393 break;
2394 case PCI_DEVICE_ID_LP10000S:
12222f4f
JS
2395 m = (typeof(m)){"LP10000-S", "PCI",
2396 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2397 break;
e4adb204 2398 case PCI_DEVICE_ID_LP11000S:
12222f4f
JS
2399 m = (typeof(m)){"LP11000-S", "PCI-X2",
2400 "Obsolete, Unsupported Fibre Channel Adapter"};
18a3b596 2401 break;
e4adb204 2402 case PCI_DEVICE_ID_LPE11000S:
12222f4f
JS
2403 m = (typeof(m)){"LPe11000-S", "PCIe",
2404 "Obsolete, Unsupported Fibre Channel Adapter"};
5cc36b3c 2405 break;
b87eab38 2406 case PCI_DEVICE_ID_SAT:
a747c9ce 2407 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2408 break;
2409 case PCI_DEVICE_ID_SAT_MID:
a747c9ce 2410 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2411 break;
2412 case PCI_DEVICE_ID_SAT_SMB:
a747c9ce 2413 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2414 break;
2415 case PCI_DEVICE_ID_SAT_DCSP:
a747c9ce 2416 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2417 break;
2418 case PCI_DEVICE_ID_SAT_SCSP:
a747c9ce 2419 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2420 break;
2421 case PCI_DEVICE_ID_SAT_S:
a747c9ce 2422 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
b87eab38 2423 break;
84774a4d 2424 case PCI_DEVICE_ID_HORNET:
12222f4f
JS
2425 m = (typeof(m)){"LP21000", "PCIe",
2426 "Obsolete, Unsupported FCoE Adapter"};
84774a4d
JS
2427 GE = 1;
2428 break;
2429 case PCI_DEVICE_ID_PROTEUS_VF:
a747c9ce 2430 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2431 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2432 break;
2433 case PCI_DEVICE_ID_PROTEUS_PF:
a747c9ce 2434 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2435 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2436 break;
2437 case PCI_DEVICE_ID_PROTEUS_S:
a747c9ce 2438 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
12222f4f 2439 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d 2440 break;
da0436e9
JS
2441 case PCI_DEVICE_ID_TIGERSHARK:
2442 oneConnect = 1;
a747c9ce 2443 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
da0436e9 2444 break;
a747c9ce 2445 case PCI_DEVICE_ID_TOMCAT:
6669f9bb 2446 oneConnect = 1;
a747c9ce
JS
2447 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2448 break;
2449 case PCI_DEVICE_ID_FALCON:
2450 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2451 "EmulexSecure Fibre"};
6669f9bb 2452 break;
98fc5dd9
JS
2453 case PCI_DEVICE_ID_BALIUS:
2454 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
12222f4f 2455 "Obsolete, Unsupported Fibre Channel Adapter"};
98fc5dd9 2456 break;
085c647c 2457 case PCI_DEVICE_ID_LANCER_FC:
c0c11512 2458 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
085c647c 2459 break;
12222f4f
JS
2460 case PCI_DEVICE_ID_LANCER_FC_VF:
2461 m = (typeof(m)){"LPe16000", "PCIe",
2462 "Obsolete, Unsupported Fibre Channel Adapter"};
2463 break;
085c647c
JS
2464 case PCI_DEVICE_ID_LANCER_FCOE:
2465 oneConnect = 1;
079b5c91 2466 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
085c647c 2467 break;
12222f4f
JS
2468 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2469 oneConnect = 1;
2470 m = (typeof(m)){"OCe15100", "PCIe",
2471 "Obsolete, Unsupported FCoE"};
2472 break;
d38dd52c
JS
2473 case PCI_DEVICE_ID_LANCER_G6_FC:
2474 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2475 break;
f8cafd38
JS
2476 case PCI_DEVICE_ID_SKYHAWK:
2477 case PCI_DEVICE_ID_SKYHAWK_VF:
2478 oneConnect = 1;
2479 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2480 break;
5cc36b3c 2481 default:
a747c9ce 2482 m = (typeof(m)){"Unknown", "", ""};
e4adb204 2483 break;
dea3101e 2484 }
74b72a59
JW
2485
2486 if (mdp && mdp[0] == '\0')
2487 snprintf(mdp, 79,"%s", m.name);
c0c11512
JS
2488 /*
2489 * oneConnect HBAs require special processing; they are all initiators
da0436e9
JS
2490 * and we put the port number on the end.
2491 */
2492 if (descp && descp[0] == '\0') {
2493 if (oneConnect)
2494 snprintf(descp, 255,
4169d868 2495 "Emulex OneConnect %s, %s Initiator %s",
a747c9ce 2496 m.name, m.function,
da0436e9 2497 phba->Port);
4169d868
JS
2498 else if (max_speed == 0)
2499 snprintf(descp, 255,
290237d2 2500 "Emulex %s %s %s",
4169d868 2501 m.name, m.bus, m.function);
da0436e9
JS
2502 else
2503 snprintf(descp, 255,
2504 "Emulex %s %d%s %s %s",
a747c9ce
JS
2505 m.name, max_speed, (GE) ? "GE" : "Gb",
2506 m.bus, m.function);
da0436e9 2507 }
dea3101e 2508}
2509
e59058c4 2510/**
3621a710 2511 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
e59058c4
JS
2512 * @phba: pointer to lpfc hba data structure.
2513 * @pring: pointer to an IOCB ring.
2514 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2515 *
2516 * This routine posts a given number of IOCBs with the associated DMA buffer
2517 * descriptors specified by the cnt argument to the given IOCB ring.
2518 *
2519 * Return codes
2520 * The number of IOCBs NOT able to be posted to the IOCB ring.
2521 **/
dea3101e 2522int
495a714c 2523lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea3101e 2524{
2525 IOCB_t *icmd;
0bd4ca25 2526 struct lpfc_iocbq *iocb;
dea3101e 2527 struct lpfc_dmabuf *mp1, *mp2;
2528
2529 cnt += pring->missbufcnt;
2530
2531 /* While there are buffers to post */
2532 while (cnt > 0) {
2533 /* Allocate buffer for command iocb */
0bd4ca25 2534 iocb = lpfc_sli_get_iocbq(phba);
dea3101e 2535 if (iocb == NULL) {
2536 pring->missbufcnt = cnt;
2537 return cnt;
2538 }
dea3101e 2539 icmd = &iocb->iocb;
2540
2541 /* 2 buffers can be posted per command */
2542 /* Allocate buffer to post */
2543 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2544 if (mp1)
98c9ea5c
JS
2545 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2546 if (!mp1 || !mp1->virt) {
c9475cb0 2547 kfree(mp1);
604a3e30 2548 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2549 pring->missbufcnt = cnt;
2550 return cnt;
2551 }
2552
2553 INIT_LIST_HEAD(&mp1->list);
2554 /* Allocate buffer to post */
2555 if (cnt > 1) {
2556 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2557 if (mp2)
2558 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2559 &mp2->phys);
98c9ea5c 2560 if (!mp2 || !mp2->virt) {
c9475cb0 2561 kfree(mp2);
dea3101e 2562 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2563 kfree(mp1);
604a3e30 2564 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2565 pring->missbufcnt = cnt;
2566 return cnt;
2567 }
2568
2569 INIT_LIST_HEAD(&mp2->list);
2570 } else {
2571 mp2 = NULL;
2572 }
2573
2574 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2575 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2576 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2577 icmd->ulpBdeCount = 1;
2578 cnt--;
2579 if (mp2) {
2580 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2581 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2582 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2583 cnt--;
2584 icmd->ulpBdeCount = 2;
2585 }
2586
2587 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2588 icmd->ulpLe = 1;
2589
3772a991
JS
2590 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2591 IOCB_ERROR) {
dea3101e 2592 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2593 kfree(mp1);
2594 cnt++;
2595 if (mp2) {
2596 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2597 kfree(mp2);
2598 cnt++;
2599 }
604a3e30 2600 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2601 pring->missbufcnt = cnt;
dea3101e 2602 return cnt;
2603 }
dea3101e 2604 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
92d7f7b0 2605 if (mp2)
dea3101e 2606 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea3101e 2607 }
2608 pring->missbufcnt = 0;
2609 return 0;
2610}
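/*
 * Accounting note: lpfc_post_buffer() folds pring->missbufcnt into cnt on
 * entry, so buffers that could not be posted by an earlier call are
 * retried on the next one; every failure path stores the remaining
 * shortfall back in missbufcnt and returns it, and a fully successful
 * pass clears the counter.
 */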
2611
e59058c4 2612/**
3621a710 2613 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
2614 * @phba: pointer to lpfc hba data structure.
2615 *
2616 * This routine posts initial receive IOCB buffers to the ELS ring. The
2617 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
895427bd 2618 * set to 64 IOCBs. SLI3 only.
e59058c4
JS
2619 *
2620 * Return codes
2621 * 0 - success (currently always success)
2622 **/
dea3101e 2623static int
2e0fef85 2624lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e 2625{
2626 struct lpfc_sli *psli = &phba->sli;
2627
2628 /* Ring 0, ELS / CT buffers */
895427bd 2629 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e 2630 /* Ring 2 - FCP no buffers needed */
2631
2632 return 0;
2633}
2634
2635#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
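/* S(N, V) is a 32-bit rotate-left: e.g. S(1, 0x80000000) == 0x00000001. */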
2636
e59058c4 2637/**
3621a710 2638 * lpfc_sha_init - Set up initial array of hash table entries
e59058c4
JS
2639 * @HashResultPointer: pointer to an array used as the hash table.
2640 *
2641 * This routine sets up the initial values in the array of hash table entries
2642 * for the LC HBAs.
2643 **/
dea3101e 2644static void
2645lpfc_sha_init(uint32_t * HashResultPointer)
2646{
2647 HashResultPointer[0] = 0x67452301;
2648 HashResultPointer[1] = 0xEFCDAB89;
2649 HashResultPointer[2] = 0x98BADCFE;
2650 HashResultPointer[3] = 0x10325476;
2651 HashResultPointer[4] = 0xC3D2E1F0;
2652}
2653
e59058c4 2654/**
3621a710 2655 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
e59058c4
JS
2656 * @HashResultPointer: pointer to an initial/result hash table.
2657 * @HashWorkingPointer: pointer to a working hash table.
2658 *
2659 * This routine iterates an initial hash table pointed to by @HashResultPointer
2660 * with the values from the working hash table pointed to by @HashWorkingPointer.
2661 * The results are put back into the initial hash table, returned through
2662 * @HashResultPointer as the result hash table.
2663 **/
dea3101e 2664static void
2665lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2666{
2667 int t;
2668 uint32_t TEMP;
2669 uint32_t A, B, C, D, E;
2670 t = 16;
2671 do {
2672 HashWorkingPointer[t] =
2673 S(1,
2674 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2675 8] ^
2676 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2677 } while (++t <= 79);
2678 t = 0;
2679 A = HashResultPointer[0];
2680 B = HashResultPointer[1];
2681 C = HashResultPointer[2];
2682 D = HashResultPointer[3];
2683 E = HashResultPointer[4];
2684
2685 do {
2686 if (t < 20) {
2687 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2688 } else if (t < 40) {
2689 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2690 } else if (t < 60) {
2691 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2692 } else {
2693 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2694 }
2695 TEMP += S(5, A) + E + HashWorkingPointer[t];
2696 E = D;
2697 D = C;
2698 C = S(30, B);
2699 B = A;
2700 A = TEMP;
2701 } while (++t <= 79);
2702
2703 HashResultPointer[0] += A;
2704 HashResultPointer[1] += B;
2705 HashResultPointer[2] += C;
2706 HashResultPointer[3] += D;
2707 HashResultPointer[4] += E;
2708
2709}
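/*
 * Note: lpfc_sha_init() and lpfc_sha_iterate() together implement one
 * SHA-1 compression step. The five words seeded by lpfc_sha_init() are
 * the standard SHA-1 H0..H4 initial values, the four round constants
 * above (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6) are the
 * standard SHA-1 K values, and HashWorkingPointer[0..15] carries the
 * 16-word message block that the first loop expands in place into the
 * 80-word schedule.
 */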
2710
e59058c4 2711/**
3621a710 2712 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
e59058c4
JS
2713 * @RandomChallenge: pointer to the entry of host challenge random number array.
2714 * @HashWorking: pointer to the entry of the working hash array.
2715 *
2716 * This routine calculates the working hash array referred by @HashWorking
2717 * from the challenge random numbers associated with the host, referred by
2718 * @RandomChallenge. The result is put into the entry of the working hash
2719 * array and returned by reference through @HashWorking.
2720 **/
dea3101e 2721static void
2722lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2723{
2724 *HashWorking = (*RandomChallenge ^ *HashWorking);
2725}
2726
e59058c4 2727/**
3621a710 2728 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
2729 * @phba: pointer to lpfc hba data structure.
2730 * @hbainit: pointer to an array of unsigned 32-bit integers.
2731 *
2732 * This routine performs the special handling for LC HBA initialization.
2733 **/
dea3101e 2734void
2735lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2736{
2737 int t;
2738 uint32_t *HashWorking;
2e0fef85 2739 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 2740
bbfbbbc1 2741 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e 2742 if (!HashWorking)
2743 return;
2744
dea3101e 2745 HashWorking[0] = HashWorking[78] = *pwwnn++;
2746 HashWorking[1] = HashWorking[79] = *pwwnn;
2747
2748 for (t = 0; t < 7; t++)
2749 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2750
2751 lpfc_sha_init(hbainit);
2752 lpfc_sha_iterate(hbainit, HashWorking);
2753 kfree(HashWorking);
2754}
2755
e59058c4 2756/**
3621a710 2757 * lpfc_cleanup - Performs vport cleanups before deleting a vport
e59058c4
JS
2758 * @vport: pointer to a virtual N_Port data structure.
2759 *
2760 * This routine performs the necessary cleanups before deleting the @vport.
2761 * It invokes the discovery state machine to perform necessary state
2762 * transitions and to release the ndlps associated with the @vport. Note,
2763 * the physical port is treated as @vport 0.
2764 **/
87af33fe 2765void
2e0fef85 2766lpfc_cleanup(struct lpfc_vport *vport)
dea3101e 2767{
87af33fe 2768 struct lpfc_hba *phba = vport->phba;
dea3101e 2769 struct lpfc_nodelist *ndlp, *next_ndlp;
a8adb832 2770 int i = 0;
dea3101e 2771
87af33fe
JS
2772 if (phba->link_state > LPFC_LINK_DOWN)
2773 lpfc_port_link_failure(vport);
2774
2775 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
2776 if (!NLP_CHK_NODE_ACT(ndlp)) {
2777 ndlp = lpfc_enable_node(vport, ndlp,
2778 NLP_STE_UNUSED_NODE);
2779 if (!ndlp)
2780 continue;
2781 spin_lock_irq(&phba->ndlp_lock);
2782 NLP_SET_FREE_REQ(ndlp);
2783 spin_unlock_irq(&phba->ndlp_lock);
2784 /* Trigger the release of the ndlp memory */
2785 lpfc_nlp_put(ndlp);
2786 continue;
2787 }
2788 spin_lock_irq(&phba->ndlp_lock);
2789 if (NLP_CHK_FREE_REQ(ndlp)) {
2790 /* The ndlp should not be in memory free mode already */
2791 spin_unlock_irq(&phba->ndlp_lock);
2792 continue;
2793 } else
2794 /* Indicate request for freeing ndlp memory */
2795 NLP_SET_FREE_REQ(ndlp);
2796 spin_unlock_irq(&phba->ndlp_lock);
2797
58da1ffb
JS
2798 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2799 ndlp->nlp_DID == Fabric_DID) {
2800 /* Just free up ndlp with Fabric_DID for vports */
2801 lpfc_nlp_put(ndlp);
2802 continue;
2803 }
2804
eff4a01b
JS
2805 /* Take care of nodes in the unused state before the state
2806 * machine takes action.
2807 */
2808 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2809 lpfc_nlp_put(ndlp);
2810 continue;
2811 }
2812
87af33fe
JS
2813 if (ndlp->nlp_type & NLP_FABRIC)
2814 lpfc_disc_state_machine(vport, ndlp, NULL,
2815 NLP_EVT_DEVICE_RECOVERY);
e47c9093 2816
87af33fe
JS
2817 lpfc_disc_state_machine(vport, ndlp, NULL,
2818 NLP_EVT_DEVICE_RM);
2819 }
2820
a8adb832
JS
2821 /* At this point, ALL ndlp's should be gone
2822 * because of the previous NLP_EVT_DEVICE_RM.
2823 * Let's wait for this to happen, if needed.
2824 */
87af33fe 2825 while (!list_empty(&vport->fc_nodes)) {
a8adb832 2826 if (i++ > 3000) {
87af33fe 2827 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
a8adb832 2828 "0233 Nodelist not empty\n");
e47c9093
JS
2829 list_for_each_entry_safe(ndlp, next_ndlp,
2830 &vport->fc_nodes, nlp_listp) {
2831 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2832 LOG_NODE,
d7c255b2 2833 "0282 did:x%x ndlp:x%p "
e47c9093
JS
2834 "usgmap:x%x refcnt:%d\n",
2835 ndlp->nlp_DID, (void *)ndlp,
2836 ndlp->nlp_usg_map,
2c935bc5 2837 kref_read(&ndlp->kref));
e47c9093 2838 }
a8adb832 2839 break;
87af33fe 2840 }
a8adb832
JS
2841
2842 /* Wait for any activity on ndlps to settle */
2843 msleep(10);
87af33fe 2844 }
1151e3ec 2845 lpfc_cleanup_vports_rrqs(vport, NULL);
dea3101e 2846}
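/*
 * Timing note: the wait loop above polls every 10 ms and gives up once i
 * exceeds 3000 iterations, i.e. after roughly 30 seconds, at which point
 * any ndlps still on the list are logged with their DID, usage map and
 * kref count before the loop is abandoned.
 */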
2847
e59058c4 2848/**
3621a710 2849 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2850 * @vport: pointer to a virtual N_Port data structure.
2851 *
2852 * This routine stops all the timers associated with a @vport. This function
2853 * is invoked before disabling or deleting a @vport. Note that the physical
2854 * port is treated as @vport 0.
2855 **/
92d7f7b0
JS
2856void
2857lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2858{
92d7f7b0 2859 del_timer_sync(&vport->els_tmofunc);
92494144 2860 del_timer_sync(&vport->delayed_disc_tmo);
92d7f7b0
JS
2861 lpfc_can_disctmo(vport);
2862 return;
dea3101e 2863}
2864
ecfd03c6
JS
2865/**
2866 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2867 * @phba: pointer to lpfc hba data structure.
2868 *
2869 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2870 * caller of this routine should already hold the host lock.
2871 **/
2872void
2873__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2874{
5ac6b303
JS
2875 /* Clear pending FCF rediscovery wait flag */
2876 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2877
ecfd03c6
JS
2878 /* Now, try to stop the timer */
2879 del_timer(&phba->fcf.redisc_wait);
2880}
2881
2882/**
2883 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2884 * @phba: pointer to lpfc hba data structure.
2885 *
2886 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2887 * checks whether the FCF rediscovery wait timer is pending with the host
2888 * lock held before proceeding with disabling the timer and clearing the
2889 * wait timer pending flag.
2890 **/
2891void
2892lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2893{
2894 spin_lock_irq(&phba->hbalock);
2895 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2896 /* FCF rediscovery timer already fired or stopped */
2897 spin_unlock_irq(&phba->hbalock);
2898 return;
2899 }
2900 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
5ac6b303
JS
2901 /* Clear failover in progress flags */
2902 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
ecfd03c6
JS
2903 spin_unlock_irq(&phba->hbalock);
2904}
2905
e59058c4 2906/**
3772a991 2907 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
e59058c4
JS
2908 * @phba: pointer to lpfc hba data structure.
2909 *
2910 * This routine stops all the timers associated with a HBA. This function is
2911 * invoked before either putting a HBA offline or unloading the driver.
2912 **/
3772a991
JS
2913void
2914lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea3101e 2915{
51ef4c26 2916 lpfc_stop_vport_timers(phba->pport);
2e0fef85 2917 del_timer_sync(&phba->sli.mbox_tmo);
92d7f7b0 2918 del_timer_sync(&phba->fabric_block_timer);
9399627f 2919 del_timer_sync(&phba->eratt_poll);
3772a991 2920 del_timer_sync(&phba->hb_tmofunc);
1151e3ec
JS
2921 if (phba->sli_rev == LPFC_SLI_REV4) {
2922 del_timer_sync(&phba->rrq_tmr);
2923 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2924 }
3772a991
JS
2925 phba->hb_outstanding = 0;
2926
2927 switch (phba->pci_dev_grp) {
2928 case LPFC_PCI_DEV_LP:
2929 /* Stop any LightPulse device specific driver timers */
2930 del_timer_sync(&phba->fcp_poll_timer);
2931 break;
2932 case LPFC_PCI_DEV_OC:
2933 /* Stop any OneConnect device specific driver timers */
ecfd03c6 2934 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3772a991
JS
2935 break;
2936 default:
2937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2938 "0297 Invalid device group (x%x)\n",
2939 phba->pci_dev_grp);
2940 break;
2941 }
2e0fef85 2942 return;
dea3101e 2943}
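/*
 * Note: del_timer_sync() both deactivates a timer and waits for any
 * handler already running on another CPU to finish, so the callbacks of
 * the timers stopped above cannot still be executing when this routine
 * returns.
 */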
2944
e59058c4 2945/**
3621a710 2946 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
e59058c4
JS
2947 * @phba: pointer to lpfc hba data structure.
2948 *
2949 * This routine marks a HBA's management interface as blocked. Once the HBA's
2950 * management interface is marked as blocked, all the user space access to
2951 * the HBA, whether from the sysfs interface or the libdfc interface, will
2952 * be blocked. The HBA is set to block the management interface when the
2953 * driver prepares the HBA interface for online or offline.
2954 **/
a6ababd2 2955static void
618a5230 2956lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
a6ababd2
AB
2957{
2958 unsigned long iflag;
6e7288d9
JS
2959 uint8_t actcmd = MBX_HEARTBEAT;
2960 unsigned long timeout;
2961
a6ababd2
AB
2962 spin_lock_irqsave(&phba->hbalock, iflag);
2963 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
618a5230
JS
2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2965 if (mbx_action == LPFC_MBX_NO_WAIT)
2966 return;
2967 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2968 spin_lock_irqsave(&phba->hbalock, iflag);
a183a15f 2969 if (phba->sli.mbox_active) {
6e7288d9 2970 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
a183a15f
JS
2971 /* Determine how long we might wait for the active mailbox
2972 * command to be gracefully completed by firmware.
2973 */
2974 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2975 phba->sli.mbox_active) * 1000) + jiffies;
2976 }
a6ababd2 2977 spin_unlock_irqrestore(&phba->hbalock, iflag);
a183a15f 2978
6e7288d9
JS
2979 /* Wait for the outstanding mailbox command to complete */
2980 while (phba->sli.mbox_active) {
2981 /* Check active mailbox complete status every 2ms */
2982 msleep(2);
2983 if (time_after(jiffies, timeout)) {
2984 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2985 "2813 Mgmt IO is Blocked %x "
2986 "- mbox cmd %x still active\n",
2987 phba->sli.sli_flag, actcmd);
2988 break;
2989 }
2990 }
a6ababd2
AB
2991}
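/*
 * Usage note: callers that may sleep (e.g. lpfc_online()) pass
 * LPFC_MBX_WAIT so this routine lingers until the active mailbox command
 * completes or its timeout expires, while error-attention paths pass
 * LPFC_MBX_NO_WAIT to return as soon as the block flag is set.
 */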
2992
6b5151fd
JS
2993/**
2994 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2995 * @phba: pointer to lpfc hba data structure.
2996 *
2997 * Allocate RPIs for all active remote nodes. This is needed whenever
2998 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2999 * is to fix up the temporary rpi assignments.
3000 **/
3001void
3002lpfc_sli4_node_prep(struct lpfc_hba *phba)
3003{
3004 struct lpfc_nodelist *ndlp, *next_ndlp;
3005 struct lpfc_vport **vports;
9d3d340d
JS
3006 int i, rpi;
3007 unsigned long flags;
6b5151fd
JS
3008
3009 if (phba->sli_rev != LPFC_SLI_REV4)
3010 return;
3011
3012 vports = lpfc_create_vport_work_array(phba);
9d3d340d
JS
3013 if (vports == NULL)
3014 return;
6b5151fd 3015
9d3d340d
JS
3016 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3017 if (vports[i]->load_flag & FC_UNLOADING)
3018 continue;
3019
3020 list_for_each_entry_safe(ndlp, next_ndlp,
3021 &vports[i]->fc_nodes,
3022 nlp_listp) {
3023 if (!NLP_CHK_NODE_ACT(ndlp))
3024 continue;
3025 rpi = lpfc_sli4_alloc_rpi(phba);
3026 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3027 spin_lock_irqsave(&phba->ndlp_lock, flags);
3028 NLP_CLR_NODE_ACT(ndlp);
3029 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3030 continue;
6b5151fd 3031 }
9d3d340d
JS
3032 ndlp->nlp_rpi = rpi;
3033 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3034 "0009 rpi:%x DID:%x "
3035 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
3036 ndlp->nlp_DID, ndlp->nlp_flag,
3037 ndlp->nlp_usg_map, ndlp);
6b5151fd
JS
3038 }
3039 }
3040 lpfc_destroy_vport_work_array(phba, vports);
3041}
3042
e59058c4 3043/**
3621a710 3044 * lpfc_online - Initialize and bring a HBA online
e59058c4
JS
3045 * @phba: pointer to lpfc hba data structure.
3046 *
3047 * This routine initializes the HBA and brings a HBA online. During this
3048 * process, the management interface is blocked to prevent user space access
3049 * to the HBA interfering with the driver initialization.
3050 *
3051 * Return codes
3052 * 0 - successful
3053 * 1 - failed
3054 **/
dea3101e 3055int
2e0fef85 3056lpfc_online(struct lpfc_hba *phba)
dea3101e 3057{
372bd282 3058 struct lpfc_vport *vport;
549e55cd 3059 struct lpfc_vport **vports;
a145fda3 3060 int i, error = 0;
16a3a208 3061 bool vpis_cleared = false;
2e0fef85 3062
dea3101e 3063 if (!phba)
3064 return 0;
372bd282 3065 vport = phba->pport;
dea3101e 3066
2e0fef85 3067 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea3101e 3068 return 0;
3069
ed957684 3070 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3071 "0458 Bring Adapter online\n");
dea3101e 3072
618a5230 3073 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
46fa311e 3074
da0436e9
JS
3075 if (phba->sli_rev == LPFC_SLI_REV4) {
3076 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3077 lpfc_unblock_mgmt_io(phba);
3078 return 1;
3079 }
16a3a208
JS
3080 spin_lock_irq(&phba->hbalock);
3081 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3082 vpis_cleared = true;
3083 spin_unlock_irq(&phba->hbalock);
a145fda3
DK
3084
3085 /* Reestablish the local initiator port.
3086 * The offline process destroyed the previous lport.
3087 */
3088 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3089 !phba->nvmet_support) {
3090 error = lpfc_nvme_create_localport(phba->pport);
3091 if (error)
3092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3093 "6132 NVME restore reg failed "
3094 "on nvmei error x%x\n", error);
3095 }
da0436e9 3096 } else {
895427bd 3097 lpfc_sli_queue_init(phba);
da0436e9
JS
3098 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3099 lpfc_unblock_mgmt_io(phba);
3100 return 1;
3101 }
46fa311e 3102 }
dea3101e 3103
549e55cd 3104 vports = lpfc_create_vport_work_array(phba);
aeb6641f 3105 if (vports != NULL) {
da0436e9 3106 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
3107 struct Scsi_Host *shost;
3108 shost = lpfc_shost_from_vport(vports[i]);
3109 spin_lock_irq(shost->host_lock);
3110 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3111 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3112 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
16a3a208 3113 if (phba->sli_rev == LPFC_SLI_REV4) {
1c6834a7 3114 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
16a3a208
JS
3115 if ((vpis_cleared) &&
3116 (vports[i]->port_type !=
3117 LPFC_PHYSICAL_PORT))
3118 vports[i]->vpi = 0;
3119 }
549e55cd
JS
3120 spin_unlock_irq(shost->host_lock);
3121 }
aeb6641f
AB
3122 }
3123 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3124
46fa311e 3125 lpfc_unblock_mgmt_io(phba);
dea3101e 3126 return 0;
3127}
3128
e59058c4 3129/**
3621a710 3130 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
3131 * @phba: pointer to lpfc hba data structure.
3132 *
3133 * This routine marks a HBA's management interface as not blocked. Once the
3134 * HBA's management interface is marked as not blocked, all the user space
3135 * access to the HBA, whether they are from sysfs interface or libdfc
3136 * interface will be allowed. The HBA is set to block the management interface
3137 * when the driver prepares the HBA interface for online or offline and then
3138 * set to unblock the management interface afterwards.
3139 **/
46fa311e
JS
3140void
3141lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3142{
3143 unsigned long iflag;
3144
2e0fef85
JS
3145 spin_lock_irqsave(&phba->hbalock, iflag);
3146 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3147 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
3148}
3149
e59058c4 3150/**
3621a710 3151 * lpfc_offline_prep - Prepare a HBA to be brought offline
e59058c4
JS
3152 * @phba: pointer to lpfc hba data structure.
3153 *
3154 * This routine is invoked to prepare a HBA to be brought offline. It issues
3155 * an unreg_login to all the nodes on all vports and flushes the mailbox
3156 * queue to make it ready to be brought offline.
3157 **/
46fa311e 3158void
618a5230 3159lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
46fa311e 3160{
2e0fef85 3161 struct lpfc_vport *vport = phba->pport;
46fa311e 3162 struct lpfc_nodelist *ndlp, *next_ndlp;
87af33fe 3163 struct lpfc_vport **vports;
72100cc4 3164 struct Scsi_Host *shost;
87af33fe 3165 int i;
dea3101e 3166
2e0fef85 3167 if (vport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3168 return;
dea3101e 3169
618a5230 3170 lpfc_block_mgmt_io(phba, mbx_action);
dea3101e 3171
3172 lpfc_linkdown(phba);
3173
87af33fe
JS
3174 /* Issue an unreg_login to all nodes on all vports */
3175 vports = lpfc_create_vport_work_array(phba);
3176 if (vports != NULL) {
da0436e9 3177 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
a8adb832
JS
3178 if (vports[i]->load_flag & FC_UNLOADING)
3179 continue;
72100cc4
JS
3180 shost = lpfc_shost_from_vport(vports[i]);
3181 spin_lock_irq(shost->host_lock);
c868595d 3182 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
695a814e
JS
3183 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3184 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
72100cc4 3185 spin_unlock_irq(shost->host_lock);
695a814e 3186
87af33fe
JS
3187 shost = lpfc_shost_from_vport(vports[i]);
3188 list_for_each_entry_safe(ndlp, next_ndlp,
3189 &vports[i]->fc_nodes,
3190 nlp_listp) {
e47c9093
JS
3191 if (!NLP_CHK_NODE_ACT(ndlp))
3192 continue;
87af33fe
JS
3193 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3194 continue;
3195 if (ndlp->nlp_type & NLP_FABRIC) {
3196 lpfc_disc_state_machine(vports[i], ndlp,
3197 NULL, NLP_EVT_DEVICE_RECOVERY);
3198 lpfc_disc_state_machine(vports[i], ndlp,
3199 NULL, NLP_EVT_DEVICE_RM);
3200 }
3201 spin_lock_irq(shost->host_lock);
3202 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
401ee0c1 3203 spin_unlock_irq(shost->host_lock);
6b5151fd
JS
3204 /*
3205 * Whenever an SLI4 port goes offline, free the
401ee0c1
JS
3206 * RPI. Get a new RPI when the adapter port
3207 * comes back online.
6b5151fd 3208 */
be6bb941
JS
3209 if (phba->sli_rev == LPFC_SLI_REV4) {
3210 lpfc_printf_vlog(ndlp->vport,
3211 KERN_INFO, LOG_NODE,
3212 "0011 lpfc_offline: "
3213 "ndlp:x%p did %x "
3214 "usgmap:x%x rpi:%x\n",
3215 ndlp, ndlp->nlp_DID,
3216 ndlp->nlp_usg_map,
3217 ndlp->nlp_rpi);
3218
6b5151fd 3219 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
be6bb941 3220 }
87af33fe
JS
3221 lpfc_unreg_rpi(vports[i], ndlp);
3222 }
3223 }
3224 }
09372820 3225 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3226
618a5230 3227 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
f485c18d
DK
3228
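 /* Wait for any queued work items to finish before going offline. */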
3229 if (phba->wq)
3230 flush_workqueue(phba->wq);
46fa311e
JS
3231}
3232
e59058c4 3233/**
3621a710 3234 * lpfc_offline - Bring an HBA offline
e59058c4 3235 * @phba: pointer to lpfc hba data structure.
 3236 *
 3237 * This routine actually brings an HBA offline. It stops all the timers
3238 * associated with the HBA, brings down the SLI layer, and eventually
3239 * marks the HBA as in offline state for the upper layer protocol.
3240 **/
46fa311e 3241void
2e0fef85 3242lpfc_offline(struct lpfc_hba *phba)
46fa311e 3243{
549e55cd
JS
3244 struct Scsi_Host *shost;
3245 struct lpfc_vport **vports;
3246 int i;
46fa311e 3247
549e55cd 3248 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3249 return;
688a8863 3250
da0436e9
JS
3251 /* stop port and all timers associated with this hba */
3252 lpfc_stop_port(phba);
4b40d02b
DK
3253
3254 /* Tear down the local and target port registrations. The
 3255 * nvme transports need to clean up.
3256 */
3257 lpfc_nvmet_destroy_targetport(phba);
3258 lpfc_nvme_destroy_localport(phba->pport);
3259
51ef4c26
JS
3260 vports = lpfc_create_vport_work_array(phba);
3261 if (vports != NULL)
da0436e9 3262 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
51ef4c26 3263 lpfc_stop_vport_timers(vports[i]);
09372820 3264 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 3265 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3266 "0460 Bring Adapter offline\n");
dea3101e 3267 /* Bring down the SLI Layer and cleanup. The HBA is offline
3268 now. */
3269 lpfc_sli_hba_down(phba);
92d7f7b0 3270 spin_lock_irq(&phba->hbalock);
7054a606 3271 phba->work_ha = 0;
92d7f7b0 3272 spin_unlock_irq(&phba->hbalock);
549e55cd
JS
3273 vports = lpfc_create_vport_work_array(phba);
3274 if (vports != NULL)
da0436e9 3275 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd 3276 shost = lpfc_shost_from_vport(vports[i]);
549e55cd
JS
3277 spin_lock_irq(shost->host_lock);
3278 vports[i]->work_port_events = 0;
3279 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3280 spin_unlock_irq(shost->host_lock);
3281 }
09372820 3282 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3283}
3284
e59058c4 3285/**
3621a710 3286 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
e59058c4
JS
3287 * @phba: pointer to lpfc hba data structure.
3288 *
 3289 * This routine frees all the SCSI buffers and IOCBs from the driver
 3290 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3291 * the internal resources before the device is removed from the system.
e59058c4 3292 **/
8a9d2e80 3293static void
2e0fef85 3294lpfc_scsi_free(struct lpfc_hba *phba)
dea3101e 3295{
3296 struct lpfc_scsi_buf *sb, *sb_next;
dea3101e 3297
895427bd
JS
3298 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3299 return;
3300
2e0fef85 3301 spin_lock_irq(&phba->hbalock);
a40fc5f0 3302
dea3101e 3303 /* Release all the lpfc_scsi_bufs maintained by this host. */
a40fc5f0
JS
3304
3305 spin_lock(&phba->scsi_buf_list_put_lock);
3306 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3307 list) {
dea3101e 3308 list_del(&sb->list);
771db5c0 3309 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3310 sb->dma_handle);
dea3101e 3311 kfree(sb);
3312 phba->total_scsi_bufs--;
3313 }
a40fc5f0
JS
3314 spin_unlock(&phba->scsi_buf_list_put_lock);
3315
3316 spin_lock(&phba->scsi_buf_list_get_lock);
3317 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3318 list) {
dea3101e 3319 list_del(&sb->list);
771db5c0 3320 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3321 sb->dma_handle);
dea3101e 3322 kfree(sb);
3323 phba->total_scsi_bufs--;
3324 }
a40fc5f0 3325 spin_unlock(&phba->scsi_buf_list_get_lock);
2e0fef85 3326 spin_unlock_irq(&phba->hbalock);
8a9d2e80 3327}
895427bd
JS
3328/**
3329 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
3330 * @phba: pointer to lpfc hba data structure.
3331 *
 3332 * This routine frees all the NVME buffers and IOCBs from the driver
 3333 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3334 * the internal resources before the device is removed from the system.
3335 **/
3336static void
3337lpfc_nvme_free(struct lpfc_hba *phba)
3338{
3339 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
895427bd
JS
3340
3341 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3342 return;
3343
3344 spin_lock_irq(&phba->hbalock);
3345
3346 /* Release all the lpfc_nvme_bufs maintained by this host. */
3347 spin_lock(&phba->nvme_buf_list_put_lock);
3348 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3349 &phba->lpfc_nvme_buf_list_put, list) {
3350 list_del(&lpfc_ncmd->list);
cf1a1d3e 3351 phba->put_nvme_bufs--;
771db5c0 3352 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
895427bd
JS
3353 lpfc_ncmd->dma_handle);
3354 kfree(lpfc_ncmd);
3355 phba->total_nvme_bufs--;
3356 }
3357 spin_unlock(&phba->nvme_buf_list_put_lock);
3358
3359 spin_lock(&phba->nvme_buf_list_get_lock);
3360 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3361 &phba->lpfc_nvme_buf_list_get, list) {
3362 list_del(&lpfc_ncmd->list);
cf1a1d3e 3363 phba->get_nvme_bufs--;
771db5c0 3364 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
895427bd
JS
3365 lpfc_ncmd->dma_handle);
3366 kfree(lpfc_ncmd);
3367 phba->total_nvme_bufs--;
3368 }
3369 spin_unlock(&phba->nvme_buf_list_get_lock);
895427bd
JS
3370 spin_unlock_irq(&phba->hbalock);
3371}
8a9d2e80 3372/**
895427bd 3373 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
8a9d2e80
JS
3374 * @phba: pointer to lpfc hba data structure.
3375 *
 3376 * This routine first calculates the size of the current and the required
 3377 * ELS xri-sgl lists, and then goes through all the sgls to update the
 3378 * physical XRIs assigned due to port function reset. During port
 3379 * initialization, both counts are zero.
 3380 *
 3381 * Return codes
 3382 * 0 - successful, -ENOMEM - failed to allocate an sgl entry or XRI
3383 **/
3384int
895427bd 3385lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
8a9d2e80
JS
3386{
3387 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
895427bd 3388 uint16_t i, lxri, xri_cnt, els_xri_cnt;
8a9d2e80 3389 LIST_HEAD(els_sgl_list);
8a9d2e80
JS
3390 int rc;
3391
3392 /*
3393 * update on pci function's els xri-sgl list
3394 */
3395 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
895427bd 3396
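 /*
  * Three cases follow: the ELS xri-sgl list grows, shrinks, or stays
  * unchanged; afterwards every sgl left on the list is re-tagged with
  * a freshly allocated XRI.
  */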
8a9d2e80
JS
3397 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3398 /* els xri-sgl expanded */
3399 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3400 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3401 "3157 ELS xri-sgl count increased from "
3402 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3403 els_xri_cnt);
3404 /* allocate the additional els sgls */
3405 for (i = 0; i < xri_cnt; i++) {
3406 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3407 GFP_KERNEL);
3408 if (sglq_entry == NULL) {
3409 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3410 "2562 Failure to allocate an "
3411 "ELS sgl entry:%d\n", i);
3412 rc = -ENOMEM;
3413 goto out_free_mem;
3414 }
3415 sglq_entry->buff_type = GEN_BUFF_TYPE;
3416 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3417 &sglq_entry->phys);
3418 if (sglq_entry->virt == NULL) {
3419 kfree(sglq_entry);
3420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3421 "2563 Failure to allocate an "
3422 "ELS mbuf:%d\n", i);
3423 rc = -ENOMEM;
3424 goto out_free_mem;
3425 }
3426 sglq_entry->sgl = sglq_entry->virt;
3427 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3428 sglq_entry->state = SGL_FREED;
3429 list_add_tail(&sglq_entry->list, &els_sgl_list);
3430 }
38c20673 3431 spin_lock_irq(&phba->hbalock);
895427bd
JS
3432 spin_lock(&phba->sli4_hba.sgl_list_lock);
3433 list_splice_init(&els_sgl_list,
3434 &phba->sli4_hba.lpfc_els_sgl_list);
3435 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 3436 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
3437 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 3438 /* els xri-sgl shrunk */
3439 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3440 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3441 "3158 ELS xri-sgl count decreased from "
3442 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3443 els_xri_cnt);
3444 spin_lock_irq(&phba->hbalock);
895427bd
JS
3445 spin_lock(&phba->sli4_hba.sgl_list_lock);
3446 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3447 &els_sgl_list);
8a9d2e80
JS
3448 /* release extra els sgls from list */
3449 for (i = 0; i < xri_cnt; i++) {
3450 list_remove_head(&els_sgl_list,
3451 sglq_entry, struct lpfc_sglq, list);
3452 if (sglq_entry) {
895427bd
JS
3453 __lpfc_mbuf_free(phba, sglq_entry->virt,
3454 sglq_entry->phys);
8a9d2e80
JS
3455 kfree(sglq_entry);
3456 }
3457 }
895427bd
JS
3458 list_splice_init(&els_sgl_list,
3459 &phba->sli4_hba.lpfc_els_sgl_list);
3460 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8a9d2e80
JS
3461 spin_unlock_irq(&phba->hbalock);
3462 } else
3463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3464 "3163 ELS xri-sgl count unchanged: %d\n",
3465 els_xri_cnt);
3466 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3467
3468 /* update xris to els sgls on the list */
3469 sglq_entry = NULL;
3470 sglq_entry_next = NULL;
3471 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
895427bd 3472 &phba->sli4_hba.lpfc_els_sgl_list, list) {
8a9d2e80
JS
3473 lxri = lpfc_sli4_next_xritag(phba);
3474 if (lxri == NO_XRI) {
3475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3476 "2400 Failed to allocate xri for "
3477 "ELS sgl\n");
3478 rc = -ENOMEM;
3479 goto out_free_mem;
3480 }
3481 sglq_entry->sli4_lxritag = lxri;
3482 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3483 }
895427bd
JS
3484 return 0;
3485
3486out_free_mem:
3487 lpfc_free_els_sgl_list(phba);
3488 return rc;
3489}
3490
f358dd0c
JS
3491/**
3492 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3493 * @phba: pointer to lpfc hba data structure.
3494 *
 3495 * This routine first calculates the size of the current and the required
 3496 * NVMET xri-sgl lists, and then goes through all the sgls to update the
 3497 * physical XRIs assigned due to port function reset. During port
 3498 * initialization, both counts are zero.
 3499 *
 3500 * Return codes
 3501 * 0 - successful, -ENOMEM - failed to allocate an sgl entry or XRI
3502 **/
3503int
3504lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3505{
3506 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3507 uint16_t i, lxri, xri_cnt, els_xri_cnt;
6c621a22 3508 uint16_t nvmet_xri_cnt;
f358dd0c
JS
3509 LIST_HEAD(nvmet_sgl_list);
3510 int rc;
3511
3512 /*
3513 * update on pci function's nvmet xri-sgl list
3514 */
3515 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
61f3d4bf 3516
6c621a22
JS
3517 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3518 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
f358dd0c
JS
3519 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 3520 /* nvmet xri-sgl expanded */
3521 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3522 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3523 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3524 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3525 /* allocate the additional nvmet sgls */
3526 for (i = 0; i < xri_cnt; i++) {
3527 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3528 GFP_KERNEL);
3529 if (sglq_entry == NULL) {
3530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3531 "6303 Failure to allocate an "
3532 "NVMET sgl entry:%d\n", i);
3533 rc = -ENOMEM;
3534 goto out_free_mem;
3535 }
3536 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3537 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3538 &sglq_entry->phys);
3539 if (sglq_entry->virt == NULL) {
3540 kfree(sglq_entry);
3541 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3542 "6304 Failure to allocate an "
3543 "NVMET buf:%d\n", i);
3544 rc = -ENOMEM;
3545 goto out_free_mem;
3546 }
3547 sglq_entry->sgl = sglq_entry->virt;
3548 memset(sglq_entry->sgl, 0,
3549 phba->cfg_sg_dma_buf_size);
3550 sglq_entry->state = SGL_FREED;
3551 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3552 }
3553 spin_lock_irq(&phba->hbalock);
3554 spin_lock(&phba->sli4_hba.sgl_list_lock);
3555 list_splice_init(&nvmet_sgl_list,
3556 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3557 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3558 spin_unlock_irq(&phba->hbalock);
3559 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3560 /* nvmet xri-sgl shrunk */
3561 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3562 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3563 "6305 NVMET xri-sgl count decreased from "
3564 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3565 nvmet_xri_cnt);
3566 spin_lock_irq(&phba->hbalock);
3567 spin_lock(&phba->sli4_hba.sgl_list_lock);
3568 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3569 &nvmet_sgl_list);
3570 /* release extra nvmet sgls from list */
3571 for (i = 0; i < xri_cnt; i++) {
3572 list_remove_head(&nvmet_sgl_list,
3573 sglq_entry, struct lpfc_sglq, list);
3574 if (sglq_entry) {
3575 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3576 sglq_entry->phys);
3577 kfree(sglq_entry);
3578 }
3579 }
3580 list_splice_init(&nvmet_sgl_list,
3581 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3582 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3583 spin_unlock_irq(&phba->hbalock);
3584 } else
3585 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3586 "6306 NVMET xri-sgl count unchanged: %d\n",
3587 nvmet_xri_cnt);
3588 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3589
3590 /* update xris to nvmet sgls on the list */
3591 sglq_entry = NULL;
3592 sglq_entry_next = NULL;
3593 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3594 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3595 lxri = lpfc_sli4_next_xritag(phba);
3596 if (lxri == NO_XRI) {
3597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3598 "6307 Failed to allocate xri for "
3599 "NVMET sgl\n");
3600 rc = -ENOMEM;
3601 goto out_free_mem;
3602 }
3603 sglq_entry->sli4_lxritag = lxri;
3604 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3605 }
3606 return 0;
3607
3608out_free_mem:
3609 lpfc_free_nvmet_sgl_list(phba);
3610 return rc;
3611}
3612
895427bd
JS
3613/**
3614 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
3615 * @phba: pointer to lpfc hba data structure.
3616 *
 3617 * This routine first calculates the sizes of the current ELS and allocated
 3618 * SCSI xri-sgl lists, and then goes through all the sgls to update the
 3619 * physical XRIs assigned due to port function reset. During port
 3620 * initialization, both counts are zero.
 3621 *
 3622 * Return codes
 3623 * 0 - successful, -ENOMEM - failed to allocate an XRI
3624 **/
3625int
3626lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
3627{
3628 struct lpfc_scsi_buf *psb, *psb_next;
3629 uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
3630 LIST_HEAD(scsi_sgl_list);
3631 int rc;
8a9d2e80
JS
3632
3633 /*
895427bd 3634 * update on pci function's els xri-sgl list
8a9d2e80 3635 */
895427bd 3636 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
8a9d2e80
JS
3637 phba->total_scsi_bufs = 0;
3638
895427bd
JS
3639 /*
3640 * update on pci function's allocated scsi xri-sgl list
3641 */
8a9d2e80
JS
3642 /* maximum number of xris available for scsi buffers */
3643 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3644 els_xri_cnt;
3645
895427bd
JS
3646 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3647 return 0;
3648
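 /*
  * cfg_xri_split is a percentage: e.g. a split of 50 reserves half of
  * the non-ELS XRIs for SCSI and leaves the remainder for NVME.
  */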
3649 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3650 phba->sli4_hba.scsi_xri_max = /* Split them up */
3651 (phba->sli4_hba.scsi_xri_max *
3652 phba->cfg_xri_split) / 100;
8a9d2e80 3653
a40fc5f0 3654 spin_lock_irq(&phba->scsi_buf_list_get_lock);
164cecd1 3655 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
3656 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3657 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
164cecd1 3658 spin_unlock(&phba->scsi_buf_list_put_lock);
a40fc5f0 3659 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 3660
e8c0a779
JS
3661 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3662 "6060 Current allocated SCSI xri-sgl count:%d, "
3663 "maximum SCSI xri count:%d (split:%d)\n",
3664 phba->sli4_hba.scsi_xri_cnt,
3665 phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
3666
8a9d2e80
JS
3667 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
 3668 /* max scsi xri shrunk below the allocated scsi buffers */
3669 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3670 phba->sli4_hba.scsi_xri_max;
3671 /* release the extra allocated scsi buffers */
3672 for (i = 0; i < scsi_xri_cnt; i++) {
3673 list_remove_head(&scsi_sgl_list, psb,
3674 struct lpfc_scsi_buf, list);
a2fc4aef 3675 if (psb) {
771db5c0 3676 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
a2fc4aef
JS
3677 psb->data, psb->dma_handle);
3678 kfree(psb);
3679 }
8a9d2e80 3680 }
a40fc5f0 3681 spin_lock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 3682 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
a40fc5f0 3683 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80
JS
3684 }
3685
3686 /* update xris associated to remaining allocated scsi buffers */
3687 psb = NULL;
3688 psb_next = NULL;
3689 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3690 lxri = lpfc_sli4_next_xritag(phba);
3691 if (lxri == NO_XRI) {
3692 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3693 "2560 Failed to allocate xri for "
3694 "scsi buffer\n");
3695 rc = -ENOMEM;
3696 goto out_free_mem;
3697 }
3698 psb->cur_iocbq.sli4_lxritag = lxri;
3699 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3700 }
a40fc5f0 3701 spin_lock_irq(&phba->scsi_buf_list_get_lock);
164cecd1 3702 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
3703 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3704 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
164cecd1 3705 spin_unlock(&phba->scsi_buf_list_put_lock);
a40fc5f0 3706 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
dea3101e 3707 return 0;
8a9d2e80
JS
3708
3709out_free_mem:
8a9d2e80
JS
3710 lpfc_scsi_free(phba);
3711 return rc;
dea3101e 3712}
3713
96418b5e
JS
3714static uint64_t
3715lpfc_get_wwpn(struct lpfc_hba *phba)
3716{
3717 uint64_t wwn;
3718 int rc;
3719 LPFC_MBOXQ_t *mboxq;
3720 MAILBOX_t *mb;
3721
96418b5e
JS
3722 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3723 GFP_KERNEL);
3724 if (!mboxq)
3725 return (uint64_t)-1;
3726
3727 /* First get WWN of HBA instance */
3728 lpfc_read_nv(phba, mboxq);
3729 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3730 if (rc != MBX_SUCCESS) {
3731 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 3732 "6019 Mailbox failed, mbxCmd x%x "
3733 "READ_NV, mbxStatus x%x\n",
3734 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3735 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3736 mempool_free(mboxq, phba->mbox_mem_pool);
3737 return (uint64_t) -1;
3738 }
3739 mb = &mboxq->u.mb;
3740 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3741 /* wwn is WWPN of HBA instance */
3742 mempool_free(mboxq, phba->mbox_mem_pool);
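 /*
  * SLI4 reports the name big-endian, so convert with be64_to_cpu();
  * on earlier revisions the two 32-bit halves are swapped into place
  * with a 32-bit rotate instead.
  */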
3743 if (phba->sli_rev == LPFC_SLI_REV4)
3744 return be64_to_cpu(wwn);
3745 else
286871a6 3746 return rol64(wwn, 32);
96418b5e
JS
3747}
3748
895427bd
JS
3749/**
3750 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3751 * @phba: pointer to lpfc hba data structure.
3752 *
 3753 * This routine first calculates the size of the current and the required
 3754 * NVME xri-sgl lists, and then goes through all the sgls to update the
 3755 * physical XRIs assigned due to port function reset. During port
 3756 * initialization, both counts are zero.
 3757 *
 3758 * Return codes
 3759 * 0 - successful, -ENOMEM - failed to allocate an XRI
3760 **/
3761int
3762lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3763{
3764 struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3765 uint16_t i, lxri, els_xri_cnt;
3766 uint16_t nvme_xri_cnt, nvme_xri_max;
3767 LIST_HEAD(nvme_sgl_list);
cf1a1d3e 3768 int rc, cnt;
895427bd
JS
3769
3770 phba->total_nvme_bufs = 0;
cf1a1d3e
JS
3771 phba->get_nvme_bufs = 0;
3772 phba->put_nvme_bufs = 0;
895427bd
JS
3773
3774 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3775 return 0;
3776 /*
3777 * update on pci function's allocated nvme xri-sgl list
3778 */
3779
3780 /* maximum number of xris available for nvme buffers */
3781 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3782 nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3783 phba->sli4_hba.nvme_xri_max = nvme_xri_max;
3784 phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
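 /* NVME gets whatever XRIs remain after the ELS and SCSI reservations. */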
3785
3786 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3787 "6074 Current allocated NVME xri-sgl count:%d, "
3788 "maximum NVME xri count:%d\n",
3789 phba->sli4_hba.nvme_xri_cnt,
3790 phba->sli4_hba.nvme_xri_max);
3791
3792 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3793 spin_lock(&phba->nvme_buf_list_put_lock);
3794 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
3795 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
cf1a1d3e
JS
3796 cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
3797 phba->get_nvme_bufs = 0;
3798 phba->put_nvme_bufs = 0;
895427bd
JS
3799 spin_unlock(&phba->nvme_buf_list_put_lock);
3800 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3801
3802 if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
3803 /* max nvme xri shrunk below the allocated nvme buffers */
3804 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3805 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
3806 phba->sli4_hba.nvme_xri_max;
3807 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3808 /* release the extra allocated nvme buffers */
3809 for (i = 0; i < nvme_xri_cnt; i++) {
3810 list_remove_head(&nvme_sgl_list, lpfc_ncmd,
3811 struct lpfc_nvme_buf, list);
3812 if (lpfc_ncmd) {
771db5c0 3813 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
895427bd
JS
3814 lpfc_ncmd->data,
3815 lpfc_ncmd->dma_handle);
3816 kfree(lpfc_ncmd);
3817 }
3818 }
3819 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3820 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
3821 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3822 }
3823
3824 /* update xris associated to remaining allocated nvme buffers */
3825 lpfc_ncmd = NULL;
3826 lpfc_ncmd_next = NULL;
3827 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3828 &nvme_sgl_list, list) {
3829 lxri = lpfc_sli4_next_xritag(phba);
3830 if (lxri == NO_XRI) {
3831 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3832 "6075 Failed to allocate xri for "
3833 "nvme buffer\n");
3834 rc = -ENOMEM;
3835 goto out_free_mem;
3836 }
3837 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
3838 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3839 }
3840 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3841 spin_lock(&phba->nvme_buf_list_put_lock);
3842 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
cf1a1d3e 3843 phba->get_nvme_bufs = cnt;
895427bd
JS
3844 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
3845 spin_unlock(&phba->nvme_buf_list_put_lock);
3846 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3847 return 0;
3848
3849out_free_mem:
3850 lpfc_nvme_free(phba);
3851 return rc;
3852}
3853
e59058c4 3854/**
3621a710 3855 * lpfc_create_port - Create an FC port
e59058c4
JS
3856 * @phba: pointer to lpfc hba data structure.
3857 * @instance: a unique integer ID to this FC port.
3858 * @dev: pointer to the device data structure.
3859 *
 3860 * This routine creates an FC port for the upper layer protocol. The FC port
 3861 * can be created on top of either a physical port or a virtual port provided
 3862 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 3863 * and associates it with the newly created FC port before adding the shost
 3864 * to the SCSI layer.
3865 *
3866 * Return codes
3867 * @vport - pointer to the virtual N_Port data structure.
3868 * NULL - port create failed.
3869 **/
2e0fef85 3870struct lpfc_vport *
3de2a653 3871lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
47a8617c 3872{
2e0fef85 3873 struct lpfc_vport *vport;
895427bd 3874 struct Scsi_Host *shost = NULL;
2e0fef85 3875 int error = 0;
96418b5e
JS
3876 int i;
3877 uint64_t wwn;
3878 bool use_no_reset_hba = false;
56bc8028 3879 int rc;
96418b5e 3880
56bc8028
JS
3881 if (lpfc_no_hba_reset_cnt) {
3882 if (phba->sli_rev < LPFC_SLI_REV4 &&
3883 dev == &phba->pcidev->dev) {
3884 /* Reset the port first */
3885 lpfc_sli_brdrestart(phba);
3886 rc = lpfc_sli_chipset_init(phba);
3887 if (rc)
3888 return NULL;
3889 }
3890 wwn = lpfc_get_wwpn(phba);
3891 }
96418b5e
JS
3892
3893 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
3894 if (wwn == lpfc_no_hba_reset[i]) {
3895 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3896 "6020 Setting use_no_reset port=%llx\n",
3897 wwn);
3898 use_no_reset_hba = true;
3899 break;
3900 }
3901 }
47a8617c 3902
895427bd
JS
3903 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3904 if (dev != &phba->pcidev->dev) {
3905 shost = scsi_host_alloc(&lpfc_vport_template,
3906 sizeof(struct lpfc_vport));
3907 } else {
96418b5e 3908 if (!use_no_reset_hba)
895427bd
JS
3909 shost = scsi_host_alloc(&lpfc_template,
3910 sizeof(struct lpfc_vport));
3911 else
96418b5e 3912 shost = scsi_host_alloc(&lpfc_template_no_hr,
895427bd
JS
3913 sizeof(struct lpfc_vport));
3914 }
3915 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3916 shost = scsi_host_alloc(&lpfc_template_nvme,
ea4142f6
JS
3917 sizeof(struct lpfc_vport));
3918 }
2e0fef85
JS
3919 if (!shost)
3920 goto out;
47a8617c 3921
2e0fef85
JS
3922 vport = (struct lpfc_vport *) shost->hostdata;
3923 vport->phba = phba;
2e0fef85 3924 vport->load_flag |= FC_LOADING;
92d7f7b0 3925 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7f5f3d0d 3926 vport->fc_rscn_flush = 0;
3de2a653 3927 lpfc_get_vport_cfgparam(vport);
895427bd 3928
2e0fef85
JS
3929 shost->unique_id = instance;
3930 shost->max_id = LPFC_MAX_TARGET;
3de2a653 3931 shost->max_lun = vport->cfg_max_luns;
2e0fef85
JS
3932 shost->this_id = -1;
3933 shost->max_cmd_len = 16;
8b0dff14 3934 shost->nr_hw_queues = phba->cfg_fcp_io_channel;
da0436e9 3935 if (phba->sli_rev == LPFC_SLI_REV4) {
28baac74 3936 shost->dma_boundary =
cb5172ea 3937 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
da0436e9
JS
3938 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3939 }
81301a9b 3940
47a8617c 3941 /*
2e0fef85
JS
3942 * Set initial can_queue value since 0 is no longer supported and
3943 * scsi_add_host will fail. This will be adjusted later based on the
3944 * max xri value determined in hba setup.
47a8617c 3945 */
2e0fef85 3946 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3de2a653 3947 if (dev != &phba->pcidev->dev) {
92d7f7b0
JS
3948 shost->transportt = lpfc_vport_transport_template;
3949 vport->port_type = LPFC_NPIV_PORT;
3950 } else {
3951 shost->transportt = lpfc_transport_template;
3952 vport->port_type = LPFC_PHYSICAL_PORT;
3953 }
47a8617c 3954
2e0fef85
JS
3955 /* Initialize all internally managed lists. */
3956 INIT_LIST_HEAD(&vport->fc_nodes);
da0436e9 3957 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2e0fef85 3958 spin_lock_init(&vport->work_port_lock);
47a8617c 3959
f22eb4d3 3960 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
47a8617c 3961
f22eb4d3 3962 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
92494144 3963
f22eb4d3 3964 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
92494144 3965
d139b9bd 3966 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2e0fef85
JS
3967 if (error)
3968 goto out_put_shost;
47a8617c 3969
549e55cd 3970 spin_lock_irq(&phba->hbalock);
2e0fef85 3971 list_add_tail(&vport->listentry, &phba->port_list);
549e55cd 3972 spin_unlock_irq(&phba->hbalock);
2e0fef85 3973 return vport;
47a8617c 3974
2e0fef85
JS
3975out_put_shost:
3976 scsi_host_put(shost);
3977out:
3978 return NULL;
47a8617c
JS
3979}
3980
e59058c4 3981/**
3621a710 3982 * destroy_port - destroy an FC port
e59058c4
JS
3983 * @vport: pointer to an lpfc virtual N_Port data structure.
3984 *
 3985 * This routine destroys an FC port from the upper layer protocol. All the
3986 * resources associated with the port are released.
3987 **/
2e0fef85
JS
3988void
3989destroy_port(struct lpfc_vport *vport)
47a8617c 3990{
92d7f7b0
JS
3991 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3992 struct lpfc_hba *phba = vport->phba;
47a8617c 3993
858c9f6c 3994 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
3995 fc_remove_host(shost);
3996 scsi_remove_host(shost);
47a8617c 3997
92d7f7b0
JS
3998 spin_lock_irq(&phba->hbalock);
3999 list_del_init(&vport->listentry);
4000 spin_unlock_irq(&phba->hbalock);
47a8617c 4001
92d7f7b0 4002 lpfc_cleanup(vport);
47a8617c 4003 return;
47a8617c
JS
4004}
4005
e59058c4 4006/**
3621a710 4007 * lpfc_get_instance - Get a unique integer ID
e59058c4
JS
4008 *
4009 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4010 * uses the kernel idr facility to perform the task.
4011 *
4012 * Return codes:
4013 * instance - a unique integer ID allocated as the new instance.
4014 * -1 - lpfc get instance failed.
4015 **/
92d7f7b0
JS
4016int
4017lpfc_get_instance(void)
4018{
ab516036
TH
4019 int ret;
4020
4021 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4022 return ret < 0 ? -1 : ret;
47a8617c
JS
4023}
4024
e59058c4 4025/**
3621a710 4026 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
e59058c4
JS
4027 * @shost: pointer to SCSI host data structure.
4028 * @time: elapsed time of the scan in jiffies.
4029 *
4030 * This routine is called by the SCSI layer with a SCSI host to determine
 4031 * whether the host scan is finished.
4032 *
4033 * Note: there is no scan_start function as adapter initialization will have
4034 * asynchronously kicked off the link initialization.
4035 *
4036 * Return codes
4037 * 0 - SCSI host scan is not over yet.
4038 * 1 - SCSI host scan is over.
4039 **/
47a8617c
JS
4040int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4041{
2e0fef85
JS
4042 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4043 struct lpfc_hba *phba = vport->phba;
858c9f6c 4044 int stat = 0;
47a8617c 4045
858c9f6c
JS
4046 spin_lock_irq(shost->host_lock);
4047
51ef4c26 4048 if (vport->load_flag & FC_UNLOADING) {
858c9f6c
JS
4049 stat = 1;
4050 goto finished;
4051 }
256ec0d0 4052 if (time >= msecs_to_jiffies(30 * 1000)) {
2e0fef85 4053 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4054 "0461 Scanning longer than 30 "
4055 "seconds. Continuing initialization\n");
858c9f6c 4056 stat = 1;
47a8617c 4057 goto finished;
2e0fef85 4058 }
256ec0d0
JS
4059 if (time >= msecs_to_jiffies(15 * 1000) &&
4060 phba->link_state <= LPFC_LINK_DOWN) {
2e0fef85 4061 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4062 "0465 Link down longer than 15 "
4063 "seconds. Continuing initialization\n");
858c9f6c 4064 stat = 1;
47a8617c 4065 goto finished;
2e0fef85 4066 }
47a8617c 4067
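 /*
  * Otherwise the scan completes only once the vport is ready,
  * discovery is idle, and no mailbox command is outstanding.
  */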
2e0fef85 4068 if (vport->port_state != LPFC_VPORT_READY)
858c9f6c 4069 goto finished;
2e0fef85 4070 if (vport->num_disc_nodes || vport->fc_prli_sent)
858c9f6c 4071 goto finished;
256ec0d0 4072 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
858c9f6c 4073 goto finished;
2e0fef85 4074 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
858c9f6c
JS
4075 goto finished;
4076
4077 stat = 1;
47a8617c
JS
4078
4079finished:
858c9f6c
JS
4080 spin_unlock_irq(shost->host_lock);
4081 return stat;
92d7f7b0 4082}
47a8617c 4083
e59058c4 4084/**
3621a710 4085 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
e59058c4 4086 * @shost: pointer to SCSI host data structure.
 4087 *
 4088 * This routine initializes a given SCSI host's attributes on an FC port. The
4089 * SCSI host can be either on top of a physical port or a virtual port.
4090 **/
92d7f7b0
JS
4091void lpfc_host_attrib_init(struct Scsi_Host *shost)
4092{
4093 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4094 struct lpfc_hba *phba = vport->phba;
47a8617c 4095 /*
2e0fef85 4096 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
47a8617c
JS
4097 */
4098
2e0fef85
JS
4099 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4100 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
47a8617c
JS
4101 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4102
4103 memset(fc_host_supported_fc4s(shost), 0,
2e0fef85 4104 sizeof(fc_host_supported_fc4s(shost)));
47a8617c
JS
4105 fc_host_supported_fc4s(shost)[2] = 1;
4106 fc_host_supported_fc4s(shost)[7] = 1;
4107
92d7f7b0
JS
4108 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4109 sizeof fc_host_symbolic_name(shost));
47a8617c
JS
4110
4111 fc_host_supported_speeds(shost) = 0;
d38dd52c
JS
4112 if (phba->lmt & LMT_32Gb)
4113 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
88a2cfbb
JS
4114 if (phba->lmt & LMT_16Gb)
4115 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
47a8617c
JS
4116 if (phba->lmt & LMT_10Gb)
4117 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
a8adb832
JS
4118 if (phba->lmt & LMT_8Gb)
4119 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
47a8617c
JS
4120 if (phba->lmt & LMT_4Gb)
4121 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4122 if (phba->lmt & LMT_2Gb)
4123 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4124 if (phba->lmt & LMT_1Gb)
4125 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4126
4127 fc_host_maxframe_size(shost) =
2e0fef85
JS
4128 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4129 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
47a8617c 4130
0af5d708
MC
4131 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4132
47a8617c
JS
4133 /* This value is also unchanging */
4134 memset(fc_host_active_fc4s(shost), 0,
2e0fef85 4135 sizeof(fc_host_active_fc4s(shost)));
47a8617c
JS
4136 fc_host_active_fc4s(shost)[2] = 1;
4137 fc_host_active_fc4s(shost)[7] = 1;
4138
92d7f7b0 4139 fc_host_max_npiv_vports(shost) = phba->max_vpi;
47a8617c 4140 spin_lock_irq(shost->host_lock);
51ef4c26 4141 vport->load_flag &= ~FC_LOADING;
47a8617c 4142 spin_unlock_irq(shost->host_lock);
47a8617c 4143}
dea3101e 4144
e59058c4 4145/**
da0436e9 4146 * lpfc_stop_port_s3 - Stop SLI3 device port
e59058c4
JS
4147 * @phba: pointer to lpfc hba data structure.
4148 *
da0436e9
JS
 4149 * This routine is invoked to stop an SLI3 device port. It stops the device
4150 * from generating interrupts and stops the device driver's timers for the
4151 * device.
e59058c4 4152 **/
da0436e9
JS
4153static void
4154lpfc_stop_port_s3(struct lpfc_hba *phba)
db2378e0 4155{
da0436e9
JS
4156 /* Clear all interrupt enable conditions */
4157 writel(0, phba->HCregaddr);
4158 readl(phba->HCregaddr); /* flush */
4159 /* Clear all pending interrupts */
4160 writel(0xffffffff, phba->HAregaddr);
4161 readl(phba->HAregaddr); /* flush */
db2378e0 4162
da0436e9
JS
4163 /* Reset some HBA SLI setup states */
4164 lpfc_stop_hba_timers(phba);
4165 phba->pport->work_port_events = 0;
4166}
db2378e0 4167
da0436e9
JS
4168/**
4169 * lpfc_stop_port_s4 - Stop SLI4 device port
4170 * @phba: pointer to lpfc hba data structure.
4171 *
 4172 * This routine is invoked to stop an SLI4 device port. It stops the device
4173 * from generating interrupts and stops the device driver's timers for the
4174 * device.
4175 **/
4176static void
4177lpfc_stop_port_s4(struct lpfc_hba *phba)
4178{
4179 /* Reset some HBA SLI4 setup states */
4180 lpfc_stop_hba_timers(phba);
4181 phba->pport->work_port_events = 0;
4182 phba->sli4_hba.intr_enable = 0;
da0436e9 4183}
9399627f 4184
da0436e9
JS
4185/**
4186 * lpfc_stop_port - Wrapper function for stopping hba port
4187 * @phba: Pointer to HBA context object.
4188 *
4189 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4190 * the API jump table function pointer from the lpfc_hba struct.
4191 **/
4192void
4193lpfc_stop_port(struct lpfc_hba *phba)
4194{
4195 phba->lpfc_stop_port(phba);
f485c18d
DK
4196
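 /* Drain the driver workqueue so deferred work cannot touch a stopped port. */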
4197 if (phba->wq)
4198 flush_workqueue(phba->wq);
da0436e9 4199}
db2378e0 4200
ecfd03c6
JS
4201/**
4202 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4203 * @phba: Pointer to hba for which this call is being executed.
4204 *
4205 * This routine starts the timer waiting for the FCF rediscovery to complete.
4206 **/
4207void
4208lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4209{
4210 unsigned long fcf_redisc_wait_tmo =
4211 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4212 /* Start fcf rediscovery wait period timer */
4213 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4214 spin_lock_irq(&phba->hbalock);
4215 /* Allow action to new fcf asynchronous event */
4216 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4217 /* Mark the FCF rediscovery pending state */
4218 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4219 spin_unlock_irq(&phba->hbalock);
4220}
4221
4222/**
4223 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4224 * @ptr: Map to lpfc_hba data structure pointer.
4225 *
4226 * This routine is invoked when waiting for FCF table rediscover has been
4227 * timed out. If new FCF record(s) has (have) been discovered during the
4228 * wait period, a new FCF event shall be added to the FCOE async event
4229 * list, and then worker thread shall be waked up for processing from the
4230 * worker thread context.
4231 **/
e399b228 4232static void
f22eb4d3 4233lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
ecfd03c6 4234{
f22eb4d3 4235 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
ecfd03c6
JS
4236
4237 /* Don't send FCF rediscovery event if timer cancelled */
4238 spin_lock_irq(&phba->hbalock);
4239 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4240 spin_unlock_irq(&phba->hbalock);
4241 return;
4242 }
4243 /* Clear FCF rediscovery timer pending flag */
4244 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4245 /* FCF rediscovery event to worker thread */
4246 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4247 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 4248 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 4249 "2776 FCF rediscover quiescent timer expired\n");
ecfd03c6
JS
4250 /* wake up worker thread */
4251 lpfc_worker_wake_up(phba);
4252}
4253
e59058c4 4254/**
da0436e9 4255 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
e59058c4 4256 * @phba: pointer to lpfc hba data structure.
da0436e9 4257 * @acqe_link: pointer to the async link completion queue entry.
e59058c4 4258 *
da0436e9
JS
4259 * This routine is to parse the SLI4 link-attention link fault code and
4260 * translate it into the base driver's read link attention mailbox command
4261 * status.
4262 *
4263 * Return: Link-attention status in terms of base driver's coding.
e59058c4 4264 **/
da0436e9
JS
4265static uint16_t
4266lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4267 struct lpfc_acqe_link *acqe_link)
db2378e0 4268{
da0436e9 4269 uint16_t latt_fault;
9399627f 4270
da0436e9
JS
4271 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4272 case LPFC_ASYNC_LINK_FAULT_NONE:
4273 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4274 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4275 latt_fault = 0;
4276 break;
4277 default:
4278 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4279 "0398 Invalid link fault code: x%x\n",
4280 bf_get(lpfc_acqe_link_fault, acqe_link));
4281 latt_fault = MBXERR_ERROR;
4282 break;
4283 }
4284 return latt_fault;
db2378e0
JS
4285}
4286
5b75da2f 4287/**
da0436e9 4288 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5b75da2f 4289 * @phba: pointer to lpfc hba data structure.
da0436e9 4290 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 4291 *
da0436e9
JS
4292 * This routine is to parse the SLI4 link attention type and translate it
4293 * into the base driver's link attention type coding.
5b75da2f 4294 *
da0436e9
JS
4295 * Return: Link attention type in terms of base driver's coding.
4296 **/
4297static uint8_t
4298lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4299 struct lpfc_acqe_link *acqe_link)
5b75da2f 4300{
da0436e9 4301 uint8_t att_type;
5b75da2f 4302
da0436e9
JS
4303 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4304 case LPFC_ASYNC_LINK_STATUS_DOWN:
4305 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
76a95d75 4306 att_type = LPFC_ATT_LINK_DOWN;
da0436e9
JS
4307 break;
4308 case LPFC_ASYNC_LINK_STATUS_UP:
4309 /* Ignore physical link up events - wait for logical link up */
76a95d75 4310 att_type = LPFC_ATT_RESERVED;
da0436e9
JS
4311 break;
4312 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
76a95d75 4313 att_type = LPFC_ATT_LINK_UP;
da0436e9
JS
4314 break;
4315 default:
4316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4317 "0399 Invalid link attention type: x%x\n",
4318 bf_get(lpfc_acqe_link_status, acqe_link));
76a95d75 4319 att_type = LPFC_ATT_RESERVED;
da0436e9 4320 break;
5b75da2f 4321 }
da0436e9 4322 return att_type;
5b75da2f
JS
4323}
4324
8b68cd52
JS
4325/**
 4326 * lpfc_sli_port_speed_get - Get the FC port's link speed in Mbps
4327 * @phba: pointer to lpfc hba data structure.
4328 *
 4329 * This routine returns an FC port's current link speed in Mbps.
4330 *
4331 * Return: link speed in terms of Mbps.
4332 **/
4333uint32_t
4334lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4335{
4336 uint32_t link_speed;
4337
4338 if (!lpfc_is_link_up(phba))
4339 return 0;
4340
a085e87c
JS
4341 if (phba->sli_rev <= LPFC_SLI_REV3) {
4342 switch (phba->fc_linkspeed) {
4343 case LPFC_LINK_SPEED_1GHZ:
4344 link_speed = 1000;
4345 break;
4346 case LPFC_LINK_SPEED_2GHZ:
4347 link_speed = 2000;
4348 break;
4349 case LPFC_LINK_SPEED_4GHZ:
4350 link_speed = 4000;
4351 break;
4352 case LPFC_LINK_SPEED_8GHZ:
4353 link_speed = 8000;
4354 break;
4355 case LPFC_LINK_SPEED_10GHZ:
4356 link_speed = 10000;
4357 break;
4358 case LPFC_LINK_SPEED_16GHZ:
4359 link_speed = 16000;
4360 break;
4361 default:
4362 link_speed = 0;
4363 }
4364 } else {
4365 if (phba->sli4_hba.link_state.logical_speed)
4366 link_speed =
4367 phba->sli4_hba.link_state.logical_speed;
4368 else
4369 link_speed = phba->sli4_hba.link_state.speed;
8b68cd52
JS
4370 }
4371 return link_speed;
4372}
4373
4374/**
4375 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4376 * @phba: pointer to lpfc hba data structure.
4377 * @evt_code: asynchronous event code.
4378 * @speed_code: asynchronous event link speed code.
4379 *
 4380 * This routine is to parse the given SLI4 async event link speed code into
4381 * value of Mbps for the link speed.
4382 *
4383 * Return: link speed in terms of Mbps.
4384 **/
4385static uint32_t
4386lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4387 uint8_t speed_code)
4388{
4389 uint32_t port_speed;
4390
4391 switch (evt_code) {
4392 case LPFC_TRAILER_CODE_LINK:
4393 switch (speed_code) {
26d830ec 4394 case LPFC_ASYNC_LINK_SPEED_ZERO:
8b68cd52
JS
4395 port_speed = 0;
4396 break;
26d830ec 4397 case LPFC_ASYNC_LINK_SPEED_10MBPS:
8b68cd52
JS
4398 port_speed = 10;
4399 break;
26d830ec 4400 case LPFC_ASYNC_LINK_SPEED_100MBPS:
8b68cd52
JS
4401 port_speed = 100;
4402 break;
26d830ec 4403 case LPFC_ASYNC_LINK_SPEED_1GBPS:
8b68cd52
JS
4404 port_speed = 1000;
4405 break;
26d830ec 4406 case LPFC_ASYNC_LINK_SPEED_10GBPS:
8b68cd52
JS
4407 port_speed = 10000;
4408 break;
26d830ec
JS
4409 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4410 port_speed = 20000;
4411 break;
4412 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4413 port_speed = 25000;
4414 break;
4415 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4416 port_speed = 40000;
4417 break;
8b68cd52
JS
4418 default:
4419 port_speed = 0;
4420 }
4421 break;
4422 case LPFC_TRAILER_CODE_FC:
4423 switch (speed_code) {
26d830ec 4424 case LPFC_FC_LA_SPEED_UNKNOWN:
8b68cd52
JS
4425 port_speed = 0;
4426 break;
26d830ec 4427 case LPFC_FC_LA_SPEED_1G:
8b68cd52
JS
4428 port_speed = 1000;
4429 break;
26d830ec 4430 case LPFC_FC_LA_SPEED_2G:
8b68cd52
JS
4431 port_speed = 2000;
4432 break;
26d830ec 4433 case LPFC_FC_LA_SPEED_4G:
8b68cd52
JS
4434 port_speed = 4000;
4435 break;
26d830ec 4436 case LPFC_FC_LA_SPEED_8G:
8b68cd52
JS
4437 port_speed = 8000;
4438 break;
26d830ec 4439 case LPFC_FC_LA_SPEED_10G:
8b68cd52
JS
4440 port_speed = 10000;
4441 break;
26d830ec 4442 case LPFC_FC_LA_SPEED_16G:
8b68cd52
JS
4443 port_speed = 16000;
4444 break;
d38dd52c
JS
4445 case LPFC_FC_LA_SPEED_32G:
4446 port_speed = 32000;
4447 break;
8b68cd52
JS
4448 default:
4449 port_speed = 0;
4450 }
4451 break;
4452 default:
4453 port_speed = 0;
4454 }
4455 return port_speed;
4456}
4457
da0436e9 4458/**
70f3c073 4459 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
da0436e9
JS
4460 * @phba: pointer to lpfc hba data structure.
4461 * @acqe_link: pointer to the async link completion queue entry.
4462 *
70f3c073 4463 * This routine is to handle the SLI4 asynchronous FCoE link event.
da0436e9
JS
4464 **/
4465static void
4466lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4467 struct lpfc_acqe_link *acqe_link)
4468{
4469 struct lpfc_dmabuf *mp;
4470 LPFC_MBOXQ_t *pmb;
4471 MAILBOX_t *mb;
76a95d75 4472 struct lpfc_mbx_read_top *la;
da0436e9 4473 uint8_t att_type;
76a95d75 4474 int rc;
da0436e9
JS
4475
4476 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
76a95d75 4477 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
da0436e9 4478 return;
32b9793f 4479 phba->fcoe_eventtag = acqe_link->event_tag;
da0436e9
JS
4480 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4481 if (!pmb) {
4482 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4483 "0395 The mboxq allocation failed\n");
4484 return;
4485 }
4486 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4487 if (!mp) {
4488 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4489 "0396 The lpfc_dmabuf allocation failed\n");
4490 goto out_free_pmb;
4491 }
4492 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4493 if (!mp->virt) {
4494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4495 "0397 The mbuf allocation failed\n");
4496 goto out_free_dmabuf;
4497 }
4498
4499 /* Cleanup any outstanding ELS commands */
4500 lpfc_els_flush_all_cmd(phba);
4501
4502 /* Block ELS IOCBs until we have done process link event */
895427bd 4503 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
da0436e9
JS
4504
4505 /* Update link event statistics */
4506 phba->sli.slistat.link_event++;
4507
76a95d75
JS
4508 /* Create lpfc_handle_latt mailbox command from link ACQE */
4509 lpfc_read_topology(phba, pmb, mp);
4510 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
da0436e9
JS
4511 pmb->vport = phba->pport;
4512
da0436e9
JS
4513 /* Keep the link status for extra SLI4 state machine reference */
4514 phba->sli4_hba.link_state.speed =
8b68cd52
JS
4515 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4516 bf_get(lpfc_acqe_link_speed, acqe_link));
da0436e9
JS
4517 phba->sli4_hba.link_state.duplex =
4518 bf_get(lpfc_acqe_link_duplex, acqe_link);
4519 phba->sli4_hba.link_state.status =
4520 bf_get(lpfc_acqe_link_status, acqe_link);
70f3c073
JS
4521 phba->sli4_hba.link_state.type =
4522 bf_get(lpfc_acqe_link_type, acqe_link);
4523 phba->sli4_hba.link_state.number =
4524 bf_get(lpfc_acqe_link_number, acqe_link);
da0436e9
JS
4525 phba->sli4_hba.link_state.fault =
4526 bf_get(lpfc_acqe_link_fault, acqe_link);
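 /* The ACQE carries the logical link speed in units of 10 Mbps. */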
65467b6b 4527 phba->sli4_hba.link_state.logical_speed =
8b68cd52
JS
4528 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4529
70f3c073 4530 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
c31098ce
JS
4531 "2900 Async FC/FCoE Link event - Speed:%dGBit "
4532 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4533 "Logical speed:%dMbps Fault:%d\n",
70f3c073
JS
4534 phba->sli4_hba.link_state.speed,
 4535 phba->sli4_hba.link_state.duplex,
4536 phba->sli4_hba.link_state.status,
4537 phba->sli4_hba.link_state.type,
4538 phba->sli4_hba.link_state.number,
8b68cd52 4539 phba->sli4_hba.link_state.logical_speed,
70f3c073 4540 phba->sli4_hba.link_state.fault);
76a95d75
JS
4541 /*
4542 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4543 * topology info. Note: Optional for non FC-AL ports.
4544 */
4545 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4546 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4547 if (rc == MBX_NOT_FINISHED)
4548 goto out_free_dmabuf;
4549 return;
4550 }
4551 /*
4552 * For FCoE Mode: fill in all the topology information we need and call
4553 * the READ_TOPOLOGY completion routine to continue without actually
4554 * sending the READ_TOPOLOGY mailbox command to the port.
4555 */
4556 /* Parse and translate status field */
4557 mb = &pmb->u.mb;
4558 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
4559
4560 /* Parse and translate link attention fields */
4561 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4562 la->eventTag = acqe_link->event_tag;
4563 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4564 bf_set(lpfc_mbx_read_top_link_spd, la,
a085e87c 4565 (bf_get(lpfc_acqe_link_speed, acqe_link)));
76a95d75
JS
4566
 4567 /* Fake the following irrelevant fields */
4568 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4569 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4570 bf_set(lpfc_mbx_read_top_il, la, 0);
4571 bf_set(lpfc_mbx_read_top_pb, la, 0);
4572 bf_set(lpfc_mbx_read_top_fa, la, 0);
4573 bf_set(lpfc_mbx_read_top_mm, la, 0);
da0436e9
JS
4574
4575 /* Invoke the lpfc_handle_latt mailbox command callback function */
76a95d75 4576 lpfc_mbx_cmpl_read_topology(phba, pmb);
da0436e9 4577
5b75da2f 4578 return;
da0436e9
JS
4579
4580out_free_dmabuf:
4581 kfree(mp);
4582out_free_pmb:
4583 mempool_free(pmb, phba->mbox_mem_pool);
4584}
4585
70f3c073
JS
4586/**
4587 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4588 * @phba: pointer to lpfc hba data structure.
4589 * @acqe_fc: pointer to the async fc completion queue entry.
4590 *
4591 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4592 * that the event was received and then issue a read_topology mailbox command so
4593 * that the rest of the driver will treat it the same as SLI3.
4594 **/
4595static void
4596lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4597{
4598 struct lpfc_dmabuf *mp;
4599 LPFC_MBOXQ_t *pmb;
7bdedb34
JS
4600 MAILBOX_t *mb;
4601 struct lpfc_mbx_read_top *la;
70f3c073
JS
4602 int rc;
4603
4604 if (bf_get(lpfc_trailer_type, acqe_fc) !=
4605 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4606 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4607 "2895 Non FC link Event detected.(%d)\n",
4608 bf_get(lpfc_trailer_type, acqe_fc));
4609 return;
4610 }
4611 /* Keep the link status for extra SLI4 state machine reference */
4612 phba->sli4_hba.link_state.speed =
8b68cd52
JS
4613 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4614 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
70f3c073
JS
4615 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4616 phba->sli4_hba.link_state.topology =
4617 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4618 phba->sli4_hba.link_state.status =
4619 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4620 phba->sli4_hba.link_state.type =
4621 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4622 phba->sli4_hba.link_state.number =
4623 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4624 phba->sli4_hba.link_state.fault =
4625 bf_get(lpfc_acqe_link_fault, acqe_fc);
4626 phba->sli4_hba.link_state.logical_speed =
8b68cd52 4627 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
70f3c073
JS
4628 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4629 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4630 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4631 "%dMbps Fault:%d\n",
4632 phba->sli4_hba.link_state.speed,
4633 phba->sli4_hba.link_state.topology,
4634 phba->sli4_hba.link_state.status,
4635 phba->sli4_hba.link_state.type,
4636 phba->sli4_hba.link_state.number,
8b68cd52 4637 phba->sli4_hba.link_state.logical_speed,
70f3c073
JS
4638 phba->sli4_hba.link_state.fault);
4639 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4640 if (!pmb) {
4641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4642 "2897 The mboxq allocation failed\n");
4643 return;
4644 }
4645 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4646 if (!mp) {
4647 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4648 "2898 The lpfc_dmabuf allocation failed\n");
4649 goto out_free_pmb;
4650 }
4651 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4652 if (!mp->virt) {
4653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4654 "2899 The mbuf allocation failed\n");
4655 goto out_free_dmabuf;
4656 }
4657
4658 /* Cleanup any outstanding ELS commands */
4659 lpfc_els_flush_all_cmd(phba);
4660
4661 /* Block ELS IOCBs until we have done process link event */
895427bd 4662 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
70f3c073
JS
4663
4664 /* Update link event statistics */
4665 phba->sli.slistat.link_event++;
4666
4667 /* Create lpfc_handle_latt mailbox command from link ACQE */
4668 lpfc_read_topology(phba, pmb, mp);
4669 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4670 pmb->vport = phba->pport;
4671
7bdedb34 4672 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
ae9e28f3
JS
4673 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
4674
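 /* Record any MDS diagnostic link state reported with this attention. */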
4675 switch (phba->sli4_hba.link_state.status) {
4676 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
4677 phba->link_flag |= LS_MDS_LINK_DOWN;
4678 break;
4679 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
4680 phba->link_flag |= LS_MDS_LOOPBACK;
4681 break;
4682 default:
4683 break;
4684 }
4685
7bdedb34
JS
4686 /* Parse and translate status field */
4687 mb = &pmb->u.mb;
4688 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
4689 (void *)acqe_fc);
4690
4691 /* Parse and translate link attention fields */
4692 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
4693 la->eventTag = acqe_fc->event_tag;
7bdedb34 4694
aeb3c817
JS
4695 if (phba->sli4_hba.link_state.status ==
4696 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
4697 bf_set(lpfc_mbx_read_top_att_type, la,
4698 LPFC_FC_LA_TYPE_UNEXP_WWPN);
4699 } else {
4700 bf_set(lpfc_mbx_read_top_att_type, la,
4701 LPFC_FC_LA_TYPE_LINK_DOWN);
4702 }
7bdedb34
JS
4703 /* Invoke the mailbox command callback function */
4704 lpfc_mbx_cmpl_read_topology(phba, pmb);
4705
4706 return;
4707 }
4708
70f3c073
JS
4709 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4710 if (rc == MBX_NOT_FINISHED)
4711 goto out_free_dmabuf;
4712 return;
4713
4714out_free_dmabuf:
4715 kfree(mp);
4716out_free_pmb:
4717 mempool_free(pmb, phba->mbox_mem_pool);
4718}
4719
4720/**
 4721 * lpfc_sli4_async_sli_evt - Process an asynchronous SLI event
 4722 * @phba: pointer to lpfc hba data structure.
 4723 * @acqe_sli: pointer to the async SLI completion queue entry.
4724 *
4725 * This routine is to handle the SLI4 asynchronous SLI events.
4726 **/
4727static void
4728lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4729{
4b8bae08 4730 char port_name;
8c1312e1 4731 char message[128];
4b8bae08 4732 uint8_t status;
946727dc 4733 uint8_t evt_type;
448193b5 4734 uint8_t operational = 0;
946727dc 4735 struct temp_event temp_event_data;
4b8bae08 4736 struct lpfc_acqe_misconfigured_event *misconfigured;
946727dc
JS
4737 struct Scsi_Host *shost;
4738
4739 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
4b8bae08 4740
448193b5
JS
4741 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4742 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4743 "x%08x SLI Event Type:%d\n",
4744 acqe_sli->event_data1, acqe_sli->event_data2,
4745 evt_type);
4b8bae08
JS
4746
4747 port_name = phba->Port[0];
4748 if (port_name == 0x00)
4749 port_name = '?'; /* port name is empty */
4750
946727dc
JS
4751 switch (evt_type) {
4752 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
4753 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4754 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
4755 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4756
4757 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4758 "3190 Over Temperature:%d Celsius- Port Name %c\n",
4759 acqe_sli->event_data1, port_name);
4760
310429ef 4761 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
946727dc
JS
4762 shost = lpfc_shost_from_vport(phba->pport);
4763 fc_host_post_vendor_event(shost, fc_get_event_number(),
4764 sizeof(temp_event_data),
4765 (char *)&temp_event_data,
4766 SCSI_NL_VID_TYPE_PCI
4767 | PCI_VENDOR_ID_EMULEX);
4768 break;
4769 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
4770 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4771 temp_event_data.event_code = LPFC_NORMAL_TEMP;
4772 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4773
4774 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4775 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
4776 acqe_sli->event_data1, port_name);
4777
4778 shost = lpfc_shost_from_vport(phba->pport);
4779 fc_host_post_vendor_event(shost, fc_get_event_number(),
4780 sizeof(temp_event_data),
4781 (char *)&temp_event_data,
4782 SCSI_NL_VID_TYPE_PCI
4783 | PCI_VENDOR_ID_EMULEX);
4784 break;
4785 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
4786 misconfigured = (struct lpfc_acqe_misconfigured_event *)
4b8bae08
JS
4787 &acqe_sli->event_data1;
4788
946727dc
JS
4789 /* fetch the status for this port */
4790 switch (phba->sli4_hba.lnk_info.lnk_no) {
4791 case LPFC_LINK_NUMBER_0:
448193b5
JS
4792 status = bf_get(lpfc_sli_misconfigured_port0_state,
4793 &misconfigured->theEvent);
4794 operational = bf_get(lpfc_sli_misconfigured_port0_op,
4b8bae08 4795 &misconfigured->theEvent);
946727dc
JS
4796 break;
4797 case LPFC_LINK_NUMBER_1:
448193b5
JS
4798 status = bf_get(lpfc_sli_misconfigured_port1_state,
4799 &misconfigured->theEvent);
4800 operational = bf_get(lpfc_sli_misconfigured_port1_op,
4b8bae08 4801 &misconfigured->theEvent);
946727dc
JS
4802 break;
4803 case LPFC_LINK_NUMBER_2:
448193b5
JS
4804 status = bf_get(lpfc_sli_misconfigured_port2_state,
4805 &misconfigured->theEvent);
4806 operational = bf_get(lpfc_sli_misconfigured_port2_op,
4b8bae08 4807 &misconfigured->theEvent);
946727dc
JS
4808 break;
4809 case LPFC_LINK_NUMBER_3:
448193b5
JS
4810 status = bf_get(lpfc_sli_misconfigured_port3_state,
4811 &misconfigured->theEvent);
4812 operational = bf_get(lpfc_sli_misconfigured_port3_op,
4b8bae08 4813 &misconfigured->theEvent);
946727dc
JS
4814 break;
4815 default:
448193b5
JS
4816 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4817 "3296 "
4818 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
4819 "event: Invalid link %d",
4820 phba->sli4_hba.lnk_info.lnk_no);
4821 return;
946727dc 4822 }
4b8bae08 4823
448193b5
JS
4824 /* Skip if optic state unchanged */
4825 if (phba->sli4_hba.lnk_info.optic_state == status)
4826 return;
4827
946727dc
JS
4828 switch (status) {
4829 case LPFC_SLI_EVENT_STATUS_VALID:
448193b5
JS
4830 sprintf(message, "Physical Link is functional");
4831 break;
946727dc
JS
4832 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4833 sprintf(message, "Optics faulted/incorrectly "
4834 "installed/not installed - Reseat optics, "
4835 "if issue not resolved, replace.");
4836 break;
4837 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4838 sprintf(message,
4839 "Optics of two types installed - Remove one "
4840 "optic or install matching pair of optics.");
4841 break;
4842 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4843 sprintf(message, "Incompatible optics - Replace with "
292098be 4844 "compatible optics for card to function.");
946727dc 4845 break;
448193b5
JS
4846 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
4847 sprintf(message, "Unqualified optics - Replace with "
4848 "Avago optics for Warranty and Technical "
4849 "Support - Link is%s operational",
2ea259ee 4850 (operational) ? " not" : "");
448193b5
JS
4851 break;
4852 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
4853 sprintf(message, "Uncertified optics - Replace with "
4854 "Avago-certified optics to enable link "
4855 "operation - Link is%s operational",
2ea259ee 4856 (operational) ? " not" : "");
448193b5 4857 break;
946727dc
JS
4858 default:
4859 /* firmware is reporting a status we don't know about */
4860 sprintf(message, "Unknown event status x%02x", status);
4861 break;
4862 }
448193b5 4863 phba->sli4_hba.lnk_info.optic_state = status;
946727dc 4864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
448193b5 4865 "3176 Port Name %c %s\n", port_name, message);
946727dc
JS
4866 break;
4867 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
4868 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4869 "3192 Remote DPort Test Initiated - "
4870 "Event Data1:x%08x Event Data2: x%08x\n",
4871 acqe_sli->event_data1, acqe_sli->event_data2);
4b8bae08
JS
4872 break;
4873 default:
946727dc
JS
4874 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4875 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4876 "x%08x SLI Event Type:%d\n",
4877 acqe_sli->event_data1, acqe_sli->event_data2,
4878 evt_type);
4b8bae08
JS
4879 break;
4880 }
70f3c073
JS
4881}
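/*
 * Editor's illustrative sketch (not part of the driver source): the
 * misconfigured-port handling above selects a different bf_get()
 * accessor per link number because each link's state lives in its own
 * field of the event word.  Assuming a byte-per-link packing (an
 * assumption made for illustration only, not taken from lpfc_hw4.h),
 * the per-link selection collapses to a shift and mask:
 */
#include <stdint.h>

static uint8_t example_misconfigured_state(uint32_t event_word,
					   unsigned int link_no)
{
	/* link 0 in bits 7:0, link 1 in bits 15:8, and so on */
	return (uint8_t)(event_word >> (link_no * 8));
}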
4882
fc2b989b
JS
4883/**
4884 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4885 * @vport: pointer to vport data structure.
4886 *
4887 * This routine is to perform Clear Virtual Link (CVL) on a vport in
4888 * response to a CVL event.
4889 *
4890 * Return the pointer to the ndlp associated with the vport on success, otherwise
4891 * return NULL.
4892 **/
4893static struct lpfc_nodelist *
4894lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4895{
4896 struct lpfc_nodelist *ndlp;
4897 struct Scsi_Host *shost;
4898 struct lpfc_hba *phba;
4899
4900 if (!vport)
4901 return NULL;
fc2b989b
JS
4902 phba = vport->phba;
4903 if (!phba)
4904 return NULL;
78730cfe
JS
4905 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4906 if (!ndlp) {
4907 /* Cannot find existing Fabric ndlp, so allocate a new one */
9d3d340d 4908 ndlp = lpfc_nlp_init(vport, Fabric_DID);
78730cfe
JS
4909 if (!ndlp)
4910 return NULL;
78730cfe
JS
4911 /* Set the node type */
4912 ndlp->nlp_type |= NLP_FABRIC;
4913 /* Put ndlp onto node list */
4914 lpfc_enqueue_node(vport, ndlp);
4915 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4916 /* re-setup ndlp without removing from node list */
4917 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4918 if (!ndlp)
4919 return NULL;
4920 }
63e801ce
JS
4921 if ((phba->pport->port_state < LPFC_FLOGI) &&
4922 (phba->pport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
4923 return NULL;
4924 /* If the virtual link is not yet instantiated, ignore the CVL */
63e801ce
JS
4925 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4926 && (vport->port_state != LPFC_VPORT_FAILED))
fc2b989b
JS
4927 return NULL;
4928 shost = lpfc_shost_from_vport(vport);
4929 if (!shost)
4930 return NULL;
4931 lpfc_linkdown_port(vport);
4932 lpfc_cleanup_pending_mbox(vport);
4933 spin_lock_irq(shost->host_lock);
4934 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4935 spin_unlock_irq(shost->host_lock);
4936
4937 return ndlp;
4938}
4939
4940/**
4941 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4942 * @phba: pointer to lpfc hba data structure.
4943 *
4944 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4945 * response to a FCF dead event.
4946 **/
4947static void
4948lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4949{
4950 struct lpfc_vport **vports;
4951 int i;
4952
4953 vports = lpfc_create_vport_work_array(phba);
4954 if (vports)
4955 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4956 lpfc_sli4_perform_vport_cvl(vports[i]);
4957 lpfc_destroy_vport_work_array(phba, vports);
4958}
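/*
 * Editor's illustrative sketch (not part of the driver source): the
 * vport work array used above is both bounded (max_vports) and
 * NULL-terminated, and iteration stops at whichever limit is hit
 * first.  A minimal userspace analogue of that walk:
 */
#include <stddef.h>

static void example_for_each_vport(void **arr, size_t max_vports,
				   void (*fn)(void *))
{
	if (!arr)
		return;
	/* mirrors: for (i = 0; i <= max_vports && vports[i]; i++) */
	for (size_t i = 0; i <= max_vports && arr[i] != NULL; i++)
		fn(arr[i]);
}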
4959
da0436e9 4960/**
76a95d75 4961 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
da0436e9
JS
4962 * @phba: pointer to lpfc hba data structure.
4963 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
4964 *
4965 * This routine is to handle the SLI4 asynchronous fcoe event.
4966 **/
4967static void
76a95d75 4968lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
70f3c073 4969 struct lpfc_acqe_fip *acqe_fip)
da0436e9 4970{
70f3c073 4971 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
da0436e9 4972 int rc;
6669f9bb
JS
4973 struct lpfc_vport *vport;
4974 struct lpfc_nodelist *ndlp;
4975 struct Scsi_Host *shost;
695a814e
JS
4976 int active_vlink_present;
4977 struct lpfc_vport **vports;
4978 int i;
da0436e9 4979
70f3c073
JS
4980 phba->fc_eventTag = acqe_fip->event_tag;
4981 phba->fcoe_eventtag = acqe_fip->event_tag;
da0436e9 4982 switch (event_type) {
70f3c073
JS
4983 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4984 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4985 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
999d813f
JS
4986 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4987 LOG_DISCOVERY,
a93ff37a
JS
4988 "2546 New FCF event, evt_tag:x%x, "
4989 "index:x%x\n",
70f3c073
JS
4990 acqe_fip->event_tag,
4991 acqe_fip->index);
999d813f
JS
4992 else
4993 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4994 LOG_DISCOVERY,
a93ff37a
JS
4995 "2788 FCF param modified event, "
4996 "evt_tag:x%x, index:x%x\n",
70f3c073
JS
4997 acqe_fip->event_tag,
4998 acqe_fip->index);
38b92ef8 4999 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
0c9ab6f5
JS
5000 /*
5001 * During period of FCF discovery, read the FCF
5002 * table record indexed by the event to update
a93ff37a 5003 * FCF roundrobin failover eligible FCF bmask.
0c9ab6f5
JS
5004 */
5005 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5006 LOG_DISCOVERY,
a93ff37a
JS
5007 "2779 Read FCF (x%x) for updating "
5008 "roundrobin FCF failover bmask\n",
70f3c073
JS
5009 acqe_fip->index);
5010 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
0c9ab6f5 5011 }
38b92ef8
JS
5012
5013 /* If the FCF discovery is in progress, do nothing. */
3804dc84 5014 spin_lock_irq(&phba->hbalock);
a93ff37a 5015 if (phba->hba_flag & FCF_TS_INPROG) {
38b92ef8
JS
5016 spin_unlock_irq(&phba->hbalock);
5017 break;
5018 }
5019 /* If fast FCF failover rescan event is pending, do nothing */
5020 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
5021 spin_unlock_irq(&phba->hbalock);
5022 break;
5023 }
5024
c2b9712e
JS
5025 /* If the FCF has been in discovered state, do nothing. */
5026 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3804dc84
JS
5027 spin_unlock_irq(&phba->hbalock);
5028 break;
5029 }
5030 spin_unlock_irq(&phba->hbalock);
38b92ef8 5031
0c9ab6f5
JS
5032 /* Otherwise, scan the entire FCF table and re-discover SAN */
5033 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a
JS
5034 "2770 Start FCF table scan per async FCF "
5035 "event, evt_tag:x%x, index:x%x\n",
70f3c073 5036 acqe_fip->event_tag, acqe_fip->index);
0c9ab6f5
JS
5037 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5038 LPFC_FCOE_FCF_GET_FIRST);
da0436e9 5039 if (rc)
0c9ab6f5
JS
5040 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5041 "2547 Issue FCF scan read FCF mailbox "
a93ff37a 5042 "command failed (x%x)\n", rc);
da0436e9
JS
5043 break;
5044
70f3c073 5045 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
da0436e9 5046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e4e74273 5047 "2548 FCF Table full count 0x%x tag 0x%x\n",
70f3c073
JS
5048 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5049 acqe_fip->event_tag);
da0436e9
JS
5050 break;
5051
70f3c073 5052 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
80c17849 5053 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
0c9ab6f5 5054 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
a93ff37a 5055 "2549 FCF (x%x) disconnected from network, "
70f3c073 5056 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
38b92ef8
JS
5057 /*
5058 * If we are in the middle of FCF failover process, clear
5059 * the corresponding FCF bit in the roundrobin bitmap.
da0436e9 5060 */
fc2b989b 5061 spin_lock_irq(&phba->hbalock);
a1cadfef
JS
5062 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5063 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
fc2b989b 5064 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 5065 /* Update FLOGI FCF failover eligible FCF bmask */
70f3c073 5066 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
fc2b989b
JS
5067 break;
5068 }
38b92ef8
JS
5069 spin_unlock_irq(&phba->hbalock);
5070
5071 /* If the event is not for currently used fcf do nothing */
70f3c073 5072 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
38b92ef8
JS
5073 break;
5074
5075 /*
5076 * Otherwise, request the port to rediscover the entire FCF
5077 * table for a fast recovery from the case that the current FCF
5078 * is no longer valid, since we are not already in the middle of
5079 * an FCF failover process.
5080 */
c2b9712e
JS
5081 spin_lock_irq(&phba->hbalock);
5082 /* Mark the fast failover process in progress */
5083 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5084 spin_unlock_irq(&phba->hbalock);
5085
5086 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5087 "2771 Start FCF fast failover process due to "
5088 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5089 "\n", acqe_fip->event_tag, acqe_fip->index);
5090 rc = lpfc_sli4_redisc_fcf_table(phba);
5091 if (rc) {
5092 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5093 LOG_DISCOVERY,
5094 "2772 Issue FCF rediscover mabilbox "
5095 "command failed, fail through to FCF "
5096 "dead event\n");
5097 spin_lock_irq(&phba->hbalock);
5098 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5099 spin_unlock_irq(&phba->hbalock);
5100 /*
5101 * Last resort will fail over by treating this
5102 * as a link down to FCF registration.
5103 */
5104 lpfc_sli4_fcf_dead_failthrough(phba);
5105 } else {
5106 /* Reset FCF roundrobin bmask for new discovery */
5107 lpfc_sli4_clear_fcf_rr_bmask(phba);
5108 /*
5109 * Handling fast FCF failover to a DEAD FCF event is
5110 * considered equivalent to receiving a CVL on all vports.
5111 */
5112 lpfc_sli4_perform_all_vport_cvl(phba);
5113 }
da0436e9 5114 break;
70f3c073 5115 case LPFC_FIP_EVENT_TYPE_CVL:
80c17849 5116 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
0c9ab6f5 5117 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
6669f9bb 5118 "2718 Clear Virtual Link Received for VPI 0x%x"
70f3c073 5119 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6d368e53 5120
6669f9bb 5121 vport = lpfc_find_vport_by_vpid(phba,
5248a749 5122 acqe_fip->index);
fc2b989b 5123 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6669f9bb
JS
5124 if (!ndlp)
5125 break;
695a814e
JS
5126 active_vlink_present = 0;
5127
5128 vports = lpfc_create_vport_work_array(phba);
5129 if (vports) {
5130 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5131 i++) {
5132 if ((!(vports[i]->fc_flag &
5133 FC_VPORT_CVL_RCVD)) &&
5134 (vports[i]->port_state > LPFC_FDISC)) {
5135 active_vlink_present = 1;
5136 break;
5137 }
5138 }
5139 lpfc_destroy_vport_work_array(phba, vports);
5140 }
5141
cc82355a
JS
5142 /*
5143 * Don't re-instantiate if vport is marked for deletion.
5144 * If we are here first then vport_delete is going to wait
5145 * for discovery to complete.
5146 */
5147 if (!(vport->load_flag & FC_UNLOADING) &&
5148 active_vlink_present) {
695a814e
JS
5149 /*
5150 * If there are other active VLinks present,
5151 * re-instantiate the Vlink using FDISC.
5152 */
256ec0d0
JS
5153 mod_timer(&ndlp->nlp_delayfunc,
5154 jiffies + msecs_to_jiffies(1000));
fc2b989b 5155 shost = lpfc_shost_from_vport(vport);
6669f9bb
JS
5156 spin_lock_irq(shost->host_lock);
5157 ndlp->nlp_flag |= NLP_DELAY_TMO;
5158 spin_unlock_irq(shost->host_lock);
695a814e
JS
5159 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5160 vport->port_state = LPFC_FDISC;
5161 } else {
ecfd03c6
JS
5162 /*
5163 * Otherwise, we request the port to rediscover
5164 * the entire FCF table for a fast recovery
5165 * from the possible case that the current FCF
0c9ab6f5
JS
5166 * is no longer valid if we are not already
5167 * in the FCF failover process.
ecfd03c6 5168 */
fc2b989b 5169 spin_lock_irq(&phba->hbalock);
0c9ab6f5 5170 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
fc2b989b
JS
5171 spin_unlock_irq(&phba->hbalock);
5172 break;
5173 }
5174 /* Mark the fast failover process in progress */
0c9ab6f5 5175 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
fc2b989b 5176 spin_unlock_irq(&phba->hbalock);
0c9ab6f5
JS
5177 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5178 LOG_DISCOVERY,
a93ff37a 5179 "2773 Start FCF failover per CVL, "
70f3c073 5180 "evt_tag:x%x\n", acqe_fip->event_tag);
ecfd03c6 5181 rc = lpfc_sli4_redisc_fcf_table(phba);
fc2b989b 5182 if (rc) {
0c9ab6f5
JS
5183 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5184 LOG_DISCOVERY,
5185 "2774 Issue FCF rediscover "
5186 "mabilbox command failed, "
5187 "through to CVL event\n");
fc2b989b 5188 spin_lock_irq(&phba->hbalock);
0c9ab6f5 5189 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
fc2b989b 5190 spin_unlock_irq(&phba->hbalock);
ecfd03c6
JS
5191 /*
5192 * Last resort will be to re-try on
5193 * the currently registered FCF entry.
5194 */
5195 lpfc_retry_pport_discovery(phba);
38b92ef8
JS
5196 } else
5197 /*
5198 * Reset FCF roundrobin bmask for new
5199 * discovery.
5200 */
7d791df7 5201 lpfc_sli4_clear_fcf_rr_bmask(phba);
6669f9bb
JS
5202 }
5203 break;
da0436e9
JS
5204 default:
5205 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5206 "0288 Unknown FCoE event type 0x%x event tag "
70f3c073 5207 "0x%x\n", event_type, acqe_fip->event_tag);
da0436e9
JS
5208 break;
5209 }
5210}
5211
5212/**
5213 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5214 * @phba: pointer to lpfc hba data structure.
5215 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
5216 *
5217 * This routine is to handle the SLI4 asynchronous dcbx event.
5218 **/
5219static void
5220lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5221 struct lpfc_acqe_dcbx *acqe_dcbx)
5222{
4d9ab994 5223 phba->fc_eventTag = acqe_dcbx->event_tag;
da0436e9
JS
5224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5225 "0290 The SLI4 DCBX asynchronous event is not "
5226 "handled yet\n");
5227}
5228
b19a061a
JS
5229/**
5230 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5231 * @phba: pointer to lpfc hba data structure.
5232 * @acqe_grp5: pointer to the async grp5 completion queue entry.
5233 *
5234 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5235 * is an asynchronous notification of a logical link speed change. The Port
5236 * reports the logical link speed in units of 10Mbps.
5237 **/
5238static void
5239lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5240 struct lpfc_acqe_grp5 *acqe_grp5)
5241{
5242 uint16_t prev_ll_spd;
5243
5244 phba->fc_eventTag = acqe_grp5->event_tag;
5245 phba->fcoe_eventtag = acqe_grp5->event_tag;
5246 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5247 phba->sli4_hba.link_state.logical_speed =
8b68cd52 5248 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
b19a061a
JS
5249 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5250 "2789 GRP5 Async Event: Updating logical link speed "
8b68cd52
JS
5251 "from %dMbps to %dMbps\n", prev_ll_spd,
5252 phba->sli4_hba.link_state.logical_speed);
b19a061a
JS
5253}
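/*
 * Editor's worked example: the GRP5 ACQE reports logical link speed in
 * units of 10 Mbps, hence the "* 10" above.  A raw
 * lpfc_acqe_grp5_llink_spd value of 1000 therefore decodes to
 * 1000 * 10 = 10000 Mbps, i.e. a 10 Gb/s logical link.
 */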
5254
da0436e9
JS
5255/**
5256 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
5257 * @phba: pointer to lpfc hba data structure.
5258 *
5259 * This routine is invoked by the worker thread to process all the pending
5260 * SLI4 asynchronous events.
5261 **/
5262void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5263{
5264 struct lpfc_cq_event *cq_event;
5265
5266 /* First, declare the async event has been handled */
5267 spin_lock_irq(&phba->hbalock);
5268 phba->hba_flag &= ~ASYNC_EVENT;
5269 spin_unlock_irq(&phba->hbalock);
5270 /* Now, handle all the async events */
5271 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5272 /* Get the first event from the head of the event queue */
5273 spin_lock_irq(&phba->hbalock);
5274 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5275 cq_event, struct lpfc_cq_event, list);
5276 spin_unlock_irq(&phba->hbalock);
5277 /* Process the asynchronous event */
5278 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5279 case LPFC_TRAILER_CODE_LINK:
5280 lpfc_sli4_async_link_evt(phba,
5281 &cq_event->cqe.acqe_link);
5282 break;
5283 case LPFC_TRAILER_CODE_FCOE:
70f3c073 5284 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
da0436e9
JS
5285 break;
5286 case LPFC_TRAILER_CODE_DCBX:
5287 lpfc_sli4_async_dcbx_evt(phba,
5288 &cq_event->cqe.acqe_dcbx);
5289 break;
b19a061a
JS
5290 case LPFC_TRAILER_CODE_GRP5:
5291 lpfc_sli4_async_grp5_evt(phba,
5292 &cq_event->cqe.acqe_grp5);
5293 break;
70f3c073
JS
5294 case LPFC_TRAILER_CODE_FC:
5295 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5296 break;
5297 case LPFC_TRAILER_CODE_SLI:
5298 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5299 break;
da0436e9
JS
5300 default:
5301 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5302 "1804 Invalid asynchrous event code: "
5303 "x%x\n", bf_get(lpfc_trailer_code,
5304 &cq_event->cqe.mcqe_cmpl));
5305 break;
5306 }
5307 /* Free the completion event processed to the free pool */
5308 lpfc_sli4_cq_event_release(phba, cq_event);
5309 }
5310}
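/*
 * Editor's illustrative sketch (not part of the driver source): the
 * worker loop above pops one event at a time under hbalock and then
 * processes it with the lock dropped, so the per-event handlers are
 * free to sleep or take other locks.  A userspace analogue of that
 * pop-under-lock/process-unlocked pattern using pthreads:
 */
#include <pthread.h>
#include <stddef.h>

struct example_event { struct example_event *next; int code; };

static struct example_event *example_head;
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void example_drain(void (*handle)(struct example_event *))
{
	for (;;) {
		pthread_mutex_lock(&example_lock);
		struct example_event *e = example_head;
		if (e)
			example_head = e->next;	/* pop the list head */
		pthread_mutex_unlock(&example_lock);
		if (!e)
			break;
		handle(e);	/* runs without the lock held */
	}
}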
5311
ecfd03c6
JS
5312/**
5313 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5314 * @phba: pointer to lpfc hba data structure.
5315 *
5316 * This routine is invoked by the worker thread to process FCF table
5317 * rediscovery pending completion event.
5318 **/
5319void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5320{
5321 int rc;
5322
5323 spin_lock_irq(&phba->hbalock);
5324 /* Clear FCF rediscovery timeout event */
5325 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5326 /* Clear driver fast failover FCF record flag */
5327 phba->fcf.failover_rec.flag = 0;
5328 /* Set state for FCF fast failover */
5329 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5330 spin_unlock_irq(&phba->hbalock);
5331
5332 /* Scan FCF table from the first entry to re-discover SAN */
0c9ab6f5 5333 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
a93ff37a 5334 "2777 Start post-quiescent FCF table scan\n");
0c9ab6f5 5335 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
ecfd03c6 5336 if (rc)
0c9ab6f5
JS
5337 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5338 "2747 Issue FCF scan read FCF mailbox "
5339 "command failed 0x%x\n", rc);
ecfd03c6
JS
5340}
5341
da0436e9
JS
5342/**
5343 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5344 * @phba: pointer to lpfc hba data structure.
5345 * @dev_grp: The HBA PCI-Device group number.
5346 *
5347 * This routine is invoked to set up the per HBA PCI-Device group function
5348 * API jump table entries.
5349 *
5350 * Return: 0 if success, otherwise -ENODEV
5351 **/
5352int
5353lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5354{
5355 int rc;
5356
5357 /* Set up lpfc PCI-device group */
5358 phba->pci_dev_grp = dev_grp;
5359
5360 /* The LPFC_PCI_DEV_OC uses SLI4 */
5361 if (dev_grp == LPFC_PCI_DEV_OC)
5362 phba->sli_rev = LPFC_SLI_REV4;
5363
5364 /* Set up device INIT API function jump table */
5365 rc = lpfc_init_api_table_setup(phba, dev_grp);
5366 if (rc)
5367 return -ENODEV;
5368 /* Set up SCSI API function jump table */
5369 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5370 if (rc)
5371 return -ENODEV;
5372 /* Set up SLI API function jump table */
5373 rc = lpfc_sli_api_table_setup(phba, dev_grp);
5374 if (rc)
5375 return -ENODEV;
5376 /* Set up MBOX API function jump table */
5377 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5378 if (rc)
5379 return -ENODEV;
5380
5381 return 0;
5b75da2f
JS
5382}
5383
5384/**
3621a710 5385 * lpfc_log_intr_mode - Log the active interrupt mode
5b75da2f
JS
5386 * @phba: pointer to lpfc hba data structure.
5387 * @intr_mode: active interrupt mode adopted.
5388 *
5389 * This routine is invoked to log the currently used active interrupt mode
5390 * to the device.
3772a991
JS
5391 **/
5392static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5b75da2f
JS
5393{
5394 switch (intr_mode) {
5395 case 0:
5396 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5397 "0470 Enable INTx interrupt mode.\n");
5398 break;
5399 case 1:
5400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5401 "0481 Enabled MSI interrupt mode.\n");
5402 break;
5403 case 2:
5404 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5405 "0480 Enabled MSI-X interrupt mode.\n");
5406 break;
5407 default:
5408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5409 "0482 Illegal interrupt mode.\n");
5410 break;
5411 }
5412 return;
5413}
5414
5b75da2f 5415/**
3772a991 5416 * lpfc_enable_pci_dev - Enable a generic PCI device.
5b75da2f
JS
5417 * @phba: pointer to lpfc hba data structure.
5418 *
3772a991
JS
5419 * This routine is invoked to perform the PCI device enabling that is
5420 * common to all lpfc PCI devices.
5b75da2f
JS
5421 *
5422 * Return codes
af901ca1 5423 * 0 - successful
3772a991 5424 * other values - error
5b75da2f 5425 **/
3772a991
JS
5426static int
5427lpfc_enable_pci_dev(struct lpfc_hba *phba)
5b75da2f 5428{
3772a991 5429 struct pci_dev *pdev;
5b75da2f 5430
3772a991
JS
5431 /* Obtain PCI device reference */
5432 if (!phba->pcidev)
5433 goto out_error;
5434 else
5435 pdev = phba->pcidev;
3772a991
JS
5436 /* Enable PCI device */
5437 if (pci_enable_device_mem(pdev))
5438 goto out_error;
5439 /* Request PCI resource for the device */
e0c0483c 5440 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
3772a991
JS
5441 goto out_disable_device;
5442 /* Set up device as PCI master and save state for EEH */
5443 pci_set_master(pdev);
5444 pci_try_set_mwi(pdev);
5445 pci_save_state(pdev);
5b75da2f 5446
0558056c 5447 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
453193e0 5448 if (pci_is_pcie(pdev))
0558056c
JS
5449 pdev->needs_freset = 1;
5450
3772a991 5451 return 0;
5b75da2f 5452
3772a991
JS
5453out_disable_device:
5454 pci_disable_device(pdev);
5455out_error:
079b5c91 5456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e0c0483c 5457 "1401 Failed to enable pci device\n");
3772a991 5458 return -ENODEV;
5b75da2f
JS
5459}
5460
5461/**
3772a991 5462 * lpfc_disable_pci_dev - Disable a generic PCI device.
5b75da2f
JS
5463 * @phba: pointer to lpfc hba data structure.
5464 *
3772a991
JS
5465 * This routine is invoked to perform the PCI device disabling that is
5466 * common to all lpfc PCI devices.
5b75da2f
JS
5467 **/
5468static void
3772a991 5469lpfc_disable_pci_dev(struct lpfc_hba *phba)
5b75da2f 5470{
3772a991 5471 struct pci_dev *pdev;
5b75da2f 5472
3772a991
JS
5473 /* Obtain PCI device reference */
5474 if (!phba->pcidev)
5475 return;
5476 else
5477 pdev = phba->pcidev;
3772a991 5478 /* Release PCI resource and disable PCI device */
e0c0483c 5479 pci_release_mem_regions(pdev);
3772a991 5480 pci_disable_device(pdev);
5b75da2f
JS
5481
5482 return;
5483}
5484
e59058c4 5485/**
3772a991
JS
5486 * lpfc_reset_hba - Reset a hba
5487 * @phba: pointer to lpfc hba data structure.
e59058c4 5488 *
3772a991
JS
5489 * This routine is invoked to reset a hba device. It brings the HBA
5490 * offline, performs a board restart, and then brings the board back
5491 * online. The lpfc_offline calls lpfc_sli_hba_down, which cleans up
5492 * outstanding mailbox commands.
e59058c4 5493 **/
3772a991
JS
5494void
5495lpfc_reset_hba(struct lpfc_hba *phba)
dea3101e 5496{
3772a991
JS
5497 /* If resets are disabled then set error state and return. */
5498 if (!phba->cfg_enable_hba_reset) {
5499 phba->link_state = LPFC_HBA_ERROR;
5500 return;
5501 }
ee62021a
JS
5502 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
5503 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5504 else
5505 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
3772a991
JS
5506 lpfc_offline(phba);
5507 lpfc_sli_brdrestart(phba);
5508 lpfc_online(phba);
5509 lpfc_unblock_mgmt_io(phba);
5510}
dea3101e 5511
0a96e975
JS
5512/**
5513 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
5514 * @phba: pointer to lpfc hba data structure.
5515 *
5516 * This function reads the SR-IOV extended capability from the device's
5517 * PCI configuration space and returns the maximum number of virtual
5518 * functions (TotalVFs) that the physical function supports. If the
5519 * device does not implement the SR-IOV extended capability, this
5520 * function returns 0.
5521 **/
5522uint16_t
5523lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
5524{
5525 struct pci_dev *pdev = phba->pcidev;
5526 uint16_t nr_virtfn;
5527 int pos;
5528
0a96e975
JS
5529 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
5530 if (pos == 0)
5531 return 0;
5532
5533 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
5534 return nr_virtfn;
5535}
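/*
 * Editor's illustrative sketch (not part of the driver source): the
 * same TotalVFs value read above from the SR-IOV extended capability
 * is also exported by the PCI core through sysfs, so an equivalent
 * userspace check looks like this (the device address argument is a
 * placeholder supplied by the caller):
 */
#include <stdio.h>

static int example_total_vfs(const char *pci_addr)
{
	char path[128];
	int vfs = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov_totalvfs", pci_addr);
	f = fopen(path, "r");
	if (!f)
		return 0;	/* no SR-IOV capability: mirror the 0 return */
	if (fscanf(f, "%d", &vfs) != 1)
		vfs = 0;
	fclose(f);
	return vfs;
}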
5536
912e3acd
JS
5537/**
5538 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
5539 * @phba: pointer to lpfc hba data structure.
5540 * @nr_vfn: number of virtual functions to be enabled.
5541 *
5542 * This function enables PCI SR-IOV virtual functions on a physical
5543 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
5544 * enable that many virtual functions on the physical function. As not
5545 * all devices support SR-IOV, a failure return from the pci_enable_sriov()
5546 * API call is not considered an error condition for most devices.
5547 **/
5548int
5549lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
5550{
5551 struct pci_dev *pdev = phba->pcidev;
0a96e975 5552 uint16_t max_nr_vfn;
912e3acd
JS
5553 int rc;
5554
0a96e975
JS
5555 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
5556 if (nr_vfn > max_nr_vfn) {
5557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5558 "3057 Requested vfs (%d) greater than "
5559 "supported vfs (%d)", nr_vfn, max_nr_vfn);
5560 return -EINVAL;
5561 }
5562
912e3acd
JS
5563 rc = pci_enable_sriov(pdev, nr_vfn);
5564 if (rc) {
5565 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5566 "2806 Failed to enable sriov on this device "
5567 "with vfn number nr_vf:%d, rc:%d\n",
5568 nr_vfn, rc);
5569 } else
5570 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5571 "2807 Successful enable sriov on this device "
5572 "with vfn number nr_vf:%d\n", nr_vfn);
5573 return rc;
5574}
5575
3772a991 5576/**
895427bd 5577 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3772a991
JS
5578 * @phba: pointer to lpfc hba data structure.
5579 *
895427bd
JS
5580 * This routine is invoked to set up the driver internal resources before the
5581 * device specific resource setup to support the HBA device it is attached to.
3772a991
JS
5582 *
5583 * Return codes
895427bd
JS
5584 * 0 - successful
5585 * other values - error
3772a991
JS
5586 **/
5587static int
895427bd 5588lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3772a991 5589{
895427bd 5590 struct lpfc_sli *psli = &phba->sli;
dea3101e 5591
2e0fef85 5592 /*
895427bd 5593 * Driver resources common to all SLI revisions
2e0fef85 5594 */
895427bd
JS
5595 atomic_set(&phba->fast_event_count, 0);
5596 spin_lock_init(&phba->hbalock);
dea3101e 5597
895427bd
JS
5598 /* Initialize ndlp management spinlock */
5599 spin_lock_init(&phba->ndlp_lock);
5600
5601 INIT_LIST_HEAD(&phba->port_list);
5602 INIT_LIST_HEAD(&phba->work_list);
5603 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5604
5605 /* Initialize the wait queue head for the kernel thread */
5606 init_waitqueue_head(&phba->work_waitq);
5607
5608 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
f358dd0c 5609 "1403 Protocols supported %s %s %s\n",
895427bd
JS
5610 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
5611 "SCSI" : " "),
5612 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
f358dd0c
JS
5613 "NVME" : " "),
5614 (phba->nvmet_support ? "NVMET" : " "));
895427bd
JS
5615
5616 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5617 /* Initialize the scsi buffer list used by driver for scsi IO */
5618 spin_lock_init(&phba->scsi_buf_list_get_lock);
5619 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5620 spin_lock_init(&phba->scsi_buf_list_put_lock);
5621 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5622 }
5623
5624 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
5625 (phba->nvmet_support == 0)) {
5626 /* Initialize the NVME buffer list used by driver for NVME IO */
5627 spin_lock_init(&phba->nvme_buf_list_get_lock);
5628 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
cf1a1d3e 5629 phba->get_nvme_bufs = 0;
895427bd
JS
5630 spin_lock_init(&phba->nvme_buf_list_put_lock);
5631 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
cf1a1d3e 5632 phba->put_nvme_bufs = 0;
895427bd
JS
5633 }
5634
5635 /* Initialize the fabric iocb list */
5636 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5637
5638 /* Initialize list to save ELS buffers */
5639 INIT_LIST_HEAD(&phba->elsbuf);
5640
5641 /* Initialize FCF connection rec list */
5642 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5643
5644 /* Initialize OAS configuration list */
5645 spin_lock_init(&phba->devicelock);
5646 INIT_LIST_HEAD(&phba->luns);
858c9f6c 5647
3772a991 5648 /* MBOX heartbeat timer */
f22eb4d3 5649 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
3772a991 5650 /* Fabric block timer */
f22eb4d3 5651 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
3772a991 5652 /* EA polling mode timer */
f22eb4d3 5653 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
895427bd 5654 /* Heartbeat timer */
f22eb4d3 5655 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
895427bd
JS
5656
5657 return 0;
5658}
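/*
 * Editor's illustrative sketch (not part of the driver source): each
 * timer_setup() call above binds a callback taking a
 * struct timer_list *, and the handlers conventionally recover their
 * containing structure with from_timer(), a container_of() wrapper.
 * A self-contained userspace analogue of that recovery via offsetof:
 */
#include <stddef.h>

struct example_timer { int armed; };

struct example_hba {
	int brd_no;
	struct example_timer hb_tmofunc;	/* embedded, like the real hba */
};

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void example_hb_timeout(struct example_timer *t)
{
	struct example_hba *phba =
		example_container_of(t, struct example_hba, hb_tmofunc);

	(void)phba;	/* the real handler would wake the worker thread */
}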
5659
5660/**
5661 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
5662 * @phba: pointer to lpfc hba data structure.
5663 *
5664 * This routine is invoked to set up the driver internal resources specific to
5665 * support the SLI-3 HBA device it is attached to.
5666 *
5667 * Return codes
5668 * 0 - successful
5669 * other values - error
5670 **/
5671static int
5672lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5673{
5674 int rc;
5675
5676 /*
5677 * Initialize timers used by driver
5678 */
5679
5680 /* FCP polling mode timer */
f22eb4d3 5681 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
dea3101e 5682
3772a991
JS
5683 /* Host attention work mask setup */
5684 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
5685 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
dea3101e 5686
3772a991
JS
5687 /* Get all the module params for configuring this host */
5688 lpfc_get_cfgparam(phba);
895427bd
JS
5689 /* Set up phase-1 common device driver resources */
5690
5691 rc = lpfc_setup_driver_resource_phase1(phba);
5692 if (rc)
5693 return -ENODEV;
5694
49198b37
JS
5695 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
5696 phba->menlo_flag |= HBA_MENLO_SUPPORT;
5697 /* check for menlo minimum sg count */
5698 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
5699 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
5700 }
5701
895427bd
JS
5702 if (!phba->sli.sli3_ring)
5703 phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
2a76a283 5704 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
895427bd 5705 if (!phba->sli.sli3_ring)
2a76a283
JS
5706 return -ENOMEM;
5707
dea3101e 5708 /*
96f7077f 5709 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
3772a991 5710 * used to create the sg_dma_buf_pool must be dynamically calculated.
dea3101e 5711 */
3772a991 5712
96f7077f
JS
5713 /* Initialize the host templates with the configured values. */
5714 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
96418b5e
JS
5715 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5716 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
96f7077f
JS
5717
5718 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
3772a991 5719 if (phba->cfg_enable_bg) {
96f7077f
JS
5720 /*
5721 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5722 * the FCP rsp, and a BDE for each. Since we have no control
5723 * over how many protection data segments the SCSI Layer
5724 * will hand us (ie: there could be one for every block
5725 * in the IO), we just allocate enough BDEs to accommodate
5726 * our max amount and we need to limit lpfc_sg_seg_cnt to
5727 * minimize the risk of running out.
5728 */
5729 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5730 sizeof(struct fcp_rsp) +
5731 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
5732
5733 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
5734 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
5735
5736 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
5737 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
5738 } else {
5739 /*
5740 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5741 * the FCP rsp, a BDE for each, and a BDE for up to
5742 * cfg_sg_seg_cnt data segments.
5743 */
5744 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5745 sizeof(struct fcp_rsp) +
5746 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
5747
5748 /* Total BDEs in BPL for scsi_sg_list */
5749 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
901a920f 5750 }
dea3101e 5751
96f7077f
JS
5752 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5753 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
5754 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5755 phba->cfg_total_seg_cnt);
dea3101e 5756
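/*
 * Editor's worked example (sizes are assumptions for illustration, not
 * values taken from the headers): the non-BlockGuard branch above sizes
 * each DMA buffer as
 *
 *   sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
 *       + (cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)
 *
 * so assuming a 32-byte FCP cmnd, a 96-byte FCP rsp, a 12-byte BDE and
 * a cfg_sg_seg_cnt of 64, the pool buffer works out to
 * 32 + 96 + 66 * 12 = 920 bytes, with cfg_total_seg_cnt = 66.
 */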
3772a991
JS
5757 phba->max_vpi = LPFC_MAX_VPI;
5758 /* This will be set to correct value after config_port mbox */
5759 phba->max_vports = 0;
dea3101e 5760
3772a991
JS
5761 /*
5762 * Initialize the SLI Layer to run with lpfc HBAs.
5763 */
5764 lpfc_sli_setup(phba);
895427bd 5765 lpfc_sli_queue_init(phba);
ed957684 5766
3772a991
JS
5767 /* Allocate device driver memory */
5768 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
5769 return -ENOMEM;
51ef4c26 5770
912e3acd
JS
5771 /*
5772 * Enable sr-iov virtual functions if supported and configured
5773 * through the module parameter.
5774 */
5775 if (phba->cfg_sriov_nr_virtfn > 0) {
5776 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5777 phba->cfg_sriov_nr_virtfn);
5778 if (rc) {
5779 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5780 "2808 Requested number of SR-IOV "
5781 "virtual functions (%d) is not "
5782 "supported\n",
5783 phba->cfg_sriov_nr_virtfn);
5784 phba->cfg_sriov_nr_virtfn = 0;
5785 }
5786 }
5787
3772a991
JS
5788 return 0;
5789}
ed957684 5790
3772a991
JS
5791/**
5792 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
5793 * @phba: pointer to lpfc hba data structure.
5794 *
5795 * This routine is invoked to unset the driver internal resources set up
5796 * specific for supporting the SLI-3 HBA device it is attached to.
5797 **/
5798static void
5799lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
5800{
5801 /* Free device driver memory allocated */
5802 lpfc_mem_free_all(phba);
3163f725 5803
3772a991
JS
5804 return;
5805}
dea3101e 5806
3772a991 5807/**
da0436e9 5808 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3772a991
JS
5809 * @phba: pointer to lpfc hba data structure.
5810 *
da0436e9
JS
5811 * This routine is invoked to set up the driver internal resources specific to
5812 * support the SLI-4 HBA device it is attached to.
3772a991
JS
5813 *
5814 * Return codes
af901ca1 5815 * 0 - successful
da0436e9 5816 * other values - error
3772a991
JS
5817 **/
5818static int
da0436e9 5819lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3772a991 5820{
28baac74 5821 LPFC_MBOXQ_t *mboxq;
f358dd0c 5822 MAILBOX_t *mb;
895427bd 5823 int rc, i, max_buf_size;
28baac74
JS
5824 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
5825 struct lpfc_mqe *mqe;
09294d46 5826 int longs;
1ba981fd 5827 int fof_vectors = 0;
81e6a637 5828 int extra;
f358dd0c 5829 uint64_t wwn;
da0436e9 5830
895427bd
JS
5831 phba->sli4_hba.num_online_cpu = num_online_cpus();
5832 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
5833 phba->sli4_hba.curr_disp_cpu = 0;
5834
716d3bc5
JS
5835 /* Get all the module params for configuring this host */
5836 lpfc_get_cfgparam(phba);
5837
895427bd
JS
5838 /* Set up phase-1 common device driver resources */
5839 rc = lpfc_setup_driver_resource_phase1(phba);
5840 if (rc)
5841 return -ENODEV;
5842
da0436e9
JS
5843 /* Before proceed, wait for POST done and device ready */
5844 rc = lpfc_sli4_post_status_check(phba);
5845 if (rc)
5846 return -ENODEV;
5847
3772a991 5848 /*
da0436e9 5849 * Initialize timers used by driver
3772a991 5850 */
3772a991 5851
f22eb4d3 5852 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
3772a991 5853
ecfd03c6 5854 /* FCF rediscover timer */
f22eb4d3 5855 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
ecfd03c6 5856
7ad20aa9
JS
5857 /*
5858 * Control structure for handling external multi-buffer mailbox
5859 * command pass-through.
5860 */
5861 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5862 sizeof(struct lpfc_mbox_ext_buf_ctx));
5863 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5864
da0436e9 5865 phba->max_vpi = LPFC_MAX_VPI;
67d12733 5866
da0436e9
JS
5867 /* This will be set to correct value after the read_config mbox */
5868 phba->max_vports = 0;
3772a991 5869
da0436e9
JS
5870 /* Program the default value of vlan_id and fc_map */
5871 phba->valid_vlan = 0;
5872 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5873 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5874 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3772a991 5875
2a76a283
JS
5876 /*
5877 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
895427bd
JS
5878 * we will associate a new ring, for each EQ/CQ/WQ tuple.
5879 * The WQ create will allocate the ring.
2a76a283 5880 */
09294d46 5881
81e6a637
JS
5882 /*
5883 * 1 for cmd, 1 for rsp, NVME adds an extra one
5884 * for boundary conditions in its max_sgl_segment template.
5885 */
5886 extra = 2;
5887 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
5888 extra++;
5889
da0436e9 5890 /*
09294d46
JS
5891 * It doesn't matter what family our adapter is in, we are
5892 * limited to 2 Pages, 512 SGEs, for our SGL.
5893 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5894 */
5895 max_buf_size = (2 * SLI4_PAGE_SIZE);
81e6a637
JS
5896 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
5897 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
09294d46 5898
da0436e9 5899 /*
895427bd
JS
5900 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
5901 * used to create the sg_dma_buf_pool must be calculated.
da0436e9 5902 */
96f7077f
JS
5903 if (phba->cfg_enable_bg) {
5904 /*
895427bd
JS
5905 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
5906 * the FCP rsp, and a SGE. Since we have no control
5907 * over how many protection segments the SCSI Layer
5908 * will hand us (ie: there could be one for every block
5909 * in the IO), just allocate enough SGEs to accommodate
JS
5909 * in the IO), just allocate enough SGEs to accomidate
5910 * our max amount and we need to limit lpfc_sg_seg_cnt
5911 * to minimize the risk of running out.
96f7077f
JS
5912 */
5913 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
895427bd 5914 sizeof(struct fcp_rsp) + max_buf_size;
96f7077f
JS
5915
5916 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5917 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5918
5919 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
895427bd
JS
5920 phba->cfg_sg_seg_cnt =
5921 LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
96f7077f
JS
5922 } else {
5923 /*
895427bd 5924 * The scsi_buf for a regular I/O holds the FCP cmnd,
96f7077f
JS
5925 * the FCP rsp, a SGE for each, and a SGE for up to
5926 * cfg_sg_seg_cnt data segments.
5927 */
5928 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
895427bd 5929 sizeof(struct fcp_rsp) +
81e6a637 5930 ((phba->cfg_sg_seg_cnt + extra) *
895427bd 5931 sizeof(struct sli4_sge));
96f7077f
JS
5932
5933 /* Total SGEs for scsi_sg_list */
81e6a637 5934 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
895427bd 5935
96f7077f 5936 /*
81e6a637 5937 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
895427bd 5938 * need to post 1 page for the SGL.
96f7077f 5939 */
085c647c 5940 }
acd6859b 5941
96f7077f
JS
5942 /* Initialize the host templates with the updated values. */
5943 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5944 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
96418b5e 5945 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
96f7077f
JS
5946
5947 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5948 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5949 else
5950 phba->cfg_sg_dma_buf_size =
5951 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5952
5953 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5954 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5955 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5956 phba->cfg_total_seg_cnt);
3772a991 5957
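/*
 * Editor's illustrative sketch (not part of the driver source):
 * SLI4_PAGE_ALIGN() above rounds the DMA buffer size up to the SLI4
 * page size.  Assuming the usual power-of-two round-up idiom and a
 * 4 KiB page (both assumptions for illustration):
 */
#include <stdint.h>

#define EXAMPLE_SLI4_PAGE_SIZE 4096u

static uint32_t example_sli4_page_align(uint32_t sz)
{
	/* round up to the next multiple of the (power-of-two) page size */
	return (sz + EXAMPLE_SLI4_PAGE_SIZE - 1) &
	       ~(uint32_t)(EXAMPLE_SLI4_PAGE_SIZE - 1);
}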
da0436e9 5958 /* Initialize buffer queue management fields */
895427bd 5959 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
da0436e9
JS
5960 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5961 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3772a991 5962
da0436e9
JS
5963 /*
5964 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5965 */
895427bd
JS
5966 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5967 /* Initialize the Abort scsi buffer list used by driver */
5968 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5969 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5970 }
5971
5972 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
5973 /* Initialize the Abort nvme buffer list used by driver */
5974 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5975 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
86c67379 5976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
a8cf5dfe 5977 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
895427bd
JS
5978 }
5979
da0436e9 5980 /* This abort list used by worker thread */
895427bd 5981 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
a8cf5dfe 5982 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
3772a991 5983
da0436e9 5984 /*
6d368e53 5985 * Initialize driver internal slow-path work queues
da0436e9 5986 */
3772a991 5987
da0436e9
JS
5988 /* Driver internal slow-path CQ Event pool */
5989 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5990 /* Response IOCB work queue list */
45ed1190 5991 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
da0436e9
JS
5992 /* Asynchronous event CQ Event work queue list */
5993 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5994 /* Fast-path XRI aborted CQ Event work queue list */
5995 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5996 /* Slow-path XRI aborted CQ Event work queue list */
5997 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5998 /* Receive queue CQ Event work queue list */
5999 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6000
6d368e53
JS
6001 /* Initialize extent block lists. */
6002 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6003 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6004 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6005 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6006
d1f525aa
JS
6007 /* Initialize mboxq lists up front so that, if the early init
6008 * routines fail, these lists are already correctly initialized.
6009 */
6010 INIT_LIST_HEAD(&phba->sli.mboxq);
6011 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6012
448193b5
JS
6013 /* initialize optic_state to 0xFF */
6014 phba->sli4_hba.lnk_info.optic_state = 0xff;
6015
da0436e9
JS
6016 /* Allocate device driver memory */
6017 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6018 if (rc)
6019 return -ENOMEM;
6020
2fcee4bf 6021 /* IF Type 2 ports get initialized now. */
27d6ac0a 6022 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2fcee4bf
JS
6023 LPFC_SLI_INTF_IF_TYPE_2) {
6024 rc = lpfc_pci_function_reset(phba);
895427bd
JS
6025 if (unlikely(rc)) {
6026 rc = -ENODEV;
6027 goto out_free_mem;
6028 }
946727dc 6029 phba->temp_sensor_support = 1;
2fcee4bf
JS
6030 }
6031
da0436e9
JS
6032 /* Create the bootstrap mailbox command */
6033 rc = lpfc_create_bootstrap_mbox(phba);
6034 if (unlikely(rc))
6035 goto out_free_mem;
6036
6037 /* Set up the host's endian order with the device. */
6038 rc = lpfc_setup_endian_order(phba);
6039 if (unlikely(rc))
6040 goto out_free_bsmbx;
6041
6042 /* Set up the hba's configuration parameters. */
6043 rc = lpfc_sli4_read_config(phba);
cff261f6
JS
6044 if (unlikely(rc))
6045 goto out_free_bsmbx;
6046 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
da0436e9
JS
6047 if (unlikely(rc))
6048 goto out_free_bsmbx;
6049
2fcee4bf
JS
6050 /* IF Type 0 ports get initialized now. */
6051 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6052 LPFC_SLI_INTF_IF_TYPE_0) {
6053 rc = lpfc_pci_function_reset(phba);
6054 if (unlikely(rc))
6055 goto out_free_bsmbx;
6056 }
da0436e9 6057
cb5172ea
JS
6058 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6059 GFP_KERNEL);
6060 if (!mboxq) {
6061 rc = -ENOMEM;
6062 goto out_free_bsmbx;
6063 }
6064
f358dd0c 6065 /* Check for NVMET being configured */
895427bd 6066 phba->nvmet_support = 0;
f358dd0c
JS
6067 if (lpfc_enable_nvmet_cnt) {
6068
6069 /* First get WWN of HBA instance */
6070 lpfc_read_nv(phba, mboxq);
6071 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6072 if (rc != MBX_SUCCESS) {
6073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6074 "6016 Mailbox failed , mbxCmd x%x "
6075 "READ_NV, mbxStatus x%x\n",
6076 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6077 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
d1f525aa 6078 mempool_free(mboxq, phba->mbox_mem_pool);
f358dd0c
JS
6079 rc = -EIO;
6080 goto out_free_bsmbx;
6081 }
6082 mb = &mboxq->u.mb;
6083 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6084 sizeof(uint64_t));
6085 wwn = cpu_to_be64(wwn);
6086 phba->sli4_hba.wwnn.u.name = wwn;
6087 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6088 sizeof(uint64_t));
6089 /* wwn is WWPN of HBA instance */
6090 wwn = cpu_to_be64(wwn);
6091 phba->sli4_hba.wwpn.u.name = wwn;
6092
6093 /* Check to see if it matches any module parameter */
6094 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6095 if (wwn == lpfc_enable_nvmet[i]) {
7d708033 6096#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3c603be9
JS
6097 if (lpfc_nvmet_mem_alloc(phba))
6098 break;
6099
6100 phba->nvmet_support = 1; /* a match */
6101
f358dd0c
JS
6102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6103 "6017 NVME Target %016llx\n",
6104 wwn);
7d708033
JS
6105#else
6106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6107 "6021 Can't enable NVME Target."
6108 " NVME_TARGET_FC infrastructure"
6109 " is not in kernel\n");
6110#endif
3c603be9 6111 break;
f358dd0c
JS
6112 }
6113 }
6114 }
895427bd
JS
6115
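/*
 * Editor's illustrative sketch (not part of the driver source): the
 * NVMET check above memcpy()s the 8 wire-order WWN bytes into a u64
 * and normalizes them with cpu_to_be64() so the result can be compared
 * against the lpfc_enable_nvmet[] module parameters as a plain
 * integer.  A userspace analogue using glibc's endian.h:
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

static uint64_t example_wwn_to_u64(const unsigned char name[8])
{
	uint64_t wwn;

	memcpy(&wwn, name, sizeof(wwn));
	return htobe64(wwn);	/* same numeric value on any host endianness */
}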
6116 lpfc_nvme_mod_param_dep(phba);
6117
fedd3b7b 6118 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
cb5172ea
JS
6119 lpfc_supported_pages(mboxq);
6120 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
fedd3b7b
JS
6121 if (!rc) {
6122 mqe = &mboxq->u.mqe;
6123 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6124 LPFC_MAX_SUPPORTED_PAGES);
6125 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6126 switch (pn_page[i]) {
6127 case LPFC_SLI4_PARAMETERS:
6128 phba->sli4_hba.pc_sli4_params.supported = 1;
6129 break;
6130 default:
6131 break;
6132 }
6133 }
6134 /* Read the port's SLI4 Parameters capabilities if supported. */
6135 if (phba->sli4_hba.pc_sli4_params.supported)
6136 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6137 if (rc) {
6138 mempool_free(mboxq, phba->mbox_mem_pool);
6139 rc = -EIO;
6140 goto out_free_bsmbx;
cb5172ea
JS
6141 }
6142 }
65791f1f 6143
fedd3b7b
JS
6144 /*
6145 * Get sli4 parameters that override parameters from Port capabilities.
6d368e53
JS
6146 * If this call fails, it isn't critical unless the SLI4 parameters come
6147 * back in conflict.
fedd3b7b 6148 */
6d368e53
JS
6149 rc = lpfc_get_sli4_parameters(phba, mboxq);
6150 if (rc) {
6151 if (phba->sli4_hba.extents_in_use &&
6152 phba->sli4_hba.rpi_hdrs_in_use) {
6153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6154 "2999 Unsupported SLI4 Parameters "
6155 "Extents and RPI headers enabled.\n");
6d368e53 6156 }
895427bd 6157 mempool_free(mboxq, phba->mbox_mem_pool);
5c756065 6158 rc = -EIO;
895427bd 6159 goto out_free_bsmbx;
6d368e53 6160 }
895427bd 6161
cb5172ea 6162 mempool_free(mboxq, phba->mbox_mem_pool);
1ba981fd
JS
6163
6164 /* Verify OAS is supported */
6165 lpfc_sli4_oas_verify(phba);
6166 if (phba->cfg_fof)
6167 fof_vectors = 1;
6168
5350d872
JS
6169 /* Verify all the SLI4 queues */
6170 rc = lpfc_sli4_queue_verify(phba);
da0436e9
JS
6171 if (rc)
6172 goto out_free_bsmbx;
6173
6174 /* Create driver internal CQE event pool */
6175 rc = lpfc_sli4_cq_event_pool_create(phba);
6176 if (rc)
5350d872 6177 goto out_free_bsmbx;
da0436e9 6178
8a9d2e80
JS
6179 /* Initialize sgl lists per host */
6180 lpfc_init_sgl_list(phba);
6181
6182 /* Allocate and initialize active sgl array */
da0436e9
JS
6183 rc = lpfc_init_active_sgl_array(phba);
6184 if (rc) {
6185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6186 "1430 Failed to initialize sgl list.\n");
8a9d2e80 6187 goto out_destroy_cq_event_pool;
da0436e9 6188 }
da0436e9
JS
6189 rc = lpfc_sli4_init_rpi_hdrs(phba);
6190 if (rc) {
6191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6192 "1432 Failed to initialize rpi headers.\n");
6193 goto out_free_active_sgl;
6194 }
6195
a93ff37a 6196 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
0c9ab6f5
JS
6197 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6198 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
6199 GFP_KERNEL);
6200 if (!phba->fcf.fcf_rr_bmask) {
6201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6202 "2759 Failed allocate memory for FCF round "
6203 "robin failover bmask\n");
0558056c 6204 rc = -ENOMEM;
0c9ab6f5
JS
6205 goto out_remove_rpi_hdrs;
6206 }
6207
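/*
 * Editor's illustrative sketch (not part of the driver source): the
 * `longs` computation above is the standard kernel bitmap sizing --
 * round the bit count up to a whole number of longs and allocate that
 * many zeroed longs.  The same arithmetic in plain C:
 */
#include <limits.h>
#include <stdlib.h>

static unsigned long *example_alloc_bitmap(unsigned int nbits)
{
	size_t bits_per_long = CHAR_BIT * sizeof(unsigned long);
	size_t longs = (nbits + bits_per_long - 1) / bits_per_long;

	return calloc(longs, sizeof(unsigned long));	/* zeroed, like kzalloc */
}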
895427bd
JS
6208 phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
6209 sizeof(struct lpfc_hba_eq_hdl),
6210 GFP_KERNEL);
6211 if (!phba->sli4_hba.hba_eq_hdl) {
67d12733
JS
6212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6213 "2572 Failed allocate memory for "
6214 "fast-path per-EQ handle array\n");
6215 rc = -ENOMEM;
6216 goto out_free_fcf_rr_bmask;
da0436e9
JS
6217 }
6218
895427bd
JS
6219 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
6220 sizeof(struct lpfc_vector_map_info),
6221 GFP_KERNEL);
7bb03bbf
JS
6222 if (!phba->sli4_hba.cpu_map) {
6223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6224 "3327 Failed allocate memory for msi-x "
6225 "interrupt vector mapping\n");
6226 rc = -ENOMEM;
895427bd 6227 goto out_free_hba_eq_hdl;
7bb03bbf 6228 }
b246de17 6229 if (lpfc_used_cpu == NULL) {
895427bd
JS
6230 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
6231 GFP_KERNEL);
b246de17
JS
6232 if (!lpfc_used_cpu) {
6233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6234 "3335 Failed allocate memory for msi-x "
6235 "interrupt vector mapping\n");
6236 kfree(phba->sli4_hba.cpu_map);
6237 rc = -ENOMEM;
895427bd 6238 goto out_free_hba_eq_hdl;
b246de17
JS
6239 }
6240 for (i = 0; i < lpfc_present_cpu; i++)
6241 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
6242 }
6243
912e3acd
JS
6244 /*
6245 * Enable sr-iov virtual functions if supported and configured
6246 * through the module parameter.
6247 */
6248 if (phba->cfg_sriov_nr_virtfn > 0) {
6249 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6250 phba->cfg_sriov_nr_virtfn);
6251 if (rc) {
6252 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6253 "3020 Requested number of SR-IOV "
6254 "virtual functions (%d) is not "
6255 "supported\n",
6256 phba->cfg_sriov_nr_virtfn);
6257 phba->cfg_sriov_nr_virtfn = 0;
6258 }
6259 }
6260
5248a749 6261 return 0;
da0436e9 6262
895427bd
JS
6263out_free_hba_eq_hdl:
6264 kfree(phba->sli4_hba.hba_eq_hdl);
0c9ab6f5
JS
6265out_free_fcf_rr_bmask:
6266 kfree(phba->fcf.fcf_rr_bmask);
da0436e9
JS
6267out_remove_rpi_hdrs:
6268 lpfc_sli4_remove_rpi_hdrs(phba);
6269out_free_active_sgl:
6270 lpfc_free_active_sgl(phba);
da0436e9
JS
6271out_destroy_cq_event_pool:
6272 lpfc_sli4_cq_event_pool_destroy(phba);
da0436e9
JS
6273out_free_bsmbx:
6274 lpfc_destroy_bootstrap_mbox(phba);
6275out_free_mem:
6276 lpfc_mem_free(phba);
6277 return rc;
6278}
6279
6280/**
6281 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6282 * @phba: pointer to lpfc hba data structure.
6283 *
6284 * This routine is invoked to unset the driver internal resources set up
6285 * specific for supporting the SLI-4 HBA device it is attached to.
6286 **/
6287static void
6288lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6289{
6290 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6291
7bb03bbf
JS
6292 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6293 kfree(phba->sli4_hba.cpu_map);
6294 phba->sli4_hba.num_present_cpu = 0;
6295 phba->sli4_hba.num_online_cpu = 0;
76fd07a6 6296 phba->sli4_hba.curr_disp_cpu = 0;
7bb03bbf 6297
da0436e9 6298 /* Free memory allocated for fast-path work queue handles */
895427bd 6299 kfree(phba->sli4_hba.hba_eq_hdl);
da0436e9
JS
6300
6301 /* Free the allocated rpi headers. */
6302 lpfc_sli4_remove_rpi_hdrs(phba);
d11e31dd 6303 lpfc_sli4_remove_rpis(phba);
da0436e9 6304
0c9ab6f5
JS
6305 /* Free eligible FCF index bmask */
6306 kfree(phba->fcf.fcf_rr_bmask);
6307
da0436e9
JS
6308 /* Free the ELS sgl list */
6309 lpfc_free_active_sgl(phba);
8a9d2e80 6310 lpfc_free_els_sgl_list(phba);
f358dd0c 6311 lpfc_free_nvmet_sgl_list(phba);
da0436e9 6312
da0436e9
JS
6313 /* Free the completion queue EQ event pool */
6314 lpfc_sli4_cq_event_release_all(phba);
6315 lpfc_sli4_cq_event_pool_destroy(phba);
6316
6d368e53
JS
6317 /* Release resource identifiers. */
6318 lpfc_sli4_dealloc_resource_identifiers(phba);
6319
da0436e9
JS
6320 /* Free the bsmbx region. */
6321 lpfc_destroy_bootstrap_mbox(phba);
6322
6323 /* Free the SLI Layer memory with SLI4 HBAs */
6324 lpfc_mem_free_all(phba);
6325
6326 /* Free the current connect table */
6327 list_for_each_entry_safe(conn_entry, next_conn_entry,
4d9ab994
JS
6328 &phba->fcf_conn_rec_list, list) {
6329 list_del_init(&conn_entry->list);
da0436e9 6330 kfree(conn_entry);
4d9ab994 6331 }
da0436e9
JS
6332
6333 return;
6334}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
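
/*
 * Illustrative note (not from the original source): once the jump table
 * above is populated, common code can dispatch to the correct SLI-3 or
 * SLI-4 handler without further dev_grp checks, e.g.:
 *
 *	phba->lpfc_stop_port(phba);	calls lpfc_stop_port_s3 or _s4
 */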

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device to which it is
 * attached.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	/* workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device to which
 * it is attached.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate for the list.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
			       "expected %d count. Unloading driver.\n",
			       __func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
			       "Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
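
/*
 * Illustrative note (an assumption based on how lpfc_sli_next_iotag() is
 * used elsewhere in this driver, not stated in this file): the iotag
 * handed out above also serves as an index into the driver's iocbq
 * lookup array, so the completion path can map a returned tag back to
 * its lpfc_iocbq in constant time, e.g.:
 *
 *	piocbq = phba->sli.iocbq_lookup[iotag];
 */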

/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}

/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}
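
/*
 * Illustrative note (not from the original source): both sgl teardown
 * routines above use the same pattern - splice the shared list onto a
 * private list head while holding hbalock and sgl_list_lock, then drop
 * the locks and free the entries at leisure. This keeps the lock hold
 * time independent of the number of sgls being freed.
 */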

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* scsi xri-buffer book keeping */
	phba->sli4_hba.scsi_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.nvme_xri_cnt = 0;
}

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
					   LPFC_HDR_TEMPLATE_SIZE,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
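
/*
 * Illustrative note (assuming LPFC_HDR_TEMPLATE_SIZE is 4096 bytes and
 * each rpi context header is 64 bytes): one region holds
 * 4096 / 64 = 64 headers, which is why each header block above advances
 * next_rpi by LPFC_RPI_HDR_COUNT and why the kernel-doc says the region
 * supports up to 64 rpis.
 */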

/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	atomic_set(&phba->fc4ScsiInputRequests, 0);
	atomic_set(&phba->fc4ScsiOutputRequests, 0);
	atomic_set(&phba->fc4ScsiControlRequests, 0);
	atomic_set(&phba->fc4ScsiIoCmpls, 0);
	atomic_set(&phba->fc4NvmeInputRequests, 0);
	atomic_set(&phba->fc4NvmeOutputRequests, 0);
	atomic_set(&phba->fc4NvmeControlRequests, 0);
	atomic_set(&phba->fc4NvmeIoCmpls, 0);
	atomic_set(&phba->fc4NvmeLsRequests, 0);
	atomic_set(&phba->fc4NvmeLsCmpls, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		if (phba->txrdy_payload_pool == NULL) {
			phba->txrdy_payload_pool = dma_pool_create(
				"txrdy_pool", &phba->pcidev->dev,
				TXRDY_PAYLOAD_LEN, 16, 0);
			if (phba->txrdy_payload_pool) {
				phba->targetport = NULL;
				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_INIT | LOG_NVME_DISC,
						"6076 NVME Target Found\n");
			}
		}
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy the physical port that is associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	int pagecnt = 10;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
					SHOST_DIX_TYPE0_PROTECTION |
					SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
			    (old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}

	if (!_dump_buf_data) {
		while (pagecnt) {
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
			"\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
			_dump_buf_dif);
}
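
/*
 * Illustrative note (not from the original source): pagecnt above is an
 * allocation *order*, not a page count - __get_free_pages(GFP_KERNEL,
 * order) returns PAGE_SIZE << order bytes, i.e. 2^order pages. The
 * loops therefore start at order 10 (4 MB with 4 KB pages) and halve
 * the request until an allocation succeeds, logging (1 << pagecnt) as
 * the number of pages obtained.
 */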

/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}
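
	/*
	 * Illustrative note (a sketch, not from the original source): the
	 * fallback above - try a 64-bit DMA mask, then settle for 32-bit -
	 * is the classic pci_set_dma_mask() idiom. On kernels providing
	 * the combined helper, the same effect can be written as:
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
	 *		return error;
	 */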

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
						&phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
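
	/*
	 * Illustrative note (not from the original source): the loop above
	 * carves the single hbqslimp DMA region into per-HBQ slices; HBQ i
	 * receives entry_count entries of sizeof(struct lpfc_hbq_entry)
	 * bytes, with lpfc_sli_hbq_size() presumably sized as the sum of
	 * those slices.
	 */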
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}
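
	/*
	 * Illustrative note (not from the original source): 3000 iterations
	 * of msleep(10) gives the "up to 30 seconds" POST timeout promised
	 * above (3000 * 10 ms = 30 s), ignoring scheduler oversleep.
	 */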

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
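
/*
 * Illustrative note (an editorial summary of the switch above): if_type
 * 0 exposes the unrecoverable-error (UE) status and mask words through
 * BAR0, while if_type 2 replaces them with port control/status/error
 * registers and also places the queue doorbells in BAR0. if_type 6
 * keeps the control registers in BAR0 but takes its doorbells from BAR1
 * instead (see lpfc_sli4_bar1_register_memmap below).
 */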

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli4 interface type.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
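
/*
 * Illustrative note (not from the original source): in the routine
 * above, each virtual function owns its own LPFC_VFR_PAGE_SIZE window
 * of doorbell space, so every address is computed as
 * base + vf * LPFC_VFR_PAGE_SIZE + a fixed register offset. Example
 * layout, assuming a 4 KB VF page:
 *
 *	vf 0 RQ doorbell: drbl_regs_memmap_p + LPFC_ULP0_RQ_DOORBELL
 *	vf 1 RQ doorbell: drbl_regs_memmap_p + 4096 + LPFC_ULP0_RQ_DOORBELL
 */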

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
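	/*
	 * Illustrative worked example (editorial, with made-up values):
	 * because of the 16-byte alignment, the low 4 bits of aphys are
	 * zero, so
	 *
	 *	addr_hi = ((aphys >> 34) & 0x3fffffff) << 2 | BIT1_ADDR_HI
	 *	addr_lo = ((aphys >>  4) & 0x3fffffff) << 2 | BIT1_ADDR_LO
	 *
	 * together carry bits 63:4 of the address as two 30-bit fields,
	 * with bits 1:0 of each doorbell word flagging which half it is.
	 */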
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
7647
7648/**
da0436e9 7649 * lpfc_sli4_read_config - Get the config parameters.
3772a991
JS
7650 * @phba: pointer to lpfc hba data structure.
7651 *
da0436e9
JS
7652 * This routine is invoked to read the configuration parameters from the HBA.
7653 * The configuration parameters are used to set the base and maximum values
7654 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
7655 * allocation for the port.
3772a991
JS
7656 *
7657 * Return codes
af901ca1 7658 * 0 - successful
25985edc 7659 * -ENOMEM - No available memory
d439d286 7660 * -EIO - The mailbox failed to complete successfully.
3772a991 7661 **/
ff78d8f9 7662int
da0436e9 7663lpfc_sli4_read_config(struct lpfc_hba *phba)
3772a991 7664{
da0436e9
JS
7665 LPFC_MBOXQ_t *pmb;
7666 struct lpfc_mbx_read_config *rd_config;
912e3acd
JS
7667 union lpfc_sli4_cfg_shdr *shdr;
7668 uint32_t shdr_status, shdr_add_status;
7669 struct lpfc_mbx_get_func_cfg *get_func_cfg;
7670 struct lpfc_rsrc_desc_fcfcoe *desc;
8aa134a8 7671 char *pdesc_0;
c691816e
JS
7672 uint16_t forced_link_speed;
7673 uint32_t if_type;
8aa134a8 7674 int length, i, rc = 0, rc2;
3772a991 7675
da0436e9
JS
7676 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7677 if (!pmb) {
7678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7679 "2011 Unable to allocate memory for issuing "
7680 "SLI_CONFIG_SPECIAL mailbox command\n");
7681 return -ENOMEM;
3772a991
JS
7682 }
7683
da0436e9 7684 lpfc_read_config(phba, pmb);
3772a991 7685
da0436e9
JS
7686 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7687 if (rc != MBX_SUCCESS) {
7688 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7689 "2012 Mailbox failed , mbxCmd x%x "
7690 "READ_CONFIG, mbxStatus x%x\n",
7691 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7692 bf_get(lpfc_mqe_status, &pmb->u.mqe));
7693 rc = -EIO;
7694 } else {
7695 rd_config = &pmb->u.mqe.un.rd_config;
ff78d8f9
JS
7696 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
7697 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
7698 phba->sli4_hba.lnk_info.lnk_tp =
7699 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
7700 phba->sli4_hba.lnk_info.lnk_no =
7701 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
7702 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7703 "3081 lnk_type:%d, lnk_numb:%d\n",
7704 phba->sli4_hba.lnk_info.lnk_tp,
7705 phba->sli4_hba.lnk_info.lnk_no);
7706 } else
7707 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7708 "3082 Mailbox (x%x) returned ldv:x0\n",
7709 bf_get(lpfc_mqe_command, &pmb->u.mqe));
44fd7fe3
JS
7710 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
7711 phba->bbcredit_support = 1;
7712 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
7713 }
7714
6d368e53
JS
7715 phba->sli4_hba.extents_in_use =
7716 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
da0436e9
JS
7717 phba->sli4_hba.max_cfg_param.max_xri =
7718 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
7719 phba->sli4_hba.max_cfg_param.xri_base =
7720 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
7721 phba->sli4_hba.max_cfg_param.max_vpi =
7722 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
7723 phba->sli4_hba.max_cfg_param.vpi_base =
7724 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
7725 phba->sli4_hba.max_cfg_param.max_rpi =
7726 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
7727 phba->sli4_hba.max_cfg_param.rpi_base =
7728 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
7729 phba->sli4_hba.max_cfg_param.max_vfi =
7730 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
7731 phba->sli4_hba.max_cfg_param.vfi_base =
7732 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
7733 phba->sli4_hba.max_cfg_param.max_fcfi =
7734 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
da0436e9
JS
7735 phba->sli4_hba.max_cfg_param.max_eq =
7736 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
7737 phba->sli4_hba.max_cfg_param.max_rq =
7738 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
7739 phba->sli4_hba.max_cfg_param.max_wq =
7740 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
7741 phba->sli4_hba.max_cfg_param.max_cq =
7742 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
7743 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
7744 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
7745 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
7746 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5ffc266e
JS
7747 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
7748 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
da0436e9
JS
7749 phba->max_vports = phba->max_vpi;
7750 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6d368e53
JS
7751 "2003 cfg params Extents? %d "
7752 "XRI(B:%d M:%d), "
da0436e9
JS
7753 "VPI(B:%d M:%d) "
7754 "VFI(B:%d M:%d) "
7755 "RPI(B:%d M:%d) "
2ea259ee 7756 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
6d368e53 7757 phba->sli4_hba.extents_in_use,
da0436e9
JS
7758 phba->sli4_hba.max_cfg_param.xri_base,
7759 phba->sli4_hba.max_cfg_param.max_xri,
7760 phba->sli4_hba.max_cfg_param.vpi_base,
7761 phba->sli4_hba.max_cfg_param.max_vpi,
7762 phba->sli4_hba.max_cfg_param.vfi_base,
7763 phba->sli4_hba.max_cfg_param.max_vfi,
7764 phba->sli4_hba.max_cfg_param.rpi_base,
7765 phba->sli4_hba.max_cfg_param.max_rpi,
2ea259ee
JS
7766 phba->sli4_hba.max_cfg_param.max_fcfi,
7767 phba->sli4_hba.max_cfg_param.max_eq,
7768 phba->sli4_hba.max_cfg_param.max_cq,
7769 phba->sli4_hba.max_cfg_param.max_wq,
7770 phba->sli4_hba.max_cfg_param.max_rq);
7771
3772a991 7772 }
912e3acd
JS
7773
7774 if (rc)
7775 goto read_cfg_out;
da0436e9 7776
c691816e
JS
7777 /* Update link speed if forced link speed is supported */
7778 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
27d6ac0a 7779 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
c691816e
JS
7780 forced_link_speed =
7781 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
7782 if (forced_link_speed) {
7783 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
7784
7785 switch (forced_link_speed) {
7786 case LINK_SPEED_1G:
7787 phba->cfg_link_speed =
7788 LPFC_USER_LINK_SPEED_1G;
7789 break;
7790 case LINK_SPEED_2G:
7791 phba->cfg_link_speed =
7792 LPFC_USER_LINK_SPEED_2G;
7793 break;
7794 case LINK_SPEED_4G:
7795 phba->cfg_link_speed =
7796 LPFC_USER_LINK_SPEED_4G;
7797 break;
7798 case LINK_SPEED_8G:
7799 phba->cfg_link_speed =
7800 LPFC_USER_LINK_SPEED_8G;
7801 break;
7802 case LINK_SPEED_10G:
7803 phba->cfg_link_speed =
7804 LPFC_USER_LINK_SPEED_10G;
7805 break;
7806 case LINK_SPEED_16G:
7807 phba->cfg_link_speed =
7808 LPFC_USER_LINK_SPEED_16G;
7809 break;
7810 case LINK_SPEED_32G:
7811 phba->cfg_link_speed =
7812 LPFC_USER_LINK_SPEED_32G;
7813 break;
7814 case 0xffff:
7815 phba->cfg_link_speed =
7816 LPFC_USER_LINK_SPEED_AUTO;
7817 break;
7818 default:
7819 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7820 "0047 Unrecognized link "
7821 "speed : %d\n",
7822 forced_link_speed);
7823 phba->cfg_link_speed =
7824 LPFC_USER_LINK_SPEED_AUTO;
7825 }
7826 }
7827 }
7828
da0436e9 7829 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
572709e2
JS
7830 length = phba->sli4_hba.max_cfg_param.max_xri -
7831 lpfc_sli4_get_els_iocb_cnt(phba);
7832 if (phba->cfg_hba_queue_depth > length) {
7833 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7834 "3361 HBA queue depth changed from %d to %d\n",
7835 phba->cfg_hba_queue_depth, length);
7836 phba->cfg_hba_queue_depth = length;
7837 }
912e3acd 7838
27d6ac0a 7839 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
912e3acd
JS
7840 LPFC_SLI_INTF_IF_TYPE_2)
7841 goto read_cfg_out;
7842
7843 /* get the pf# and vf# for SLI4 if_type 2 port */
7844 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
7845 sizeof(struct lpfc_sli4_cfg_mhdr));
7846 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
7847 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
7848 length, LPFC_SLI4_MBX_EMBED);
7849
8aa134a8 7850 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
912e3acd
JS
7851 shdr = (union lpfc_sli4_cfg_shdr *)
7852 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7853 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7854 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8aa134a8 7855 if (rc2 || shdr_status || shdr_add_status) {
912e3acd
JS
7856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7857 "3026 Mailbox failed , mbxCmd x%x "
7858 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
7859 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7860 bf_get(lpfc_mqe_status, &pmb->u.mqe));
912e3acd
JS
7861 goto read_cfg_out;
7862 }
7863
7864 /* search for fc_fcoe resrouce descriptor */
7865 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
912e3acd 7866
8aa134a8
JS
7867 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
7868 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
7869 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
7870 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
7871 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
7872 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
7873 goto read_cfg_out;
7874
912e3acd 7875 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8aa134a8 7876 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
912e3acd 7877 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8aa134a8 7878 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
912e3acd
JS
7879 phba->sli4_hba.iov.pf_number =
7880 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
7881 phba->sli4_hba.iov.vf_number =
7882 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
7883 break;
7884 }
7885 }
7886
7887 if (i < LPFC_RSRC_DESC_MAX_NUM)
7888 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7889 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
7890 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
7891 phba->sli4_hba.iov.vf_number);
8aa134a8 7892 else
912e3acd
JS
7893 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7894 "3028 GET_FUNCTION_CONFIG: failed to find "
7895 "Resrouce Descriptor:x%x\n",
7896 LPFC_RSRC_DESC_TYPE_FCFCOE);
912e3acd
JS
7897
7898read_cfg_out:
7899 mempool_free(pmb, phba->mbox_mem_pool);
da0436e9 7900 return rc;
3772a991
JS
7901}
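
/*
 * Illustrative sketch (stand-alone C with hypothetical types, not lpfc
 * code) of the descriptor walk performed above: GET_FUNCTION_CONFIG
 * returns typed descriptors laid out back to back, so the scan advances
 * by a fixed stride (taken from the first descriptor's length field)
 * until the wanted type is found.
 */
struct desc_hdr {
	unsigned char type;	/* descriptor type tag */
	unsigned char length;	/* descriptor length, i.e. the stride */
};

static struct desc_hdr *find_desc(char *buf, int max_num,
				  unsigned int stride, unsigned char want)
{
	int i;

	for (i = 0; i < max_num; i++) {
		struct desc_hdr *d = (struct desc_hdr *)(buf + stride * i);

		if (d->type == want)
			return d;	/* caller reads pf#/vf# out of it */
	}
	return NULL;			/* no such descriptor in the set */
}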
7902
7903/**
2fcee4bf 7904 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
3772a991
JS
7905 * @phba: pointer to lpfc hba data structure.
7906 *
2fcee4bf
JS
7907  * This routine is invoked to set up the port-side endian order when
7908 * the port if_type is 0. This routine has no function for other
7909 * if_types.
da0436e9
JS
7910 *
7911 * Return codes
af901ca1 7912 * 0 - successful
25985edc 7913 * -ENOMEM - No available memory
d439d286 7914 * -EIO - The mailbox failed to complete successfully.
3772a991 7915 **/
da0436e9
JS
7916static int
7917lpfc_setup_endian_order(struct lpfc_hba *phba)
3772a991 7918{
da0436e9 7919 LPFC_MBOXQ_t *mboxq;
2fcee4bf 7920 uint32_t if_type, rc = 0;
da0436e9
JS
7921 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
7922 HOST_ENDIAN_HIGH_WORD1};
3772a991 7923
2fcee4bf
JS
7924 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7925 switch (if_type) {
7926 case LPFC_SLI_INTF_IF_TYPE_0:
7927 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7928 GFP_KERNEL);
7929 if (!mboxq) {
7930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7931 "0492 Unable to allocate memory for "
7932 "issuing SLI_CONFIG_SPECIAL mailbox "
7933 "command\n");
7934 return -ENOMEM;
7935 }
3772a991 7936
2fcee4bf
JS
7937 /*
7938 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
7939 * two words to contain special data values and no other data.
7940 */
7941 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
7942 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
7943 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7944 if (rc != MBX_SUCCESS) {
7945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7946 "0493 SLI_CONFIG_SPECIAL mailbox "
7947 "failed with status x%x\n",
7948 rc);
7949 rc = -EIO;
7950 }
7951 mempool_free(mboxq, phba->mbox_mem_pool);
7952 break;
27d6ac0a 7953 case LPFC_SLI_INTF_IF_TYPE_6:
2fcee4bf
JS
7954 case LPFC_SLI_INTF_IF_TYPE_2:
7955 case LPFC_SLI_INTF_IF_TYPE_1:
7956 default:
7957 break;
da0436e9 7958 }
da0436e9 7959 return rc;
3772a991
JS
7960}
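
/*
 * Hedged sketch of the signature trick above (the marker values lpfc
 * uses live in HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1; everything
 * else here is hypothetical): the host writes two fixed words as the
 * first mailbox payload, and a port that reads them byte-swapped can
 * infer the host's endianness and swap subsequent mailbox traffic
 * itself.
 */
static inline u32 bswap32_sketch(u32 v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static bool host_is_opposite_endian(u32 seen, u32 expected)
{
	/* Port-side view: a swapped marker means an opposite-endian host. */
	return seen == bswap32_sketch(expected);
}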
7961
7962/**
895427bd 7963 * lpfc_sli4_queue_verify - Verify and update EQ counts
3772a991
JS
7964 * @phba: pointer to lpfc hba data structure.
7965 *
895427bd
JS
7966  * This routine is invoked to check the user-settable queue counts for EQs.
7967 * After this routine is called the counts will be set to valid values that
5350d872
JS
7968 * adhere to the constraints of the system's interrupt vectors and the port's
7969 * queue resources.
da0436e9
JS
7970 *
7971 * Return codes
af901ca1 7972 * 0 - successful
25985edc 7973 * -ENOMEM - No available memory
3772a991 7974 **/
da0436e9 7975static int
5350d872 7976lpfc_sli4_queue_verify(struct lpfc_hba *phba)
3772a991 7977{
895427bd 7978 int io_channel;
1ba981fd 7979 int fof_vectors = phba->cfg_fof ? 1 : 0;
3772a991 7980
da0436e9 7981 /*
67d12733 7982 * Sanity check for configured queue parameters against the run-time
da0436e9
JS
7983 * device parameters
7984 */
3772a991 7985
67d12733 7986 /* Sanity check on HBA EQ parameters */
895427bd 7987 io_channel = phba->io_channel_irqs;
67d12733 7988
895427bd 7989 if (phba->sli4_hba.num_online_cpu < io_channel) {
82c3e9ba
JS
7990 lpfc_printf_log(phba,
7991 KERN_ERR, LOG_INIT,
90695ee0 7992 "3188 Reducing IO channels to match number of "
7bb03bbf 7993 "online CPUs: from %d to %d\n",
895427bd
JS
7994 io_channel, phba->sli4_hba.num_online_cpu);
7995 io_channel = phba->sli4_hba.num_online_cpu;
90695ee0
JS
7996 }
7997
895427bd 7998 if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
82c3e9ba
JS
7999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8000 "2575 Reducing IO channels to match number of "
8001 "available EQs: from %d to %d\n",
895427bd 8002 io_channel,
82c3e9ba 8003 phba->sli4_hba.max_cfg_param.max_eq);
895427bd 8004 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
da0436e9 8005 }
67d12733 8006
895427bd
JS
8007 /* The actual number of FCP / NVME event queues adopted */
8008 if (io_channel != phba->io_channel_irqs)
8009 phba->io_channel_irqs = io_channel;
8010 if (phba->cfg_fcp_io_channel > io_channel)
8011 phba->cfg_fcp_io_channel = io_channel;
8012 if (phba->cfg_nvme_io_channel > io_channel)
8013 phba->cfg_nvme_io_channel = io_channel;
bcb24f65
JS
8014 if (phba->nvmet_support) {
8015 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
8016 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
8017 }
8018 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8019 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
895427bd
JS
8020
8021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2d7dbc4c 8022 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
895427bd 8023 phba->io_channel_irqs, phba->cfg_fcp_io_channel,
2d7dbc4c 8024 phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
3772a991 8025
da0436e9
JS
8026 /* Get EQ depth from module parameter, fake the default for now */
8027 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8028 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
3772a991 8029
5350d872
JS
8030 /* Get CQ depth from module parameter, fake the default for now */
8031 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8032 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
895427bd
JS
8033 return 0;
8034}
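
/*
 * Minimal sketch of the clamping policy above (hypothetical helper, not
 * lpfc code): the usable channel count is bounded both by the online
 * CPUs and by the EQs the port can provide once reserved vectors (such
 * as the FOF vector counted in fof_vectors) are set aside.
 */
static int clamp_io_channels(int requested, int online_cpus,
			     int max_eq, int reserved)
{
	if (requested > online_cpus)		/* no more IRQs than CPUs */
		requested = online_cpus;
	if (requested + reserved > max_eq)	/* keep room for reserved EQs */
		requested = max_eq - reserved;
	return requested;
}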
8035
8036static int
8037lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
8038{
8039 struct lpfc_queue *qdesc;
5350d872 8040
a51e41b6 8041 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
81b96eda 8042 phba->sli4_hba.cq_esize,
a51e41b6 8043 LPFC_CQE_EXP_COUNT);
895427bd
JS
8044 if (!qdesc) {
8045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8046 "0508 Failed allocate fast-path NVME CQ (%d)\n",
8047 wqidx);
8048 return 1;
8049 }
8050 phba->sli4_hba.nvme_cq[wqidx] = qdesc;
8051
a51e41b6
JS
8052 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8053 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
895427bd
JS
8054 if (!qdesc) {
8055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8056 "0509 Failed allocate fast-path NVME WQ (%d)\n",
8057 wqidx);
8058 return 1;
8059 }
8060 phba->sli4_hba.nvme_wq[wqidx] = qdesc;
8061 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8062 return 0;
8063}
8064
8065static int
8066lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8067{
8068 struct lpfc_queue *qdesc;
c176ffa0 8069 uint32_t wqesize;
895427bd
JS
8070
8071 /* Create Fast Path FCP CQs */
c176ffa0 8072 if (phba->enab_exp_wqcq_pages)
a51e41b6
JS
8073 /* Increase the CQ size when WQEs contain an embedded cdb */
8074 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8075 phba->sli4_hba.cq_esize,
8076 LPFC_CQE_EXP_COUNT);
8077
8078 else
8079 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8080 phba->sli4_hba.cq_esize,
8081 phba->sli4_hba.cq_ecount);
895427bd
JS
8082 if (!qdesc) {
8083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8084 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8085 return 1;
8086 }
8087 phba->sli4_hba.fcp_cq[wqidx] = qdesc;
8088
8089 /* Create Fast Path FCP WQs */
c176ffa0 8090 if (phba->enab_exp_wqcq_pages) {
a51e41b6 8091 /* Increase the WQ size when WQEs contain an embedded cdb */
c176ffa0
JS
8092 wqesize = (phba->fcp_embed_io) ?
8093 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
a51e41b6 8094 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
c176ffa0 8095 wqesize,
a51e41b6 8096 LPFC_WQE_EXP_COUNT);
c176ffa0 8097 } else
a51e41b6
JS
8098 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8099 phba->sli4_hba.wq_esize,
8100 phba->sli4_hba.wq_ecount);
c176ffa0 8101
895427bd
JS
8102 if (!qdesc) {
8103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8104 "0503 Failed allocate fast-path FCP WQ (%d)\n",
8105 wqidx);
8106 return 1;
8107 }
8108 phba->sli4_hba.fcp_wq[wqidx] = qdesc;
8109 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
5350d872 8110 return 0;
5350d872
JS
8111}
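
/*
 * Sketch of the sizing decision above (all numbers hypothetical, not
 * the lpfc constants): with expanded WQ/CQ pages enabled, WQEs that
 * embed the SCSI CDB move to the large 128-byte entry format, and the
 * queue is built from bigger pages so a useful entry count still fits.
 */
struct wq_geometry {
	unsigned int page_size;	/* bytes per queue page */
	unsigned int wqe_size;	/* bytes per work-queue entry */
	unsigned int count;	/* number of entries */
};

static void pick_wq_geometry(struct wq_geometry *g, bool expanded_pages,
			     bool embed_io)
{
	if (expanded_pages) {
		g->page_size = 16384;		/* hypothetical expanded page */
		g->wqe_size = embed_io ? 128 : 64;
		g->count = 4096;		/* hypothetical expanded depth */
	} else {
		g->page_size = 4096;
		g->wqe_size = 64;
		g->count = 1024;		/* hypothetical default depth */
	}
}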
8112
8113/**
8114 * lpfc_sli4_queue_create - Create all the SLI4 queues
8115 * @phba: pointer to lpfc hba data structure.
8116 *
8117 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8118 * operation. For each SLI4 queue type, the parameters such as queue entry
8119 * count (queue depth) shall be taken from the module parameter. For now,
8120  * we just use some constant number as a placeholder.
8121 *
8122 * Return codes
4907cb7b 8123 * 0 - successful
5350d872
JS
8124  *      -ENOMEM - No available memory
8125  *      -ERANGE - No IO channel IRQs are configured
8126 **/
8127int
8128lpfc_sli4_queue_create(struct lpfc_hba *phba)
8129{
8130 struct lpfc_queue *qdesc;
d1f525aa 8131 int idx, io_channel;
5350d872
JS
8132
8133 /*
67d12733 8134 * Create HBA Record arrays.
895427bd 8135 * Both NVME and FCP will share that same vectors / EQs
5350d872 8136 */
895427bd
JS
8137 io_channel = phba->io_channel_irqs;
8138 if (!io_channel)
67d12733 8139 return -ERANGE;
5350d872 8140
67d12733
JS
8141 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8142 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8143 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8144 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8145 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8146 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
895427bd
JS
8147 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8148 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8149 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8150 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
67d12733 8151
895427bd
JS
8152 phba->sli4_hba.hba_eq = kcalloc(io_channel,
8153 sizeof(struct lpfc_queue *),
8154 GFP_KERNEL);
67d12733
JS
8155 if (!phba->sli4_hba.hba_eq) {
8156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8157 "2576 Failed allocate memory for "
8158 "fast-path EQ record array\n");
8159 goto out_error;
8160 }
8161
895427bd
JS
8162 if (phba->cfg_fcp_io_channel) {
8163 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
8164 sizeof(struct lpfc_queue *),
8165 GFP_KERNEL);
8166 if (!phba->sli4_hba.fcp_cq) {
8167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8168 "2577 Failed allocate memory for "
8169 "fast-path CQ record array\n");
8170 goto out_error;
8171 }
8172 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
8173 sizeof(struct lpfc_queue *),
8174 GFP_KERNEL);
8175 if (!phba->sli4_hba.fcp_wq) {
8176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8177 "2578 Failed allocate memory for "
8178 "fast-path FCP WQ record array\n");
8179 goto out_error;
8180 }
8181 /*
8182 * Since the first EQ can have multiple CQs associated with it,
8183 * this array is used to quickly see if we have a FCP fast-path
8184 * CQ match.
8185 */
8186 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
8187 sizeof(uint16_t),
8188 GFP_KERNEL);
8189 if (!phba->sli4_hba.fcp_cq_map) {
8190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8191 "2545 Failed allocate memory for "
8192 "fast-path CQ map\n");
8193 goto out_error;
8194 }
67d12733
JS
8195 }
8196
895427bd
JS
8197 if (phba->cfg_nvme_io_channel) {
8198 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
8199 sizeof(struct lpfc_queue *),
8200 GFP_KERNEL);
8201 if (!phba->sli4_hba.nvme_cq) {
8202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8203 "6077 Failed allocate memory for "
8204 "fast-path CQ record array\n");
8205 goto out_error;
8206 }
da0436e9 8207
895427bd
JS
8208 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
8209 sizeof(struct lpfc_queue *),
8210 GFP_KERNEL);
8211 if (!phba->sli4_hba.nvme_wq) {
8212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8213 "2581 Failed allocate memory for "
8214 "fast-path NVME WQ record array\n");
8215 goto out_error;
8216 }
8217
8218 /*
8219 * Since the first EQ can have multiple CQs associated with it,
8220 * this array is used to quickly see if we have a NVME fast-path
8221 * CQ match.
8222 */
8223 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
8224 sizeof(uint16_t),
8225 GFP_KERNEL);
8226 if (!phba->sli4_hba.nvme_cq_map) {
8227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8228 "6078 Failed allocate memory for "
8229 "fast-path CQ map\n");
8230 goto out_error;
8231 }
2d7dbc4c
JS
8232
8233 if (phba->nvmet_support) {
8234 phba->sli4_hba.nvmet_cqset = kcalloc(
8235 phba->cfg_nvmet_mrq,
8236 sizeof(struct lpfc_queue *),
8237 GFP_KERNEL);
8238 if (!phba->sli4_hba.nvmet_cqset) {
8239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8240 "3121 Fail allocate memory for "
8241 "fast-path CQ set array\n");
8242 goto out_error;
8243 }
8244 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8245 phba->cfg_nvmet_mrq,
8246 sizeof(struct lpfc_queue *),
8247 GFP_KERNEL);
8248 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8250 "3122 Fail allocate memory for "
8251 "fast-path RQ set hdr array\n");
8252 goto out_error;
8253 }
8254 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8255 phba->cfg_nvmet_mrq,
8256 sizeof(struct lpfc_queue *),
8257 GFP_KERNEL);
8258 if (!phba->sli4_hba.nvmet_mrq_data) {
8259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8260 "3124 Fail allocate memory for "
8261 "fast-path RQ set data array\n");
8262 goto out_error;
8263 }
8264 }
da0436e9 8265 }
67d12733 8266
895427bd 8267 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
67d12733 8268
895427bd
JS
8269 /* Create HBA Event Queues (EQs) */
8270 for (idx = 0; idx < io_channel; idx++) {
67d12733 8271 /* Create EQs */
81b96eda
JS
8272 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8273 phba->sli4_hba.eq_esize,
da0436e9
JS
8274 phba->sli4_hba.eq_ecount);
8275 if (!qdesc) {
8276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
67d12733
JS
8277 "0497 Failed allocate EQ (%d)\n", idx);
8278 goto out_error;
da0436e9 8279 }
67d12733 8280 phba->sli4_hba.hba_eq[idx] = qdesc;
895427bd 8281 }
67d12733 8282
895427bd 8283 /* FCP and NVME io channels are not required to be balanced */
67d12733 8284
895427bd
JS
8285 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8286 if (lpfc_alloc_fcp_wq_cq(phba, idx))
67d12733 8287 goto out_error;
da0436e9 8288
895427bd
JS
8289 for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
8290 if (lpfc_alloc_nvme_wq_cq(phba, idx))
8291 goto out_error;
67d12733 8292
2d7dbc4c
JS
8293 if (phba->nvmet_support) {
8294 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8295 qdesc = lpfc_sli4_queue_alloc(phba,
81b96eda
JS
8296 LPFC_DEFAULT_PAGE_SIZE,
8297 phba->sli4_hba.cq_esize,
8298 phba->sli4_hba.cq_ecount);
2d7dbc4c
JS
8299 if (!qdesc) {
8300 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8301 "3142 Failed allocate NVME "
8302 "CQ Set (%d)\n", idx);
8303 goto out_error;
8304 }
8305 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8306 }
8307 }
8308
da0436e9 8309 /*
67d12733 8310 * Create Slow Path Completion Queues (CQs)
da0436e9
JS
8311 */
8312
da0436e9 8313 /* Create slow-path Mailbox Command Complete Queue */
81b96eda
JS
8314 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8315 phba->sli4_hba.cq_esize,
da0436e9
JS
8316 phba->sli4_hba.cq_ecount);
8317 if (!qdesc) {
8318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8319 "0500 Failed allocate slow-path mailbox CQ\n");
67d12733 8320 goto out_error;
da0436e9
JS
8321 }
8322 phba->sli4_hba.mbx_cq = qdesc;
8323
8324 /* Create slow-path ELS Complete Queue */
81b96eda
JS
8325 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8326 phba->sli4_hba.cq_esize,
da0436e9
JS
8327 phba->sli4_hba.cq_ecount);
8328 if (!qdesc) {
8329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8330 "0501 Failed allocate slow-path ELS CQ\n");
67d12733 8331 goto out_error;
da0436e9
JS
8332 }
8333 phba->sli4_hba.els_cq = qdesc;
8334
da0436e9 8335
5350d872 8336 /*
67d12733 8337 * Create Slow Path Work Queues (WQs)
5350d872 8338 */
da0436e9
JS
8339
8340 /* Create Mailbox Command Queue */
da0436e9 8341
81b96eda
JS
8342 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8343 phba->sli4_hba.mq_esize,
da0436e9
JS
8344 phba->sli4_hba.mq_ecount);
8345 if (!qdesc) {
8346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8347 "0505 Failed allocate slow-path MQ\n");
67d12733 8348 goto out_error;
da0436e9
JS
8349 }
8350 phba->sli4_hba.mbx_wq = qdesc;
8351
8352 /*
67d12733 8353 * Create ELS Work Queues
da0436e9 8354 */
da0436e9
JS
8355
8356 /* Create slow-path ELS Work Queue */
81b96eda
JS
8357 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8358 phba->sli4_hba.wq_esize,
da0436e9
JS
8359 phba->sli4_hba.wq_ecount);
8360 if (!qdesc) {
8361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8362 "0504 Failed allocate slow-path ELS WQ\n");
67d12733 8363 goto out_error;
da0436e9
JS
8364 }
8365 phba->sli4_hba.els_wq = qdesc;
895427bd
JS
8366 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8367
8368 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8369 /* Create NVME LS Complete Queue */
81b96eda
JS
8370 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8371 phba->sli4_hba.cq_esize,
895427bd
JS
8372 phba->sli4_hba.cq_ecount);
8373 if (!qdesc) {
8374 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8375 "6079 Failed allocate NVME LS CQ\n");
8376 goto out_error;
8377 }
8378 phba->sli4_hba.nvmels_cq = qdesc;
8379
8380 /* Create NVME LS Work Queue */
81b96eda
JS
8381 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8382 phba->sli4_hba.wq_esize,
895427bd
JS
8383 phba->sli4_hba.wq_ecount);
8384 if (!qdesc) {
8385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8386 "6080 Failed allocate NVME LS WQ\n");
8387 goto out_error;
8388 }
8389 phba->sli4_hba.nvmels_wq = qdesc;
8390 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8391 }
da0436e9 8392
da0436e9
JS
8393 /*
8394 * Create Receive Queue (RQ)
8395 */
da0436e9
JS
8396
8397 /* Create Receive Queue for header */
81b96eda
JS
8398 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8399 phba->sli4_hba.rq_esize,
da0436e9
JS
8400 phba->sli4_hba.rq_ecount);
8401 if (!qdesc) {
8402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8403 "0506 Failed allocate receive HRQ\n");
67d12733 8404 goto out_error;
da0436e9
JS
8405 }
8406 phba->sli4_hba.hdr_rq = qdesc;
8407
8408 /* Create Receive Queue for data */
81b96eda
JS
8409 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8410 phba->sli4_hba.rq_esize,
da0436e9
JS
8411 phba->sli4_hba.rq_ecount);
8412 if (!qdesc) {
8413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8414 "0507 Failed allocate receive DRQ\n");
67d12733 8415 goto out_error;
da0436e9
JS
8416 }
8417 phba->sli4_hba.dat_rq = qdesc;
8418
2d7dbc4c
JS
8419 if (phba->nvmet_support) {
8420 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8421 /* Create NVMET Receive Queue for header */
8422 qdesc = lpfc_sli4_queue_alloc(phba,
81b96eda 8423 LPFC_DEFAULT_PAGE_SIZE,
2d7dbc4c 8424 phba->sli4_hba.rq_esize,
61f3d4bf 8425 LPFC_NVMET_RQE_DEF_COUNT);
2d7dbc4c
JS
8426 if (!qdesc) {
8427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8428 "3146 Failed allocate "
8429 "receive HRQ\n");
8430 goto out_error;
8431 }
8432 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
8433
8434 /* Only needed for header of RQ pair */
8435 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
8436 GFP_KERNEL);
8437 if (qdesc->rqbp == NULL) {
8438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8439 "6131 Failed allocate "
8440 "Header RQBP\n");
8441 goto out_error;
8442 }
8443
4b40d02b
DK
8444 /* Put list in known state in case driver load fails. */
8445 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
8446
2d7dbc4c
JS
8447 /* Create NVMET Receive Queue for data */
8448 qdesc = lpfc_sli4_queue_alloc(phba,
81b96eda 8449 LPFC_DEFAULT_PAGE_SIZE,
2d7dbc4c 8450 phba->sli4_hba.rq_esize,
61f3d4bf 8451 LPFC_NVMET_RQE_DEF_COUNT);
2d7dbc4c
JS
8452 if (!qdesc) {
8453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8454 "3156 Failed allocate "
8455 "receive DRQ\n");
8456 goto out_error;
8457 }
8458 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
8459 }
8460 }
8461
1ba981fd
JS
8462 /* Create the Queues needed for Flash Optimized Fabric operations */
8463 if (phba->cfg_fof)
8464 lpfc_fof_queue_create(phba);
da0436e9
JS
8465 return 0;
8466
da0436e9 8467out_error:
67d12733 8468 lpfc_sli4_queue_destroy(phba);
da0436e9
JS
8469 return -ENOMEM;
8470}
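
/*
 * Generic shape of the allocate-everything-or-unwind pattern used by
 * lpfc_sli4_queue_create() above (stand-alone sketch with hypothetical
 * helpers): every failure path funnels to one label, so the single
 * teardown routine must tolerate a partially built state.
 */
static int alloc_eqs(void);
static int alloc_cqs(void);
static int alloc_wqs(void);
static void destroy_all(void);	/* frees only what actually exists */

static int create_all(void)
{
	if (alloc_eqs())
		goto out_error;
	if (alloc_cqs())
		goto out_error;
	if (alloc_wqs())
		goto out_error;
	return 0;

out_error:
	destroy_all();
	return -ENOMEM;
}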
8471
895427bd
JS
8472static inline void
8473__lpfc_sli4_release_queue(struct lpfc_queue **qp)
8474{
8475 if (*qp != NULL) {
8476 lpfc_sli4_queue_free(*qp);
8477 *qp = NULL;
8478 }
8479}
8480
8481static inline void
8482lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
8483{
8484 int idx;
8485
8486 if (*qs == NULL)
8487 return;
8488
8489 for (idx = 0; idx < max; idx++)
8490 __lpfc_sli4_release_queue(&(*qs)[idx]);
8491
8492 kfree(*qs);
8493 *qs = NULL;
8494}
8495
8496static inline void
8497lpfc_sli4_release_queue_map(uint16_t **qmap)
8498{
8499 if (*qmap != NULL) {
8500 kfree(*qmap);
8501 *qmap = NULL;
8502 }
8503}
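
/*
 * Plain sketch of the release idiom used by the helpers above: taking a
 * pointer to the caller's pointer lets the helper both free the object
 * and clear the reference, so releasing the same slot twice is harmless.
 */
static void release_buf(char **bp)
{
	if (*bp != NULL) {
		kfree(*bp);
		*bp = NULL;	/* the caller's pointer can no longer dangle */
	}
}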
8504
da0436e9
JS
8505/**
8506 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
8507 * @phba: pointer to lpfc hba data structure.
8508 *
8509  * This routine is invoked to release all the SLI4 queues created for the
8510  * FCoE HBA operation.
da0436e9 8516 **/
5350d872 8517void
da0436e9
JS
8518lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8519{
1ba981fd
JS
8520 if (phba->cfg_fof)
8521 lpfc_fof_queue_destroy(phba);
8522
895427bd
JS
8523 /* Release HBA eqs */
8524 lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
8525
8526 /* Release FCP cqs */
8527 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
d1f525aa 8528 phba->cfg_fcp_io_channel);
895427bd
JS
8529
8530 /* Release FCP wqs */
8531 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
d1f525aa 8532 phba->cfg_fcp_io_channel);
895427bd
JS
8533
8534 /* Release FCP CQ mapping array */
8535 lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
8536
8537 /* Release NVME cqs */
8538 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
8539 phba->cfg_nvme_io_channel);
8540
8541 /* Release NVME wqs */
8542 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
8543 phba->cfg_nvme_io_channel);
8544
8545 /* Release NVME CQ mapping array */
8546 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
8547
bcb24f65
JS
8548 if (phba->nvmet_support) {
8549 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
8550 phba->cfg_nvmet_mrq);
2d7dbc4c 8551
bcb24f65
JS
8552 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
8553 phba->cfg_nvmet_mrq);
8554 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
8555 phba->cfg_nvmet_mrq);
8556 }
2d7dbc4c 8557
895427bd
JS
8558 /* Release mailbox command work queue */
8559 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
8560
8561 /* Release ELS work queue */
8562 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
8563
8564 /* Release ELS work queue */
8565 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
8566
8567 /* Release unsolicited receive queue */
8568 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
8569 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
8570
8571 /* Release ELS complete queue */
8572 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
8573
8574 /* Release NVME LS complete queue */
8575 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
8576
8577 /* Release mailbox command complete queue */
8578 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
8579
8580 /* Everything on this list has been freed */
8581 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8582}
8583
895427bd
JS
8584int
8585lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
8586{
8587 struct lpfc_rqb *rqbp;
8588 struct lpfc_dmabuf *h_buf;
8589 struct rqb_dmabuf *rqb_buffer;
8590
8591 rqbp = rq->rqbp;
8592 while (!list_empty(&rqbp->rqb_buffer_list)) {
8593 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
8594 struct lpfc_dmabuf, list);
8595
8596 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
8597 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8598 rqbp->buffer_count--;
67d12733 8599 }
895427bd
JS
8600 return 1;
8601}
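
/*
 * Stand-alone sketch of the drain loop above (simplified: lpfc uses the
 * kernel list_head/container_of machinery instead of this hypothetical
 * singly linked list): pop the head until the list is empty, hand each
 * buffer to the owner's free callback, and keep the count in step.
 */
struct rbuf {
	struct rbuf *next;
};

static void drain_list(struct rbuf **head,
		       void (*free_cb)(struct rbuf *), int *count)
{
	while (*head != NULL) {
		struct rbuf *b = *head;

		*head = b->next;	/* unlink before freeing */
		free_cb(b);
		(*count)--;
	}
}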
67d12733 8602
895427bd
JS
8603static int
8604lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
8605 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
8606 int qidx, uint32_t qtype)
8607{
8608 struct lpfc_sli_ring *pring;
8609 int rc;
8610
8611 if (!eq || !cq || !wq) {
8612 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8613 "6085 Fast-path %s (%d) not allocated\n",
8614 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
8615 return -ENOMEM;
8616 }
8617
8618 	/* Create the CQ first */
8619 rc = lpfc_cq_create(phba, cq, eq,
8620 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
8621 if (rc) {
8622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8623 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
8624 qidx, (uint32_t)rc);
8625 return rc;
67d12733 8626 }
81b96eda 8627 cq->chann = qidx;
67d12733 8628
895427bd
JS
8629 if (qtype != LPFC_MBOX) {
8630 /* Setup nvme_cq_map for fast lookup */
8631 if (cq_map)
8632 *cq_map = cq->queue_id;
da0436e9 8633
895427bd
JS
8634 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8635 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
8636 qidx, cq->queue_id, qidx, eq->queue_id);
da0436e9 8637
895427bd
JS
8638 /* create the wq */
8639 rc = lpfc_wq_create(phba, wq, cq, qtype);
8640 if (rc) {
8641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8642 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
8643 qidx, (uint32_t)rc);
8644 /* no need to tear down cq - caller will do so */
8645 return rc;
8646 }
81b96eda 8647 wq->chann = qidx;
da0436e9 8648
895427bd
JS
8649 /* Bind this CQ/WQ to the NVME ring */
8650 pring = wq->pring;
8651 pring->sli.sli4.wqp = (void *)wq;
8652 cq->pring = pring;
da0436e9 8653
895427bd
JS
8654 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8655 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
8656 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
8657 } else {
8658 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
8659 if (rc) {
8660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8661 "0539 Failed setup of slow-path MQ: "
8662 "rc = 0x%x\n", rc);
8663 /* no need to tear down cq - caller will do so */
8664 return rc;
8665 }
da0436e9 8666
895427bd
JS
8667 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8668 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
8669 phba->sli4_hba.mbx_wq->queue_id,
8670 phba->sli4_hba.mbx_cq->queue_id);
67d12733 8671 }
da0436e9 8672
895427bd 8673 return 0;
da0436e9
JS
8674}
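
/*
 * Call shape for the helper above, as used by the FCP loop later in
 * lpfc_sli4_queue_setup(): the EQ index wraps modulo the IRQ count so
 * that CQ/WQ pairs are spread round-robin across the available EQs.
 *
 *	rc = lpfc_create_wq_cq(phba,
 *			       phba->sli4_hba.hba_eq[qidx % io_channel],
 *			       phba->sli4_hba.fcp_cq[qidx],
 *			       phba->sli4_hba.fcp_wq[qidx],
 *			       &phba->sli4_hba.fcp_cq_map[qidx],
 *			       qidx, LPFC_FCP);
 */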
8675
8676/**
8677 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
8678 * @phba: pointer to lpfc hba data structure.
8679 *
8680 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
8681 * operation.
8682 *
8683 * Return codes
af901ca1 8684 * 0 - successful
25985edc 8685 * -ENOMEM - No available memory
d439d286 8686 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
8687 **/
8688int
8689lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8690{
962bc51b
JS
8691 uint32_t shdr_status, shdr_add_status;
8692 union lpfc_sli4_cfg_shdr *shdr;
8693 LPFC_MBOXQ_t *mboxq;
895427bd
JS
8694 int qidx;
8695 uint32_t length, io_channel;
8696 int rc = -ENOMEM;
962bc51b
JS
8697
8698 /* Check for dual-ULP support */
8699 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8700 if (!mboxq) {
8701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8702 "3249 Unable to allocate memory for "
8703 "QUERY_FW_CFG mailbox command\n");
8704 return -ENOMEM;
8705 }
8706 length = (sizeof(struct lpfc_mbx_query_fw_config) -
8707 sizeof(struct lpfc_sli4_cfg_mhdr));
8708 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8709 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
8710 length, LPFC_SLI4_MBX_EMBED);
8711
8712 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8713
8714 shdr = (union lpfc_sli4_cfg_shdr *)
8715 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
8716 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8717 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8718 if (shdr_status || shdr_add_status || rc) {
8719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8720 "3250 QUERY_FW_CFG mailbox failed with status "
8721 "x%x add_status x%x, mbx status x%x\n",
8722 shdr_status, shdr_add_status, rc);
8723 if (rc != MBX_TIMEOUT)
8724 mempool_free(mboxq, phba->mbox_mem_pool);
8725 rc = -ENXIO;
8726 goto out_error;
8727 }
8728
8729 phba->sli4_hba.fw_func_mode =
8730 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
8731 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
8732 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
8b017a30
JS
8733 phba->sli4_hba.physical_port =
8734 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
962bc51b
JS
8735 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8736 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
8737 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
8738 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
8739
8740 if (rc != MBX_TIMEOUT)
8741 mempool_free(mboxq, phba->mbox_mem_pool);
da0436e9
JS
8742
8743 /*
67d12733 8744 * Set up HBA Event Queues (EQs)
da0436e9 8745 */
895427bd 8746 io_channel = phba->io_channel_irqs;
da0436e9 8747
67d12733 8748 /* Set up HBA event queue */
895427bd 8749 if (io_channel && !phba->sli4_hba.hba_eq) {
2e90f4b5
JS
8750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8751 "3147 Fast-path EQs not allocated\n");
1b51197d 8752 rc = -ENOMEM;
67d12733 8753 goto out_error;
2e90f4b5 8754 }
895427bd
JS
8755 for (qidx = 0; qidx < io_channel; qidx++) {
8756 if (!phba->sli4_hba.hba_eq[qidx]) {
da0436e9
JS
8757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8758 "0522 Fast-path EQ (%d) not "
895427bd 8759 "allocated\n", qidx);
1b51197d 8760 rc = -ENOMEM;
895427bd 8761 goto out_destroy;
da0436e9 8762 }
895427bd
JS
8763 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
8764 phba->cfg_fcp_imax);
da0436e9
JS
8765 if (rc) {
8766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8767 "0523 Failed setup of fast-path EQ "
895427bd 8768 "(%d), rc = 0x%x\n", qidx,
a2fc4aef 8769 (uint32_t)rc);
895427bd 8770 goto out_destroy;
da0436e9
JS
8771 }
8772 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
895427bd
JS
8773 "2584 HBA EQ setup: queue[%d]-id=%d\n",
8774 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
67d12733
JS
8775 }
8776
895427bd
JS
8777 if (phba->cfg_nvme_io_channel) {
8778 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
67d12733 8779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8780 "6084 Fast-path NVME %s array not allocated\n",
8781 				(phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
67d12733 8782 rc = -ENOMEM;
895427bd 8783 goto out_destroy;
67d12733
JS
8784 }
8785
895427bd
JS
8786 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
8787 rc = lpfc_create_wq_cq(phba,
8788 phba->sli4_hba.hba_eq[
8789 qidx % io_channel],
8790 phba->sli4_hba.nvme_cq[qidx],
8791 phba->sli4_hba.nvme_wq[qidx],
8792 &phba->sli4_hba.nvme_cq_map[qidx],
8793 qidx, LPFC_NVME);
8794 if (rc) {
8795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8796 "6123 Failed to setup fastpath "
8797 "NVME WQ/CQ (%d), rc = 0x%x\n",
8798 qidx, (uint32_t)rc);
8799 goto out_destroy;
8800 }
8801 }
67d12733
JS
8802 }
8803
895427bd
JS
8804 if (phba->cfg_fcp_io_channel) {
8805 /* Set up fast-path FCP Response Complete Queue */
8806 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
67d12733 8807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8808 "3148 Fast-path FCP %s array not allocated\n",
8809 phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
67d12733 8810 rc = -ENOMEM;
895427bd 8811 goto out_destroy;
67d12733
JS
8812 }
8813
895427bd
JS
8814 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
8815 rc = lpfc_create_wq_cq(phba,
8816 phba->sli4_hba.hba_eq[
8817 qidx % io_channel],
8818 phba->sli4_hba.fcp_cq[qidx],
8819 phba->sli4_hba.fcp_wq[qidx],
8820 &phba->sli4_hba.fcp_cq_map[qidx],
8821 qidx, LPFC_FCP);
8822 if (rc) {
8823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8824 "0535 Failed to setup fastpath "
8825 "FCP WQ/CQ (%d), rc = 0x%x\n",
8826 qidx, (uint32_t)rc);
8827 goto out_destroy;
8828 }
8829 }
67d12733 8830 }
895427bd 8831
da0436e9 8832 /*
895427bd 8833 * Set up Slow Path Complete Queues (CQs)
da0436e9
JS
8834 */
8835
895427bd 8836 /* Set up slow-path MBOX CQ/MQ */
da0436e9 8837
895427bd 8838 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
da0436e9 8839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8840 "0528 %s not allocated\n",
8841 phba->sli4_hba.mbx_cq ?
d1f525aa 8842 "Mailbox WQ" : "Mailbox CQ");
1b51197d 8843 rc = -ENOMEM;
895427bd 8844 goto out_destroy;
da0436e9 8845 }
da0436e9 8846
895427bd 8847 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
d1f525aa
JS
8848 phba->sli4_hba.mbx_cq,
8849 phba->sli4_hba.mbx_wq,
8850 NULL, 0, LPFC_MBOX);
da0436e9
JS
8851 if (rc) {
8852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8853 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
8854 (uint32_t)rc);
8855 goto out_destroy;
da0436e9 8856 }
2d7dbc4c
JS
8857 if (phba->nvmet_support) {
8858 if (!phba->sli4_hba.nvmet_cqset) {
8859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8860 "3165 Fast-path NVME CQ Set "
8861 "array not allocated\n");
8862 rc = -ENOMEM;
8863 goto out_destroy;
8864 }
8865 if (phba->cfg_nvmet_mrq > 1) {
8866 rc = lpfc_cq_create_set(phba,
8867 phba->sli4_hba.nvmet_cqset,
8868 phba->sli4_hba.hba_eq,
8869 LPFC_WCQ, LPFC_NVMET);
8870 if (rc) {
8871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8872 "3164 Failed setup of NVME CQ "
8873 "Set, rc = 0x%x\n",
8874 (uint32_t)rc);
8875 goto out_destroy;
8876 }
8877 } else {
8878 /* Set up NVMET Receive Complete Queue */
8879 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
8880 phba->sli4_hba.hba_eq[0],
8881 LPFC_WCQ, LPFC_NVMET);
8882 if (rc) {
8883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8884 "6089 Failed setup NVMET CQ: "
8885 "rc = 0x%x\n", (uint32_t)rc);
8886 goto out_destroy;
8887 }
81b96eda
JS
8888 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
8889
2d7dbc4c
JS
8890 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8891 "6090 NVMET CQ setup: cq-id=%d, "
8892 "parent eq-id=%d\n",
8893 phba->sli4_hba.nvmet_cqset[0]->queue_id,
8894 phba->sli4_hba.hba_eq[0]->queue_id);
8895 }
8896 }
da0436e9 8897
895427bd
JS
8898 /* Set up slow-path ELS WQ/CQ */
8899 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
da0436e9 8900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8901 "0530 ELS %s not allocated\n",
8902 phba->sli4_hba.els_cq ? "WQ" : "CQ");
1b51197d 8903 rc = -ENOMEM;
895427bd 8904 goto out_destroy;
da0436e9 8905 }
895427bd
JS
8906 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8907 phba->sli4_hba.els_cq,
8908 phba->sli4_hba.els_wq,
8909 NULL, 0, LPFC_ELS);
da0436e9
JS
8910 if (rc) {
8911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
895427bd
JS
8912 "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
8913 (uint32_t)rc);
8914 goto out_destroy;
da0436e9
JS
8915 }
8916 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8917 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
8918 phba->sli4_hba.els_wq->queue_id,
8919 phba->sli4_hba.els_cq->queue_id);
8920
895427bd
JS
8921 if (phba->cfg_nvme_io_channel) {
8922 /* Set up NVME LS Complete Queue */
8923 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
8924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8925 "6091 LS %s not allocated\n",
8926 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
8927 rc = -ENOMEM;
8928 goto out_destroy;
8929 }
8930 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8931 phba->sli4_hba.nvmels_cq,
8932 phba->sli4_hba.nvmels_wq,
8933 NULL, 0, LPFC_NVME_LS);
8934 if (rc) {
8935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8936 "0529 Failed setup of NVVME LS WQ/CQ: "
8937 "rc = 0x%x\n", (uint32_t)rc);
8938 goto out_destroy;
8939 }
8940
8941 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8942 "6096 ELS WQ setup: wq-id=%d, "
8943 "parent cq-id=%d\n",
8944 phba->sli4_hba.nvmels_wq->queue_id,
8945 phba->sli4_hba.nvmels_cq->queue_id);
8946 }
8947
2d7dbc4c
JS
8948 /*
8949 * Create NVMET Receive Queue (RQ)
8950 */
8951 if (phba->nvmet_support) {
8952 if ((!phba->sli4_hba.nvmet_cqset) ||
8953 (!phba->sli4_hba.nvmet_mrq_hdr) ||
8954 (!phba->sli4_hba.nvmet_mrq_data)) {
8955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8956 "6130 MRQ CQ Queues not "
8957 "allocated\n");
8958 rc = -ENOMEM;
8959 goto out_destroy;
8960 }
8961 if (phba->cfg_nvmet_mrq > 1) {
8962 rc = lpfc_mrq_create(phba,
8963 phba->sli4_hba.nvmet_mrq_hdr,
8964 phba->sli4_hba.nvmet_mrq_data,
8965 phba->sli4_hba.nvmet_cqset,
8966 LPFC_NVMET);
8967 if (rc) {
8968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8969 "6098 Failed setup of NVMET "
8970 "MRQ: rc = 0x%x\n",
8971 (uint32_t)rc);
8972 goto out_destroy;
8973 }
8974
8975 } else {
8976 rc = lpfc_rq_create(phba,
8977 phba->sli4_hba.nvmet_mrq_hdr[0],
8978 phba->sli4_hba.nvmet_mrq_data[0],
8979 phba->sli4_hba.nvmet_cqset[0],
8980 LPFC_NVMET);
8981 if (rc) {
8982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8983 "6057 Failed setup of NVMET "
8984 "Receive Queue: rc = 0x%x\n",
8985 (uint32_t)rc);
8986 goto out_destroy;
8987 }
8988
8989 lpfc_printf_log(
8990 phba, KERN_INFO, LOG_INIT,
8991 "6099 NVMET RQ setup: hdr-rq-id=%d, "
8992 "dat-rq-id=%d parent cq-id=%d\n",
8993 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
8994 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
8995 phba->sli4_hba.nvmet_cqset[0]->queue_id);
8996
8997 }
8998 }
8999
da0436e9
JS
9000 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9002 "0540 Receive Queue not allocated\n");
1b51197d 9003 rc = -ENOMEM;
895427bd 9004 goto out_destroy;
da0436e9 9005 }
73d91e50 9006
da0436e9 9007 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
4d9ab994 9008 phba->sli4_hba.els_cq, LPFC_USOL);
da0436e9
JS
9009 if (rc) {
9010 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9011 "0541 Failed setup of Receive Queue: "
a2fc4aef 9012 "rc = 0x%x\n", (uint32_t)rc);
895427bd 9013 goto out_destroy;
da0436e9 9014 }
73d91e50 9015
da0436e9
JS
9016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9017 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9018 "parent cq-id=%d\n",
9019 phba->sli4_hba.hdr_rq->queue_id,
9020 phba->sli4_hba.dat_rq->queue_id,
4d9ab994 9021 phba->sli4_hba.els_cq->queue_id);
1ba981fd
JS
9022
9023 if (phba->cfg_fof) {
9024 rc = lpfc_fof_queue_setup(phba);
9025 if (rc) {
9026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9027 "0549 Failed setup of FOF Queues: "
9028 "rc = 0x%x\n", rc);
895427bd 9029 goto out_destroy;
1ba981fd
JS
9030 }
9031 }
2c9c5a00 9032
43140ca6 9033 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
0cf07f84
JS
9034 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9035 phba->cfg_fcp_imax);
43140ca6 9036
da0436e9
JS
9037 return 0;
9038
895427bd
JS
9039out_destroy:
9040 lpfc_sli4_queue_unset(phba);
da0436e9
JS
9041out_error:
9042 return rc;
9043}
9044
9045/**
9046 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9047 * @phba: pointer to lpfc hba data structure.
9048 *
9049  * This routine is invoked to tear down all the SLI4 queues created for the
9050  * FCoE HBA operation.
da0436e9
JS
9056 **/
9057void
9058lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9059{
895427bd 9060 int qidx;
da0436e9 9061
1ba981fd
JS
9062 /* Unset the queues created for Flash Optimized Fabric operations */
9063 if (phba->cfg_fof)
9064 lpfc_fof_queue_destroy(phba);
895427bd 9065
da0436e9 9066 /* Unset mailbox command work queue */
895427bd
JS
9067 if (phba->sli4_hba.mbx_wq)
9068 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9069
9070 /* Unset NVME LS work queue */
9071 if (phba->sli4_hba.nvmels_wq)
9072 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9073
da0436e9 9074 /* Unset ELS work queue */
019c0d66 9075 if (phba->sli4_hba.els_wq)
895427bd
JS
9076 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9077
da0436e9 9078 /* Unset unsolicited receive queue */
895427bd
JS
9079 if (phba->sli4_hba.hdr_rq)
9080 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9081 phba->sli4_hba.dat_rq);
9082
da0436e9 9083 /* Unset FCP work queue */
895427bd
JS
9084 if (phba->sli4_hba.fcp_wq)
9085 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
9086 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
9087
9088 /* Unset NVME work queue */
9089 if (phba->sli4_hba.nvme_wq) {
9090 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
9091 lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
67d12733 9092 }
895427bd 9093
da0436e9 9094 /* Unset mailbox command complete queue */
895427bd
JS
9095 if (phba->sli4_hba.mbx_cq)
9096 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9097
da0436e9 9098 /* Unset ELS complete queue */
895427bd
JS
9099 if (phba->sli4_hba.els_cq)
9100 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9101
9102 /* Unset NVME LS complete queue */
9103 if (phba->sli4_hba.nvmels_cq)
9104 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9105
9106 /* Unset NVME response complete queue */
9107 if (phba->sli4_hba.nvme_cq)
9108 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
9109 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
9110
bcb24f65
JS
9111 if (phba->nvmet_support) {
9112 /* Unset NVMET MRQ queue */
9113 if (phba->sli4_hba.nvmet_mrq_hdr) {
9114 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9115 lpfc_rq_destroy(
9116 phba,
2d7dbc4c
JS
9117 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9118 phba->sli4_hba.nvmet_mrq_data[qidx]);
bcb24f65 9119 }
2d7dbc4c 9120
bcb24f65
JS
9121 /* Unset NVMET CQ Set complete queue */
9122 if (phba->sli4_hba.nvmet_cqset) {
9123 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9124 lpfc_cq_destroy(
9125 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9126 }
2d7dbc4c
JS
9127 }
9128
da0436e9 9129 /* Unset FCP response complete queue */
895427bd
JS
9130 if (phba->sli4_hba.fcp_cq)
9131 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
9132 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
9133
da0436e9 9134 /* Unset fast-path event queue */
895427bd
JS
9135 if (phba->sli4_hba.hba_eq)
9136 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
9137 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
da0436e9
JS
9138}
9139
9140/**
9141 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9142 * @phba: pointer to lpfc hba data structure.
9143 *
9144 * This routine is invoked to allocate and set up a pool of completion queue
9145  * events. The body of the completion queue event is a completion queue entry
9146  * (CQE). For now, this pool is used by the interrupt service routine to queue
9147 * the following HBA completion queue events for the worker thread to process:
9148 * - Mailbox asynchronous events
9149 * - Receive queue completion unsolicited events
9150 * Later, this can be used for all the slow-path events.
9151 *
9152 * Return codes
af901ca1 9153 * 0 - successful
25985edc 9154 * -ENOMEM - No available memory
da0436e9
JS
9155 **/
9156static int
9157lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9158{
9159 struct lpfc_cq_event *cq_event;
9160 int i;
9161
9162 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9163 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9164 if (!cq_event)
9165 goto out_pool_create_fail;
9166 list_add_tail(&cq_event->list,
9167 &phba->sli4_hba.sp_cqe_event_pool);
9168 }
9169 return 0;
9170
9171out_pool_create_fail:
9172 lpfc_sli4_cq_event_pool_destroy(phba);
9173 return -ENOMEM;
9174}
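
/*
 * Stand-alone sketch of the preallocated-pool idea (hypothetical singly
 * linked list, not the kernel list_head used above): the pool is filled
 * at init time in process context with GFP_KERNEL, so the interrupt
 * path can later take an event with a constant-time list operation
 * instead of allocating memory. The unlocked pair below mirrors the
 * __lpfc_sli4_cq_event_alloc()/__lpfc_sli4_cq_event_release() split;
 * lpfc wraps them in hbalock for the locked variants.
 */
struct pool_event {
	struct pool_event *next;
};

static struct pool_event *pool_head;

static struct pool_event *pool_get(void)
{
	struct pool_event *e = pool_head;

	if (e)
		pool_head = e->next;	/* pop without allocating */
	return e;			/* NULL when the pool is exhausted */
}

static void pool_put(struct pool_event *e)
{
	e->next = pool_head;		/* push back for reuse */
	pool_head = e;
}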
9175
9176/**
9177 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9178 * @phba: pointer to lpfc hba data structure.
9179 *
9180 * This routine is invoked to free the pool of completion queue events at
9181 * driver unload time. Note that, it is the responsibility of the driver
9182 * cleanup routine to free all the outstanding completion-queue events
9183 * allocated from this pool back into the pool before invoking this routine
9184 * to destroy the pool.
9185 **/
9186static void
9187lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9188{
9189 struct lpfc_cq_event *cq_event, *next_cq_event;
9190
9191 list_for_each_entry_safe(cq_event, next_cq_event,
9192 &phba->sli4_hba.sp_cqe_event_pool, list) {
9193 list_del(&cq_event->list);
9194 kfree(cq_event);
9195 }
9196}
9197
9198/**
9199 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9200 * @phba: pointer to lpfc hba data structure.
9201 *
9202  * This routine is the lock-free version of the API invoked to allocate a
9203 * completion-queue event from the free pool.
9204 *
9205 * Return: Pointer to the newly allocated completion-queue event if successful
9206 * NULL otherwise.
9207 **/
9208struct lpfc_cq_event *
9209__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9210{
9211 struct lpfc_cq_event *cq_event = NULL;
9212
9213 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9214 struct lpfc_cq_event, list);
9215 return cq_event;
9216}
9217
9218/**
9219 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9220 * @phba: pointer to lpfc hba data structure.
9221 *
9222  * This routine is the locked version of the API invoked to allocate a
9223 * completion-queue event from the free pool.
9224 *
9225 * Return: Pointer to the newly allocated completion-queue event if successful
9226 * NULL otherwise.
9227 **/
9228struct lpfc_cq_event *
9229lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9230{
9231 struct lpfc_cq_event *cq_event;
9232 unsigned long iflags;
9233
9234 spin_lock_irqsave(&phba->hbalock, iflags);
9235 cq_event = __lpfc_sli4_cq_event_alloc(phba);
9236 spin_unlock_irqrestore(&phba->hbalock, iflags);
9237 return cq_event;
9238}
9239
9240/**
9241 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9242 * @phba: pointer to lpfc hba data structure.
9243 * @cq_event: pointer to the completion queue event to be freed.
9244 *
9245  * This routine is the lock-free version of the API invoked to release a
9246 * completion-queue event back into the free pool.
9247 **/
9248void
9249__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9250 struct lpfc_cq_event *cq_event)
9251{
9252 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9253}
9254
9255/**
9256 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9257 * @phba: pointer to lpfc hba data structure.
9258 * @cq_event: pointer to the completion queue event to be freed.
9259 *
9260  * This routine is the locked version of the API invoked to release a
9261 * completion-queue event back into the free pool.
9262 **/
9263void
9264lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9265 struct lpfc_cq_event *cq_event)
9266{
9267 unsigned long iflags;
9268 spin_lock_irqsave(&phba->hbalock, iflags);
9269 __lpfc_sli4_cq_event_release(phba, cq_event);
9270 spin_unlock_irqrestore(&phba->hbalock, iflags);
9271}
9272
9273/**
9274 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9275 * @phba: pointer to lpfc hba data structure.
9276 *
9277  * This routine frees all the pending completion-queue events back into
9278  * the free pool for device reset.
9279 **/
9280static void
9281lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
9282{
9283 LIST_HEAD(cqelist);
9284 struct lpfc_cq_event *cqe;
9285 unsigned long iflags;
9286
9287 /* Retrieve all the pending WCQEs from pending WCQE lists */
9288 spin_lock_irqsave(&phba->hbalock, iflags);
9289 /* Pending FCP XRI abort events */
9290 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
9291 &cqelist);
9292 /* Pending ELS XRI abort events */
9293 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9294 &cqelist);
9295 	/* Pending async events */
9296 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9297 &cqelist);
9298 spin_unlock_irqrestore(&phba->hbalock, iflags);
9299
9300 while (!list_empty(&cqelist)) {
9301 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
9302 lpfc_sli4_cq_event_release(phba, cqe);
9303 }
9304}
9305
9306/**
9307 * lpfc_pci_function_reset - Reset pci function.
9308 * @phba: pointer to lpfc hba data structure.
9309 *
9310  * This routine is invoked to request a PCI function reset. It destroys
9311  * all resources assigned to the PCI function that originates this request.
9312 *
9313 * Return codes
af901ca1 9314 * 0 - successful
25985edc 9315 * -ENOMEM - No available memory
d439d286 9316 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
9317 **/
9318int
9319lpfc_pci_function_reset(struct lpfc_hba *phba)
9320{
9321 LPFC_MBOXQ_t *mboxq;
2fcee4bf 9322 uint32_t rc = 0, if_type;
da0436e9 9323 uint32_t shdr_status, shdr_add_status;
2f6fa2c9
JS
9324 uint32_t rdy_chk;
9325 uint32_t port_reset = 0;
da0436e9 9326 union lpfc_sli4_cfg_shdr *shdr;
2fcee4bf 9327 struct lpfc_register reg_data;
2b81f942 9328 uint16_t devid;
da0436e9 9329
2fcee4bf
JS
9330 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9331 switch (if_type) {
9332 case LPFC_SLI_INTF_IF_TYPE_0:
9333 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9334 GFP_KERNEL);
9335 if (!mboxq) {
9336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9337 "0494 Unable to allocate memory for "
9338 "issuing SLI_FUNCTION_RESET mailbox "
9339 "command\n");
9340 return -ENOMEM;
9341 }
da0436e9 9342
2fcee4bf
JS
9343 /* Setup PCI function reset mailbox-ioctl command */
9344 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9345 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
9346 LPFC_SLI4_MBX_EMBED);
9347 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9348 shdr = (union lpfc_sli4_cfg_shdr *)
9349 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9350 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9351 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
9352 &shdr->response);
9353 if (rc != MBX_TIMEOUT)
9354 mempool_free(mboxq, phba->mbox_mem_pool);
9355 if (shdr_status || shdr_add_status || rc) {
9356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9357 "0495 SLI_FUNCTION_RESET mailbox "
9358 "failed with status x%x add_status x%x,"
9359 " mbx status x%x\n",
9360 shdr_status, shdr_add_status, rc);
9361 rc = -ENXIO;
9362 }
9363 break;
9364 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 9365 case LPFC_SLI_INTF_IF_TYPE_6:
2f6fa2c9
JS
9366wait:
9367 /*
9368 * Poll the Port Status Register and wait for RDY for
9369 * up to 30 seconds. If the port doesn't respond, treat
9370 * it as an error.
9371 */
77d093fb 9372 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
2f6fa2c9
JS
9373 if (lpfc_readl(phba->sli4_hba.u.if_type2.
9374 STATUSregaddr, &reg_data.word0)) {
9375 rc = -ENODEV;
9376 goto out;
9377 }
9378 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
9379 break;
9380 msleep(20);
9381 }
9382
9383 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
9384 phba->work_status[0] = readl(
9385 phba->sli4_hba.u.if_type2.ERR1regaddr);
9386 phba->work_status[1] = readl(
9387 phba->sli4_hba.u.if_type2.ERR2regaddr);
9388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9389 "2890 Port not ready, port status reg "
9390 "0x%x error 1=0x%x, error 2=0x%x\n",
9391 reg_data.word0,
9392 phba->work_status[0],
9393 phba->work_status[1]);
9394 rc = -ENODEV;
9395 goto out;
9396 }
9397
9398 if (!port_reset) {
9399 /*
9400 * Reset the port now
9401 */
2fcee4bf
JS
9402 reg_data.word0 = 0;
9403 bf_set(lpfc_sliport_ctrl_end, &reg_data,
9404 LPFC_SLIPORT_LITTLE_ENDIAN);
9405 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
9406 LPFC_SLIPORT_INIT_PORT);
9407 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
9408 CTRLregaddr);
8fcb8acd 9409 /* flush */
2b81f942
JS
9410 pci_read_config_word(phba->pcidev,
9411 PCI_DEVICE_ID, &devid);
2fcee4bf 9412
2f6fa2c9
JS
9413 port_reset = 1;
9414 msleep(20);
9415 goto wait;
9416 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
9417 rc = -ENODEV;
9418 goto out;
2fcee4bf
JS
9419 }
9420 break;
2f6fa2c9 9421
2fcee4bf
JS
9422 case LPFC_SLI_INTF_IF_TYPE_1:
9423 default:
9424 break;
da0436e9 9425 }
2fcee4bf 9426
73d91e50 9427out:
2fcee4bf 9428 /* Catch the not-ready port failure after a port reset. */
2f6fa2c9 9429 if (rc) {
229adb0e
JS
9430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9431 "3317 HBA not functional: IP Reset Failed "
2f6fa2c9 9432 "try: echo fw_reset > board_mode\n");
2fcee4bf 9433 rc = -ENODEV;
229adb0e 9434 }
2fcee4bf 9435
da0436e9
JS
9436 return rc;
9437}
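
/*
 * Generic shape of the bounded readiness poll above (hypothetical
 * callback, not lpfc code): 1500 iterations of a 20 ms sleep give the
 * ~30 second budget mentioned in the comment, with no unbounded wait.
 */
static bool wait_port_ready(bool (*ready)(void))
{
	int i;

	for (i = 0; i < 1500; i++) {	/* 1500 * 20 ms = 30 s budget */
		if (ready())
			return true;
		msleep(20);
	}
	return false;			/* caller logs the error registers */
}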
9438
da0436e9
JS
9439/**
9440 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
9441 * @phba: pointer to lpfc hba data structure.
9442 *
9443 * This routine is invoked to set up the PCI device memory space for device
9444 * with SLI-4 interface spec.
9445 *
9446 * Return codes
af901ca1 9447 * 0 - successful
da0436e9
JS
9448 * other values - error
9449 **/
9450static int
9451lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
9452{
9453 struct pci_dev *pdev;
9454 unsigned long bar0map_len, bar1map_len, bar2map_len;
9455 int error = -ENODEV;
2fcee4bf 9456 uint32_t if_type;
da0436e9
JS
9457
9458 /* Obtain PCI device reference */
9459 if (!phba->pcidev)
9460 return error;
9461 else
9462 pdev = phba->pcidev;
9463
9464 /* Set the device DMA mask size */
8e68597d
MR
9465 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
9466 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
9467 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
9468 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
da0436e9 9469 return error;
8e68597d
MR
9470 }
9471 }
da0436e9 9472
2fcee4bf
JS
9473 /*
9474 * The BARs and register set definitions and offset locations are
9475 * dependent on the if_type.
9476 */
9477 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
9478 &phba->sli4_hba.sli_intf.word0)) {
9479 return error;
9480 }
9481
9482 /* There is no SLI3 failback for SLI4 devices. */
9483 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
9484 LPFC_SLI_INTF_VALID) {
9485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9486 "2894 SLI_INTF reg contents invalid "
9487 "sli_intf reg 0x%x\n",
9488 phba->sli4_hba.sli_intf.word0);
9489 return error;
9490 }
9491
9492 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9493 /*
9494 * Get the bus address of SLI4 device Bar regions and the
9495 * number of bytes required by each mapping. The mapping of the
9496 * particular PCI BARs regions is dependent on the type of
9497 * SLI4 device.
da0436e9 9498 */
f5ca6f2e
JS
9499 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
9500 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
9501 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
2fcee4bf
JS
9502
9503 /*
9504 * Map SLI4 PCI Config Space Register base to a kernel virtual
9505 * addr
9506 */
9507 phba->sli4_hba.conf_regs_memmap_p =
9508 ioremap(phba->pci_bar0_map, bar0map_len);
9509 if (!phba->sli4_hba.conf_regs_memmap_p) {
9510 dev_printk(KERN_ERR, &pdev->dev,
9511 "ioremap failed for SLI4 PCI config "
9512 "registers.\n");
9513 goto out;
9514 }
f5ca6f2e 9515 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
2fcee4bf
JS
9516 /* Set up BAR0 PCI config space register memory map */
9517 lpfc_sli4_bar0_register_memmap(phba, if_type);
1dfb5a47
JS
9518 } else {
9519 phba->pci_bar0_map = pci_resource_start(pdev, 1);
9520 bar0map_len = pci_resource_len(pdev, 1);
27d6ac0a 9521 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
2fcee4bf
JS
9522 dev_printk(KERN_ERR, &pdev->dev,
9523 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
9524 goto out;
9525 }
9526 phba->sli4_hba.conf_regs_memmap_p =
da0436e9 9527 ioremap(phba->pci_bar0_map, bar0map_len);
2fcee4bf
JS
9528 if (!phba->sli4_hba.conf_regs_memmap_p) {
9529 dev_printk(KERN_ERR, &pdev->dev,
9530 "ioremap failed for SLI4 PCI config "
9531 "registers.\n");
9532 goto out;
9533 }
9534 lpfc_sli4_bar0_register_memmap(phba, if_type);
da0436e9
JS
9535 }
9536
e4b9794e
JS
9537 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
9538 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
9539 /*
9540 * Map SLI4 if type 0 HBA Control Register base to a
9541 * kernel virtual address and setup the registers.
9542 */
9543 phba->pci_bar1_map = pci_resource_start(pdev,
9544 PCI_64BIT_BAR2);
9545 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
9546 phba->sli4_hba.ctrl_regs_memmap_p =
9547 ioremap(phba->pci_bar1_map,
9548 bar1map_len);
9549 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
9550 dev_err(&pdev->dev,
9551 "ioremap failed for SLI4 HBA "
9552 "control registers.\n");
9553 error = -ENOMEM;
9554 goto out_iounmap_conf;
9555 }
9556 phba->pci_bar2_memmap_p =
9557 phba->sli4_hba.ctrl_regs_memmap_p;
27d6ac0a 9558 lpfc_sli4_bar1_register_memmap(phba, if_type);
e4b9794e
JS
9559 } else {
9560 error = -ENOMEM;
2fcee4bf
JS
9561 goto out_iounmap_conf;
9562 }
da0436e9
JS
9563 }
9564
27d6ac0a
JS
9565 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
9566 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
9567 /*
9568 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
9569 * virtual address and setup the registers.
9570 */
9571 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
9572 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
9573 phba->sli4_hba.drbl_regs_memmap_p =
9574 ioremap(phba->pci_bar1_map, bar1map_len);
9575 if (!phba->sli4_hba.drbl_regs_memmap_p) {
9576 dev_err(&pdev->dev,
9577 "ioremap failed for SLI4 HBA doorbell registers.\n");
9578 goto out_iounmap_conf;
9579 }
9580 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
9581 lpfc_sli4_bar1_register_memmap(phba, if_type);
9582 }
9583
e4b9794e
JS
9584 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
9585 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
9586 /*
9587 * Map SLI4 if type 0 HBA Doorbell Register base to
9588 * a kernel virtual address and setup the registers.
9589 */
9590 phba->pci_bar2_map = pci_resource_start(pdev,
9591 PCI_64BIT_BAR4);
9592 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
9593 phba->sli4_hba.drbl_regs_memmap_p =
9594 ioremap(phba->pci_bar2_map,
9595 bar2map_len);
9596 if (!phba->sli4_hba.drbl_regs_memmap_p) {
9597 dev_err(&pdev->dev,
9598 "ioremap failed for SLI4 HBA"
9599 " doorbell registers.\n");
9600 error = -ENOMEM;
9601 goto out_iounmap_ctrl;
9602 }
9603 phba->pci_bar4_memmap_p =
9604 phba->sli4_hba.drbl_regs_memmap_p;
9605 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
9606 if (error)
9607 goto out_iounmap_all;
9608 } else {
9609 error = -ENOMEM;
2fcee4bf 9610 goto out_iounmap_all;
e4b9794e 9611 }
da0436e9
JS
9612 }
9613
1351e69f
JS
9614 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
9615 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
9616 /*
9617 * Map SLI4 if type 6 HBA DPP Register base to a kernel
9618 * virtual address and setup the registers.
9619 */
9620 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
9621 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
9622 phba->sli4_hba.dpp_regs_memmap_p =
9623 ioremap(phba->pci_bar2_map, bar2map_len);
9624 if (!phba->sli4_hba.dpp_regs_memmap_p) {
9625 dev_err(&pdev->dev,
9626 "ioremap failed for SLI4 HBA dpp registers.\n");
9627 goto out_iounmap_ctrl;
9628 }
9629 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
9630 }
9631
b71413dd 9632 	/* Set up the EQ/CQ register handling functions now */
27d6ac0a
JS
9633 switch (if_type) {
9634 case LPFC_SLI_INTF_IF_TYPE_0:
9635 case LPFC_SLI_INTF_IF_TYPE_2:
b71413dd
JS
9636 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
9637 phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
9638 phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
27d6ac0a
JS
9639 break;
9640 case LPFC_SLI_INTF_IF_TYPE_6:
9641 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
9642 phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
9643 phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
9644 break;
9645 default:
9646 break;
b71413dd
JS
9647 }
9648
da0436e9
JS
9649 return 0;
9650
9651out_iounmap_all:
9652 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9653out_iounmap_ctrl:
9654 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9655out_iounmap_conf:
9656 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9657out:
9658 return error;
9659}
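The if_type switch above installs the EQ/CQ handlers as function pointers once at setup time, so the hot path never re-tests the interface type. A minimal userspace sketch of the same dispatch pattern; all names here are hypothetical and only model the idea, not the driver's real API:

#include <stdio.h>

/* Hypothetical EQ handler table, selected once by interface type. */
struct eq_ops {
	void (*clr_intr)(void);
	void (*release)(void);
};

static void eq_clr_intr_legacy(void) { puts("legacy EQ clear"); }
static void eq_release_legacy(void)  { puts("legacy EQ release"); }
static void eq_clr_intr_if6(void)    { puts("if_type 6 EQ clear"); }
static void eq_release_if6(void)     { puts("if_type 6 EQ release"); }

static struct eq_ops pick_eq_ops(int if_type)
{
	struct eq_ops ops;

	switch (if_type) {
	case 6:				/* newer interface, new handlers */
		ops.clr_intr = eq_clr_intr_if6;
		ops.release  = eq_release_if6;
		break;
	default:			/* if_type 0 and 2 share one set */
		ops.clr_intr = eq_clr_intr_legacy;
		ops.release  = eq_release_legacy;
		break;
	}
	return ops;
}

int main(void)
{
	struct eq_ops ops = pick_eq_ops(6);

	ops.clr_intr();		/* hot path: indirect call, no re-test */
	ops.release();
	return 0;
}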
9660
9661/**
9662 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
9663 * @phba: pointer to lpfc hba data structure.
9664 *
9665 * This routine is invoked to unset the PCI device memory space for device
9666 * with SLI-4 interface spec.
9667 **/
9668static void
9669lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
9670{
2e90f4b5
JS
9671 uint32_t if_type;
9672 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
da0436e9 9673
2e90f4b5
JS
9674 switch (if_type) {
9675 case LPFC_SLI_INTF_IF_TYPE_0:
9676 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9677 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9678 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9679 break;
9680 case LPFC_SLI_INTF_IF_TYPE_2:
9681 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9682 break;
27d6ac0a
JS
9683 case LPFC_SLI_INTF_IF_TYPE_6:
9684 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9685 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9686 break;
2e90f4b5
JS
9687 case LPFC_SLI_INTF_IF_TYPE_1:
9688 default:
9689 dev_printk(KERN_ERR, &phba->pcidev->dev,
9690 "FATAL - unsupported SLI4 interface type - %d\n",
9691 if_type);
9692 break;
9693 }
da0436e9
JS
9694}
9695
9696/**
9697 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
9698 * @phba: pointer to lpfc hba data structure.
9699 *
9700 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 9701 * with SLI-3 interface specs.
da0436e9
JS
9702 *
9703 * Return codes
af901ca1 9704 * 0 - successful
da0436e9
JS
9705 * other values - error
9706 **/
9707static int
9708lpfc_sli_enable_msix(struct lpfc_hba *phba)
9709{
45ffac19 9710 int rc;
da0436e9
JS
9711 LPFC_MBOXQ_t *pmb;
9712
9713 /* Set up MSI-X multi-message vectors */
45ffac19
CH
9714 rc = pci_alloc_irq_vectors(phba->pcidev,
9715 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
9716 if (rc < 0) {
da0436e9
JS
9717 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9718 "0420 PCI enable MSI-X failed (%d)\n", rc);
029165ac 9719 goto vec_fail_out;
da0436e9 9720 }
45ffac19 9721
da0436e9
JS
9722 /*
9723 * Assign MSI-X vectors to interrupt handlers
9724 */
9725
9726 /* vector-0 is associated to slow-path handler */
45ffac19 9727 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
ed243d37 9728 &lpfc_sli_sp_intr_handler, 0,
da0436e9
JS
9729 LPFC_SP_DRIVER_HANDLER_NAME, phba);
9730 if (rc) {
9731 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9732 "0421 MSI-X slow-path request_irq failed "
9733 "(%d)\n", rc);
9734 goto msi_fail_out;
9735 }
9736
9737 /* vector-1 is associated to fast-path handler */
45ffac19 9738 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
ed243d37 9739 &lpfc_sli_fp_intr_handler, 0,
da0436e9
JS
9740 LPFC_FP_DRIVER_HANDLER_NAME, phba);
9741
9742 if (rc) {
9743 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9744 "0429 MSI-X fast-path request_irq failed "
9745 "(%d)\n", rc);
9746 goto irq_fail_out;
9747 }
9748
9749 /*
9750 * Configure HBA MSI-X attention conditions to messages
9751 */
9752 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9753
9754 if (!pmb) {
9755 rc = -ENOMEM;
9756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9757 "0474 Unable to allocate memory for issuing "
9758 "MBOX_CONFIG_MSI command\n");
9759 goto mem_fail_out;
9760 }
9761 rc = lpfc_config_msi(phba, pmb);
9762 if (rc)
9763 goto mbx_fail_out;
9764 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9765 if (rc != MBX_SUCCESS) {
9766 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
9767 "0351 Config MSI mailbox command failed, "
9768 "mbxCmd x%x, mbxStatus x%x\n",
9769 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
9770 goto mbx_fail_out;
9771 }
9772
9773 /* Free memory allocated for mailbox command */
9774 mempool_free(pmb, phba->mbox_mem_pool);
9775 return rc;
9776
9777mbx_fail_out:
9778 /* Free memory allocated for mailbox command */
9779 mempool_free(pmb, phba->mbox_mem_pool);
9780
9781mem_fail_out:
9782 /* free the irq already requested */
45ffac19 9783 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
da0436e9
JS
9784
9785irq_fail_out:
9786 /* free the irq already requested */
45ffac19 9787 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
da0436e9
JS
9788
9789msi_fail_out:
9790 /* Unconfigure MSI-X capability structure */
45ffac19 9791 pci_free_irq_vectors(phba->pcidev);
029165ac
AG
9792
9793vec_fail_out:
da0436e9
JS
9794 return rc;
9795}
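The routine above acquires its resources in order (vectors, slow-path IRQ, fast-path IRQ, mailbox) and unwinds them in exact reverse order through a chain of goto labels, each label freeing one more resource than the label below it. A compilable userspace sketch of that idiom; every name is a hypothetical stand-in:

#include <stdio.h>

/* Hypothetical stand-ins for the real setup steps; 0 means success. */
static int alloc_vectors(void)         { return 0; }
static int request_slow_path_irq(void) { return 0; }
static int request_fast_path_irq(void) { return 0; }
static int config_msi_mailbox(void)    { return 0; }
static void free_fast_path_irq(void)   { puts("free fast-path irq"); }
static void free_slow_path_irq(void)   { puts("free slow-path irq"); }
static void free_vectors(void)         { puts("free vectors"); }

static int enable_msix_sketch(void)
{
	int rc;

	rc = alloc_vectors();
	if (rc)
		goto vec_fail_out;
	rc = request_slow_path_irq();
	if (rc)
		goto msi_fail_out;
	rc = request_fast_path_irq();
	if (rc)
		goto irq_fail_out;
	rc = config_msi_mailbox();
	if (rc)
		goto mbx_fail_out;
	return 0;

mbx_fail_out:			/* unwind strictly in reverse order */
	free_fast_path_irq();
irq_fail_out:
	free_slow_path_irq();
msi_fail_out:
	free_vectors();
vec_fail_out:
	return rc;
}

int main(void)
{
	return enable_msix_sketch();
}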
9796
da0436e9
JS
9797/**
9798 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
9799 * @phba: pointer to lpfc hba data structure.
9800 *
9801 * This routine is invoked to enable the MSI interrupt mode to device with
9802 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 9803  * enable the MSI vector. The device driver is responsible for calling
 9804  * request_irq() to register the MSI vector with an interrupt handler, which
9805 * is done in this function.
9806 *
9807 * Return codes
af901ca1 9808 * 0 - successful
da0436e9
JS
9809 * other values - error
9810 */
9811static int
9812lpfc_sli_enable_msi(struct lpfc_hba *phba)
9813{
9814 int rc;
9815
9816 rc = pci_enable_msi(phba->pcidev);
9817 if (!rc)
9818 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9819 "0462 PCI enable MSI mode success.\n");
9820 else {
9821 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9822 "0471 PCI enable MSI mode failed (%d)\n", rc);
9823 return rc;
9824 }
9825
9826 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
ed243d37 9827 0, LPFC_DRIVER_NAME, phba);
da0436e9
JS
9828 if (rc) {
9829 pci_disable_msi(phba->pcidev);
9830 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9831 "0478 MSI request_irq failed (%d)\n", rc);
9832 }
9833 return rc;
9834}
9835
da0436e9
JS
9836/**
9837 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
9838 * @phba: pointer to lpfc hba data structure.
9839 *
9840 * This routine is invoked to enable device interrupt and associate driver's
9841 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 9842  * spec. Depending on the interrupt mode configured for the driver, the driver
 9843  * will try to fall back from the configured interrupt mode to an interrupt
9844 * mode which is supported by the platform, kernel, and device in the order
9845 * of:
9846 * MSI-X -> MSI -> IRQ.
9847 *
9848 * Return codes
af901ca1 9849 * 0 - successful
da0436e9
JS
9850 * other values - error
9851 **/
9852static uint32_t
9853lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9854{
9855 uint32_t intr_mode = LPFC_INTR_ERROR;
9856 int retval;
9857
9858 if (cfg_mode == 2) {
9859 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
9860 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
9861 if (!retval) {
9862 /* Now, try to enable MSI-X interrupt mode */
9863 retval = lpfc_sli_enable_msix(phba);
9864 if (!retval) {
9865 /* Indicate initialization to MSI-X mode */
9866 phba->intr_type = MSIX;
9867 intr_mode = 2;
9868 }
9869 }
9870 }
9871
9872 /* Fallback to MSI if MSI-X initialization failed */
9873 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9874 retval = lpfc_sli_enable_msi(phba);
9875 if (!retval) {
9876 /* Indicate initialization to MSI mode */
9877 phba->intr_type = MSI;
9878 intr_mode = 1;
9879 }
9880 }
9881
 9882 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
9883 if (phba->intr_type == NONE) {
9884 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
9885 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9886 if (!retval) {
9887 /* Indicate initialization to INTx mode */
9888 phba->intr_type = INTx;
9889 intr_mode = 0;
9890 }
9891 }
9892 return intr_mode;
9893}
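The body above is the MSI-X -> MSI -> INTx ladder from the kernel-doc: each stage runs only if no earlier stage succeeded, and the returned mode encodes what finally stuck. A minimal sketch of the ladder, compilable in userspace; the try_* helpers are hypothetical:

#include <stdio.h>

enum intr_type { NONE, INTX, MSI, MSIX };

/* Hypothetical enable helpers; 0 means success.  Pretend MSI-X fails. */
static int try_msix(void) { return -1; }
static int try_msi(void)  { return 0; }
static int try_intx(void) { return 0; }

/* cfg_mode: 2 = prefer MSI-X, 1 = prefer MSI, 0 = INTx only. */
static enum intr_type enable_intr_sketch(int cfg_mode)
{
	enum intr_type type = NONE;

	if (cfg_mode == 2 && !try_msix())
		type = MSIX;
	if (cfg_mode >= 1 && type == NONE && !try_msi())
		type = MSI;			/* fall back to MSI */
	if (type == NONE && !try_intx())
		type = INTX;			/* last resort: INTx */
	return type;
}

int main(void)
{
	printf("settled on type %d\n", enable_intr_sketch(2));
	return 0;
}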
9894
9895/**
9896 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
9897 * @phba: pointer to lpfc hba data structure.
9898 *
9899 * This routine is invoked to disable device interrupt and disassociate the
9900 * driver's interrupt handler(s) from interrupt vector(s) to device with
9901 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
9902 * release the interrupt vector(s) for the message signaled interrupt.
9903 **/
9904static void
9905lpfc_sli_disable_intr(struct lpfc_hba *phba)
9906{
45ffac19
CH
9907 int nr_irqs, i;
9908
da0436e9 9909 if (phba->intr_type == MSIX)
45ffac19
CH
9910 nr_irqs = LPFC_MSIX_VECTORS;
9911 else
9912 nr_irqs = 1;
9913
9914 for (i = 0; i < nr_irqs; i++)
9915 free_irq(pci_irq_vector(phba->pcidev, i), phba);
9916 pci_free_irq_vectors(phba->pcidev);
da0436e9
JS
9917
9918 /* Reset interrupt management states */
9919 phba->intr_type = NONE;
9920 phba->sli.slistat.sli_intr = 0;
da0436e9
JS
9921}
9922
7bb03bbf 9923/**
895427bd 9924 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
7bb03bbf 9925 * @phba: pointer to lpfc hba data structure.
895427bd
JS
9926 * @vectors: number of msix vectors allocated.
9927 *
9928 * The routine will figure out the CPU affinity assignment for every
9929 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
9930 * with a pointer to the CPU mask that defines ALL the CPUs this vector
 9931  * can be associated with. If the vector can be uniquely associated with
9932 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
9933 * In addition, the CPU to IO channel mapping will be calculated
9934 * and the phba->sli4_hba.cpu_map array will reflect this.
7bb03bbf 9935 */
895427bd
JS
9936static void
9937lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
7bb03bbf
JS
9938{
9939 struct lpfc_vector_map_info *cpup;
895427bd
JS
9940 int index = 0;
9941 int vec = 0;
7bb03bbf 9942 int cpu;
7bb03bbf
JS
9943#ifdef CONFIG_X86
9944 struct cpuinfo_x86 *cpuinfo;
9945#endif
7bb03bbf
JS
9946
9947 /* Init cpu_map array */
9948 memset(phba->sli4_hba.cpu_map, 0xff,
9949 (sizeof(struct lpfc_vector_map_info) *
895427bd 9950 phba->sli4_hba.num_present_cpu));
7bb03bbf
JS
9951
9952 /* Update CPU map with physical id and core id of each CPU */
9953 cpup = phba->sli4_hba.cpu_map;
9954 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
9955#ifdef CONFIG_X86
9956 cpuinfo = &cpu_data(cpu);
9957 cpup->phys_id = cpuinfo->phys_proc_id;
9958 cpup->core_id = cpuinfo->cpu_core_id;
9959#else
9960 /* No distinction between CPUs for other platforms */
9961 cpup->phys_id = 0;
9962 cpup->core_id = 0;
9963#endif
895427bd
JS
9964 cpup->channel_id = index; /* For now round robin */
9965 cpup->irq = pci_irq_vector(phba->pcidev, vec);
9966 vec++;
9967 if (vec >= vectors)
9968 vec = 0;
9969 index++;
9970 if (index >= phba->cfg_fcp_io_channel)
9971 index = 0;
7bb03bbf
JS
9972 cpup++;
9973 }
7bb03bbf
JS
9974}
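The loop above walks every present CPU and hands out vectors and IO channels round-robin, with each counter wrapping independently when it reaches its own limit. A standalone sketch of just that mapping; the struct and counts are illustrative, not the driver's:

#include <stdio.h>

#define NR_CPUS_SKETCH 8

/* Hypothetical map entry; the real lpfc_vector_map_info holds more. */
struct cpu_map_entry {
	int channel_id;
	int vector;
};

int main(void)
{
	struct cpu_map_entry map[NR_CPUS_SKETCH];
	int vectors = 3, channels = 4;	/* illustrative counts */
	int vec = 0, chan = 0, cpu;

	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		map[cpu].vector = vec;
		map[cpu].channel_id = chan;
		if (++vec >= vectors)	/* each counter wraps on its own */
			vec = 0;
		if (++chan >= channels)
			chan = 0;
	}
	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		printf("cpu %d -> vector %d, channel %d\n",
		       cpu, map[cpu].vector, map[cpu].channel_id);
	return 0;
}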
9975
9976
da0436e9
JS
9977/**
9978 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
9979 * @phba: pointer to lpfc hba data structure.
9980 *
9981 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 9982 * with SLI-4 interface spec.
da0436e9
JS
9983 *
9984 * Return codes
af901ca1 9985 * 0 - successful
da0436e9
JS
9986 * other values - error
9987 **/
9988static int
9989lpfc_sli4_enable_msix(struct lpfc_hba *phba)
9990{
75baf696 9991 int vectors, rc, index;
b83d005e 9992 char *name;
da0436e9
JS
9993
9994 /* Set up MSI-X multi-message vectors */
895427bd 9995 vectors = phba->io_channel_irqs;
45ffac19 9996 if (phba->cfg_fof)
1ba981fd 9997 vectors++;
45ffac19 9998
f358dd0c
JS
9999 rc = pci_alloc_irq_vectors(phba->pcidev,
10000 (phba->nvmet_support) ? 1 : 2,
10001 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
4f871e1b 10002 if (rc < 0) {
da0436e9
JS
10003 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10004 "0484 PCI enable MSI-X failed (%d)\n", rc);
029165ac 10005 goto vec_fail_out;
da0436e9 10006 }
4f871e1b 10007 vectors = rc;
75baf696 10008
7bb03bbf 10009 /* Assign MSI-X vectors to interrupt handlers */
67d12733 10010 for (index = 0; index < vectors; index++) {
b83d005e
JS
10011 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
10012 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
10013 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
4305f183 10014 LPFC_DRIVER_HANDLER_NAME"%d", index);
da0436e9 10015
895427bd
JS
10016 phba->sli4_hba.hba_eq_hdl[index].idx = index;
10017 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
10018 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
1ba981fd 10019 if (phba->cfg_fof && (index == (vectors - 1)))
45ffac19 10020 rc = request_irq(pci_irq_vector(phba->pcidev, index),
ed243d37 10021 &lpfc_sli4_fof_intr_handler, 0,
b83d005e 10022 name,
895427bd 10023 &phba->sli4_hba.hba_eq_hdl[index]);
1ba981fd 10024 else
45ffac19 10025 rc = request_irq(pci_irq_vector(phba->pcidev, index),
ed243d37 10026 &lpfc_sli4_hba_intr_handler, 0,
b83d005e 10027 name,
895427bd 10028 &phba->sli4_hba.hba_eq_hdl[index]);
da0436e9
JS
10029 if (rc) {
10030 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10031 "0486 MSI-X fast-path (%d) "
10032 "request_irq failed (%d)\n", index, rc);
10033 goto cfg_fail_out;
10034 }
10035 }
10036
1ba981fd
JS
10037 if (phba->cfg_fof)
10038 vectors--;
10039
895427bd 10040 if (vectors != phba->io_channel_irqs) {
82c3e9ba
JS
10041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10042 "3238 Reducing IO channels to match number of "
10043 "MSI-X vectors, requested %d got %d\n",
895427bd
JS
10044 phba->io_channel_irqs, vectors);
10045 if (phba->cfg_fcp_io_channel > vectors)
10046 phba->cfg_fcp_io_channel = vectors;
10047 if (phba->cfg_nvme_io_channel > vectors)
10048 phba->cfg_nvme_io_channel = vectors;
10049 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
10050 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10051 else
10052 phba->io_channel_irqs = phba->cfg_nvme_io_channel;
82c3e9ba 10053 }
895427bd 10054 lpfc_cpu_affinity_check(phba, vectors);
7bb03bbf 10055
da0436e9
JS
10056 return rc;
10057
10058cfg_fail_out:
10059 /* free the irq already requested */
895427bd
JS
10060 for (--index; index >= 0; index--)
10061 free_irq(pci_irq_vector(phba->pcidev, index),
10062 &phba->sli4_hba.hba_eq_hdl[index]);
da0436e9 10063
da0436e9 10064 /* Unconfigure MSI-X capability structure */
45ffac19 10065 pci_free_irq_vectors(phba->pcidev);
029165ac
AG
10066
10067vec_fail_out:
da0436e9
JS
10068 return rc;
10069}
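Because pci_alloc_irq_vectors() may grant fewer vectors than requested, the code above clamps both the FCP and NVME channel counts to the granted total and recomputes io_channel_irqs as the larger of the two. A small sketch of that clamping logic, with hypothetical parameter names:

#include <stdio.h>

/* Hypothetical clamp mirroring the "reduce IO channels" step above. */
static void clamp_channels(int granted, int *fcp_ch, int *nvme_ch, int *irqs)
{
	if (*fcp_ch > granted)
		*fcp_ch = granted;
	if (*nvme_ch > granted)
		*nvme_ch = granted;
	/* io_channel_irqs becomes the larger of the two clamped counts */
	*irqs = (*fcp_ch > *nvme_ch) ? *fcp_ch : *nvme_ch;
}

int main(void)
{
	int fcp = 8, nvme = 6, irqs = 8;

	clamp_channels(4, &fcp, &nvme, &irqs);	/* only 4 vectors granted */
	printf("fcp=%d nvme=%d irqs=%d\n", fcp, nvme, irqs);
	return 0;
}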
10070
da0436e9
JS
10071/**
10072 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
10073 * @phba: pointer to lpfc hba data structure.
10074 *
10075 * This routine is invoked to enable the MSI interrupt mode to device with
10076 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
10077 * to enable the MSI vector. The device driver is responsible for calling
10078 * the request_irq() to register MSI vector with a interrupt the handler,
10079 * which is done in this function.
10080 *
10081 * Return codes
af901ca1 10082 * 0 - successful
da0436e9
JS
10083 * other values - error
10084 **/
10085static int
10086lpfc_sli4_enable_msi(struct lpfc_hba *phba)
10087{
10088 int rc, index;
10089
10090 rc = pci_enable_msi(phba->pcidev);
10091 if (!rc)
10092 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10093 "0487 PCI enable MSI mode success.\n");
10094 else {
10095 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10096 "0488 PCI enable MSI mode failed (%d)\n", rc);
10097 return rc;
10098 }
10099
10100 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
ed243d37 10101 0, LPFC_DRIVER_NAME, phba);
da0436e9
JS
10102 if (rc) {
10103 pci_disable_msi(phba->pcidev);
10104 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10105 "0490 MSI request_irq failed (%d)\n", rc);
75baf696 10106 return rc;
da0436e9
JS
10107 }
10108
895427bd
JS
10109 for (index = 0; index < phba->io_channel_irqs; index++) {
10110 phba->sli4_hba.hba_eq_hdl[index].idx = index;
10111 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
da0436e9
JS
10112 }
10113
1ba981fd 10114 if (phba->cfg_fof) {
895427bd
JS
10115 phba->sli4_hba.hba_eq_hdl[index].idx = index;
10116 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
1ba981fd 10117 }
75baf696 10118 return 0;
da0436e9
JS
10119}
10120
da0436e9
JS
10121/**
10122 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
10123 * @phba: pointer to lpfc hba data structure.
10124 *
10125 * This routine is invoked to enable device interrupt and associate driver's
10126 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 10127  * interface spec. Depending on the interrupt mode configured for the driver,
 10128  * the driver will try to fall back from the configured interrupt mode to an
10129 * interrupt mode which is supported by the platform, kernel, and device in
10130 * the order of:
10131 * MSI-X -> MSI -> IRQ.
10132 *
10133 * Return codes
af901ca1 10134 * 0 - successful
da0436e9
JS
10135 * other values - error
10136 **/
10137static uint32_t
10138lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10139{
10140 uint32_t intr_mode = LPFC_INTR_ERROR;
895427bd 10141 int retval, idx;
da0436e9
JS
10142
10143 if (cfg_mode == 2) {
10144 /* Preparation before conf_msi mbox cmd */
10145 retval = 0;
10146 if (!retval) {
10147 /* Now, try to enable MSI-X interrupt mode */
10148 retval = lpfc_sli4_enable_msix(phba);
10149 if (!retval) {
10150 /* Indicate initialization to MSI-X mode */
10151 phba->intr_type = MSIX;
10152 intr_mode = 2;
10153 }
10154 }
10155 }
10156
10157 /* Fallback to MSI if MSI-X initialization failed */
10158 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10159 retval = lpfc_sli4_enable_msi(phba);
10160 if (!retval) {
10161 /* Indicate initialization to MSI mode */
10162 phba->intr_type = MSI;
10163 intr_mode = 1;
10164 }
10165 }
10166
 10167 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
10168 if (phba->intr_type == NONE) {
10169 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
10170 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10171 if (!retval) {
895427bd
JS
10172 struct lpfc_hba_eq_hdl *eqhdl;
10173
da0436e9
JS
10174 /* Indicate initialization to INTx mode */
10175 phba->intr_type = INTx;
10176 intr_mode = 0;
895427bd
JS
10177
10178 for (idx = 0; idx < phba->io_channel_irqs; idx++) {
10179 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
10180 eqhdl->idx = idx;
10181 eqhdl->phba = phba;
10182 atomic_set(&eqhdl->hba_eq_in_use, 1);
da0436e9 10183 }
1ba981fd 10184 if (phba->cfg_fof) {
895427bd
JS
10185 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
10186 eqhdl->idx = idx;
10187 eqhdl->phba = phba;
10188 atomic_set(&eqhdl->hba_eq_in_use, 1);
1ba981fd 10189 }
da0436e9
JS
10190 }
10191 }
10192 return intr_mode;
10193}
10194
10195/**
10196 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
10197 * @phba: pointer to lpfc hba data structure.
10198 *
10199 * This routine is invoked to disable device interrupt and disassociate
10200 * the driver's interrupt handler(s) from interrupt vector(s) to device
10201 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
10202 * will release the interrupt vector(s) for the message signaled interrupt.
10203 **/
10204static void
10205lpfc_sli4_disable_intr(struct lpfc_hba *phba)
10206{
10207 /* Disable the currently initialized interrupt mode */
45ffac19
CH
10208 if (phba->intr_type == MSIX) {
10209 int index;
10210
10211 /* Free up MSI-X multi-message vectors */
895427bd
JS
10212 for (index = 0; index < phba->io_channel_irqs; index++)
10213 free_irq(pci_irq_vector(phba->pcidev, index),
10214 &phba->sli4_hba.hba_eq_hdl[index]);
45ffac19
CH
10215
10216 if (phba->cfg_fof)
895427bd
JS
10217 free_irq(pci_irq_vector(phba->pcidev, index),
10218 &phba->sli4_hba.hba_eq_hdl[index]);
45ffac19 10219 } else {
da0436e9 10220 free_irq(phba->pcidev->irq, phba);
45ffac19
CH
10221 }
10222
10223 pci_free_irq_vectors(phba->pcidev);
da0436e9
JS
10224
10225 /* Reset interrupt management states */
10226 phba->intr_type = NONE;
10227 phba->sli.slistat.sli_intr = 0;
da0436e9
JS
10228}
10229
10230/**
10231 * lpfc_unset_hba - Unset SLI3 hba device initialization
10232 * @phba: pointer to lpfc hba data structure.
10233 *
10234 * This routine is invoked to unset the HBA device initialization steps to
10235 * a device with SLI-3 interface spec.
10236 **/
10237static void
10238lpfc_unset_hba(struct lpfc_hba *phba)
10239{
10240 struct lpfc_vport *vport = phba->pport;
10241 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10242
10243 spin_lock_irq(shost->host_lock);
10244 vport->load_flag |= FC_UNLOADING;
10245 spin_unlock_irq(shost->host_lock);
10246
72859909
JS
10247 kfree(phba->vpi_bmask);
10248 kfree(phba->vpi_ids);
10249
da0436e9
JS
10250 lpfc_stop_hba_timers(phba);
10251
10252 phba->pport->work_port_events = 0;
10253
10254 lpfc_sli_hba_down(phba);
10255
10256 lpfc_sli_brdrestart(phba);
10257
10258 lpfc_sli_disable_intr(phba);
10259
10260 return;
10261}
10262
5af5eee7
JS
10263/**
10264 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
10265 * @phba: Pointer to HBA context object.
10266 *
 10267  * This function is called in the SLI4 code path to wait for completion
 10268  * of the device's XRI exchange busy events. It checks for XRI exchange busy
 10269  * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 10270  * that, it checks the XRI exchange busy on outstanding FCP and ELS
 10271  * I/Os every 30 seconds, logging an error message each pass, and waits
 10272  * indefinitely. Only when all XRI exchange busy events complete does the
 10273  * driver unload proceed with issuing the function reset mailbox command to
 10274  * the CNA and releasing the rest of the driver unload resources.
10275 **/
10276static void
10277lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
10278{
10279 int wait_time = 0;
895427bd 10280 int nvme_xri_cmpl = 1;
86c67379 10281 int nvmet_xri_cmpl = 1;
895427bd 10282 int fcp_xri_cmpl = 1;
5af5eee7
JS
10283 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
10284
c3725bdc
JS
10285 /* Driver just aborted IOs during the hba_unset process. Pause
10286 * here to give the HBA time to complete the IO and get entries
10287 * into the abts lists.
10288 */
10289 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
10290
10291 /* Wait for NVME pending IO to flush back to transport. */
10292 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
10293 lpfc_nvme_wait_for_io_drain(phba);
10294
895427bd
JS
10295 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10296 fcp_xri_cmpl =
10297 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
86c67379 10298 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd
JS
10299 nvme_xri_cmpl =
10300 list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
86c67379
JS
10301 nvmet_xri_cmpl =
10302 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10303 }
895427bd 10304
f358dd0c
JS
10305 while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
10306 !nvmet_xri_cmpl) {
5af5eee7 10307 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
895427bd
JS
10308 if (!nvme_xri_cmpl)
10309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10310 "6100 NVME XRI exchange busy "
10311 "wait time: %d seconds.\n",
10312 wait_time/1000);
5af5eee7
JS
10313 if (!fcp_xri_cmpl)
10314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10315 "2877 FCP XRI exchange busy "
10316 "wait time: %d seconds.\n",
10317 wait_time/1000);
10318 if (!els_xri_cmpl)
10319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10320 "2878 ELS XRI exchange busy "
10321 "wait time: %d seconds.\n",
10322 wait_time/1000);
10323 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
10324 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
10325 } else {
10326 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
10327 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
10328 }
86c67379 10329 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd
JS
10330 nvme_xri_cmpl = list_empty(
10331 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
86c67379
JS
10332 nvmet_xri_cmpl = list_empty(
10333 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10334 }
895427bd
JS
10335
10336 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10337 fcp_xri_cmpl = list_empty(
10338 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
10339
5af5eee7
JS
10340 els_xri_cmpl =
10341 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
f358dd0c 10342
5af5eee7
JS
10343 }
10344}
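The wait loop above polls quickly (every 10ms) for the first 10 seconds, then drops to a slow 30-second poll that logs on every pass and never gives up. A compilable sketch of just that timing skeleton, with the completion check stubbed out; the constants mirror the T1/T2/TMO roles but the values and names are illustrative:

#include <stdio.h>

#define T1_MS	10	/* fast poll interval */
#define T2_MS	30000	/* slow poll interval once past the threshold */
#define TMO_MS	10000	/* switch from fast to slow polling here */

/* Stubbed completion check; pretend everything drains after ~40 s. */
static int all_xri_complete(int elapsed_ms) { return elapsed_ms > 40000; }

int main(void)
{
	int wait_ms = 0;

	while (!all_xri_complete(wait_ms)) {
		if (wait_ms > TMO_MS) {
			printf("XRI exchange busy, waited %d s\n",
			       wait_ms / 1000);
			wait_ms += T2_MS;	/* slow poll, log each pass */
		} else {
			wait_ms += T1_MS;	/* fast poll for first 10 s */
		}
	}
	printf("all XRIs complete after %d ms\n", wait_ms);
	return 0;
}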
10345
da0436e9
JS
10346/**
10347 * lpfc_sli4_hba_unset - Unset the fcoe hba
10348 * @phba: Pointer to HBA context object.
10349 *
10350 * This function is called in the SLI4 code path to reset the HBA's FCoE
10351 * function. The caller is not required to hold any lock. This routine
10352 * issues PCI function reset mailbox command to reset the FCoE function.
10353 * At the end of the function, it calls lpfc_hba_down_post function to
10354 * free any pending commands.
10355 **/
10356static void
10357lpfc_sli4_hba_unset(struct lpfc_hba *phba)
10358{
10359 int wait_cnt = 0;
10360 LPFC_MBOXQ_t *mboxq;
912e3acd 10361 struct pci_dev *pdev = phba->pcidev;
da0436e9
JS
10362
10363 lpfc_stop_hba_timers(phba);
10364 phba->sli4_hba.intr_enable = 0;
10365
10366 /*
10367 * Gracefully wait out the potential current outstanding asynchronous
10368 * mailbox command.
10369 */
10370
10371 /* First, block any pending async mailbox command from posted */
10372 spin_lock_irq(&phba->hbalock);
10373 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10374 spin_unlock_irq(&phba->hbalock);
10375 /* Now, trying to wait it out if we can */
10376 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10377 msleep(10);
10378 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
10379 break;
10380 }
10381 /* Forcefully release the outstanding mailbox command if timed out */
10382 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10383 spin_lock_irq(&phba->hbalock);
10384 mboxq = phba->sli.mbox_active;
10385 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10386 __lpfc_mbox_cmpl_put(phba, mboxq);
10387 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10388 phba->sli.mbox_active = NULL;
10389 spin_unlock_irq(&phba->hbalock);
10390 }
10391
5af5eee7
JS
10392 /* Abort all iocbs associated with the hba */
10393 lpfc_sli_hba_iocb_abort(phba);
10394
10395 /* Wait for completion of device XRI exchange busy */
10396 lpfc_sli4_xri_exchange_busy_wait(phba);
10397
da0436e9
JS
10398 /* Disable PCI subsystem interrupt */
10399 lpfc_sli4_disable_intr(phba);
10400
912e3acd
JS
10401 /* Disable SR-IOV if enabled */
10402 if (phba->cfg_sriov_nr_virtfn)
10403 pci_disable_sriov(pdev);
10404
da0436e9
JS
10405 /* Stop kthread signal shall trigger work_done one more time */
10406 kthread_stop(phba->worker_thread);
10407
d1f525aa
JS
10408 /* Unset the queues shared with the hardware then release all
10409 * allocated resources.
10410 */
10411 lpfc_sli4_queue_unset(phba);
10412 lpfc_sli4_queue_destroy(phba);
10413
3677a3a7
JS
10414 /* Reset SLI4 HBA FCoE function */
10415 lpfc_pci_function_reset(phba);
10416
da0436e9
JS
10417 /* Stop the SLI4 device port */
10418 phba->pport->work_port_events = 0;
10419}
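The async-mailbox teardown above follows a block, drain, force sequence: new posting is blocked first, the in-flight command is then given a bounded number of polls to finish, and only if it never completes is it force-completed as MBX_NOT_FINISHED. A rough userspace sketch of that sequence, with every flag and helper hypothetical:

#include <stdio.h>

#define MAX_POLLS 500

static int mbox_active = 1;	/* hypothetical in-flight command flag */

/* Stub: pretend the command completes on the 201st poll. */
static int mbox_done_at(int tick) { return tick > 200; }

int main(void)
{
	int tick = 0;

	/* 1. Block new async mailbox posting (flag set under lock). */
	puts("async mailbox posting blocked");

	/* 2. Wait out the in-flight command, but only for a bounded time. */
	while (mbox_active && ++tick <= MAX_POLLS) {
		if (mbox_done_at(tick))
			mbox_active = 0;
	}

	/* 3. If it never finished, force-complete it as NOT_FINISHED. */
	if (mbox_active)
		puts("forcing outstanding mailbox to NOT_FINISHED");
	else
		printf("mailbox drained after %d polls\n", tick);
	return 0;
}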
10420
28baac74
JS
10421 /**
10422 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
10423 * @phba: Pointer to HBA context object.
10424 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10425 *
10426 * This function is called in the SLI4 code path to read the port's
10427 * sli4 capabilities.
10428 *
10429 * This function may be be called from any context that can block-wait
10430 * for the completion. The expectation is that this routine is called
10431 * typically from probe_one or from the online routine.
10432 **/
10433int
10434lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10435{
10436 int rc;
10437 struct lpfc_mqe *mqe;
10438 struct lpfc_pc_sli4_params *sli4_params;
10439 uint32_t mbox_tmo;
10440
10441 rc = 0;
10442 mqe = &mboxq->u.mqe;
10443
10444 /* Read the port's SLI4 Parameters port capabilities */
fedd3b7b 10445 lpfc_pc_sli4_params(mboxq);
28baac74
JS
10446 if (!phba->sli4_hba.intr_enable)
10447 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10448 else {
a183a15f 10449 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
28baac74
JS
10450 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10451 }
10452
10453 if (unlikely(rc))
10454 return 1;
10455
10456 sli4_params = &phba->sli4_hba.pc_sli4_params;
10457 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
10458 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
10459 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
10460 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
10461 &mqe->un.sli4_params);
10462 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
10463 &mqe->un.sli4_params);
10464 sli4_params->proto_types = mqe->un.sli4_params.word3;
10465 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
10466 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
10467 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
10468 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
10469 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
10470 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
10471 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
10472 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
10473 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
10474 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
10475 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
10476 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
10477 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
10478 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
10479 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
10480 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
10481 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
10482 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
10483 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
10484 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
0558056c
JS
10485
10486 /* Make sure that sge_supp_len can be handled by the driver */
10487 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10488 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10489
28baac74
JS
10490 return rc;
10491}
10492
fedd3b7b
JS
10493/**
10494 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
10495 * @phba: Pointer to HBA context object.
10496 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10497 *
10498 * This function is called in the SLI4 code path to read the port's
10499 * sli4 capabilities.
10500 *
 10501  * This function may be called from any context that can block-wait
10502 * for the completion. The expectation is that this routine is called
10503 * typically from probe_one or from the online routine.
10504 **/
10505int
10506lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10507{
10508 int rc;
10509 struct lpfc_mqe *mqe = &mboxq->u.mqe;
10510 struct lpfc_pc_sli4_params *sli4_params;
a183a15f 10511 uint32_t mbox_tmo;
fedd3b7b
JS
10512 int length;
10513 struct lpfc_sli4_parameters *mbx_sli4_parameters;
10514
6d368e53
JS
10515 /*
10516 * By default, the driver assumes the SLI4 port requires RPI
10517 * header postings. The SLI4_PARAM response will correct this
10518 * assumption.
10519 */
10520 phba->sli4_hba.rpi_hdrs_in_use = 1;
10521
fedd3b7b
JS
10522 /* Read the port's SLI4 Config Parameters */
10523 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
10524 sizeof(struct lpfc_sli4_cfg_mhdr));
10525 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10526 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
10527 length, LPFC_SLI4_MBX_EMBED);
10528 if (!phba->sli4_hba.intr_enable)
10529 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
a183a15f
JS
10530 else {
10531 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
10532 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10533 }
fedd3b7b
JS
10534 if (unlikely(rc))
10535 return rc;
10536 sli4_params = &phba->sli4_hba.pc_sli4_params;
10537 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
10538 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
10539 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
10540 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
10541 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
10542 mbx_sli4_parameters);
10543 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
10544 mbx_sli4_parameters);
10545 if (bf_get(cfg_phwq, mbx_sli4_parameters))
10546 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
10547 else
10548 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
10549 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
10550 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
1ba981fd 10551 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
fedd3b7b
JS
10552 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
10553 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
10554 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
10555 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
0c651878 10556 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
fedd3b7b
JS
10557 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
10558 mbx_sli4_parameters);
895427bd 10559 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
fedd3b7b
JS
10560 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
10561 mbx_sli4_parameters);
6d368e53
JS
10562 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
10563 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
895427bd
JS
10564 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
10565 bf_get(cfg_xib, mbx_sli4_parameters));
10566
10567 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
10568 !phba->nvme_support) {
10569 phba->nvme_support = 0;
10570 phba->nvmet_support = 0;
bcb24f65 10571 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
895427bd
JS
10572 phba->cfg_nvme_io_channel = 0;
10573 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
10575 "6101 Disabling NVME support: "
10576 "Not supported by firmware: %d %d\n",
10577 bf_get(cfg_nvme, mbx_sli4_parameters),
10578 bf_get(cfg_xib, mbx_sli4_parameters));
10579
10580 /* If firmware doesn't support NVME, just use SCSI support */
10581 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
10582 return -ENODEV;
10583 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
10584 }
0558056c 10585
20aefac3
JS
10586 /*
10587 * To support Suppress Response feature we must satisfy 3 conditions.
10588 * lpfc_suppress_rsp module parameter must be set (default).
10589 * In SLI4-Parameters Descriptor:
10590 * Extended Inline Buffers (XIB) must be supported.
10591 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
10592 * (double negative).
10593 */
10594 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
10595 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
f358dd0c 10596 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
20aefac3
JS
10597 else
10598 phba->cfg_suppress_rsp = 0;
f358dd0c 10599
0cf07f84
JS
10600 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
10601 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
10602
0558056c
JS
10603 /* Make sure that sge_supp_len can be handled by the driver */
10604 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10605 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10606
b5c53958 10607 /*
c176ffa0
JS
10608 * Check whether the adapter supports an embedded copy of the
10609 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
10610 * to use this option, 128-byte WQEs must be used.
b5c53958
JS
10611 */
10612 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
10613 phba->fcp_embed_io = 1;
10614 else
10615 phba->fcp_embed_io = 0;
7bdedb34 10616
c176ffa0
JS
10617 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
10618 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
10619 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
10620 phba->enab_exp_wqcq_pages = 1;
10621 else
10622 phba->enab_exp_wqcq_pages = 0;
7bdedb34
JS
10623 /*
10624 * Check if the SLI port supports MDS Diagnostics
10625 */
10626 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
10627 phba->mds_diags_support = 1;
10628 else
10629 phba->mds_diags_support = 0;
fedd3b7b
JS
10630 return 0;
10631}
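The Suppress Response decision above is a three-way AND whose third term is a double negative: the port advertises "Suppress Response IU Not Supported" (nosr), so that bit must be clear for the feature to be usable. A tiny sketch of the predicate, with hypothetical flag names:

#include <stdio.h>

/* Hypothetical flags mirroring the three-way test above: the module
 * parameter must be set, XIB must be supported, and the port's
 * "Suppress Response IU Not Supported" bit must be clear. */
static int suppress_rsp_enabled(int cfg_suppress_rsp, int xib, int nosr)
{
	return cfg_suppress_rsp && xib && !nosr;
}

int main(void)
{
	printf("%d\n", suppress_rsp_enabled(1, 1, 0));	/* 1: all satisfied */
	printf("%d\n", suppress_rsp_enabled(1, 1, 1));	/* 0: port says no */
	return 0;
}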
10632
da0436e9
JS
10633/**
10634 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
10635 * @pdev: pointer to PCI device
10636 * @pid: pointer to PCI device identifier
10637 *
10638 * This routine is to be called to attach a device with SLI-3 interface spec
10639 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10640 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 10641  * information of the device and driver to see if the driver states that it can
10642 * support this kind of device. If the match is successful, the driver core
10643 * invokes this routine. If this routine determines it can claim the HBA, it
10644 * does all the initialization that it needs to do to handle the HBA properly.
10645 *
10646 * Return code
10647 * 0 - driver can claim the device
10648 * negative value - driver can not claim the device
10649 **/
6f039790 10650static int
da0436e9
JS
10651lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
10652{
10653 struct lpfc_hba *phba;
10654 struct lpfc_vport *vport = NULL;
6669f9bb 10655 struct Scsi_Host *shost = NULL;
da0436e9
JS
10656 int error;
10657 uint32_t cfg_mode, intr_mode;
10658
10659 /* Allocate memory for HBA structure */
10660 phba = lpfc_hba_alloc(pdev);
10661 if (!phba)
10662 return -ENOMEM;
10663
10664 /* Perform generic PCI device enabling operation */
10665 error = lpfc_enable_pci_dev(phba);
079b5c91 10666 if (error)
da0436e9 10667 goto out_free_phba;
da0436e9
JS
10668
10669 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
10670 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
10671 if (error)
10672 goto out_disable_pci_dev;
10673
10674 /* Set up SLI-3 specific device PCI memory space */
10675 error = lpfc_sli_pci_mem_setup(phba);
10676 if (error) {
10677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10678 "1402 Failed to set up pci memory space.\n");
10679 goto out_disable_pci_dev;
10680 }
10681
da0436e9
JS
10682 /* Set up SLI-3 specific device driver resources */
10683 error = lpfc_sli_driver_resource_setup(phba);
10684 if (error) {
10685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10686 "1404 Failed to set up driver resource.\n");
10687 goto out_unset_pci_mem_s3;
10688 }
10689
10690 /* Initialize and populate the iocb list per host */
d1f525aa 10691
da0436e9
JS
10692 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
10693 if (error) {
10694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10695 "1405 Failed to initialize iocb list.\n");
10696 goto out_unset_driver_resource_s3;
10697 }
10698
10699 /* Set up common device driver resources */
10700 error = lpfc_setup_driver_resource_phase2(phba);
10701 if (error) {
10702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10703 "1406 Failed to set up driver resource.\n");
10704 goto out_free_iocb_list;
10705 }
10706
079b5c91
JS
10707 /* Get the default values for Model Name and Description */
10708 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10709
da0436e9
JS
10710 /* Create SCSI host to the physical port */
10711 error = lpfc_create_shost(phba);
10712 if (error) {
10713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10714 "1407 Failed to create scsi host.\n");
10715 goto out_unset_driver_resource;
10716 }
10717
10718 /* Configure sysfs attributes */
10719 vport = phba->pport;
10720 error = lpfc_alloc_sysfs_attr(vport);
10721 if (error) {
10722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10723 "1476 Failed to allocate sysfs attr\n");
10724 goto out_destroy_shost;
10725 }
10726
6669f9bb 10727 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
da0436e9
JS
10728 /* Now, trying to enable interrupt and bring up the device */
10729 cfg_mode = phba->cfg_use_msi;
10730 while (true) {
10731 /* Put device to a known state before enabling interrupt */
10732 lpfc_stop_port(phba);
10733 /* Configure and enable interrupt */
10734 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
10735 if (intr_mode == LPFC_INTR_ERROR) {
10736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10737 "0431 Failed to enable interrupt.\n");
10738 error = -ENODEV;
10739 goto out_free_sysfs_attr;
10740 }
10741 /* SLI-3 HBA setup */
10742 if (lpfc_sli_hba_setup(phba)) {
10743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10744 "1477 Failed to set up hba\n");
10745 error = -ENODEV;
10746 goto out_remove_device;
10747 }
10748
10749 /* Wait 50ms for the interrupts of previous mailbox commands */
10750 msleep(50);
10751 /* Check active interrupts on message signaled interrupts */
10752 if (intr_mode == 0 ||
10753 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
10754 /* Log the current active interrupt mode */
10755 phba->intr_mode = intr_mode;
10756 lpfc_log_intr_mode(phba, intr_mode);
10757 break;
10758 } else {
10759 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10760 "0447 Configure interrupt mode (%d) "
10761 "failed active interrupt test.\n",
10762 intr_mode);
10763 /* Disable the current interrupt mode */
10764 lpfc_sli_disable_intr(phba);
10765 /* Try next level of interrupt mode */
10766 cfg_mode = --intr_mode;
10767 }
10768 }
10769
10770 /* Perform post initialization setup */
10771 lpfc_post_init_setup(phba);
10772
10773 /* Check if there are static vports to be created. */
10774 lpfc_create_static_vport(phba);
10775
10776 return 0;
10777
10778out_remove_device:
10779 lpfc_unset_hba(phba);
10780out_free_sysfs_attr:
10781 lpfc_free_sysfs_attr(vport);
10782out_destroy_shost:
10783 lpfc_destroy_shost(phba);
10784out_unset_driver_resource:
10785 lpfc_unset_driver_resource_phase2(phba);
10786out_free_iocb_list:
10787 lpfc_free_iocb_list(phba);
10788out_unset_driver_resource_s3:
10789 lpfc_sli_driver_resource_unset(phba);
10790out_unset_pci_mem_s3:
10791 lpfc_sli_pci_mem_unset(phba);
10792out_disable_pci_dev:
10793 lpfc_disable_pci_dev(phba);
6669f9bb
JS
10794 if (shost)
10795 scsi_host_put(shost);
da0436e9
JS
10796out_free_phba:
10797 lpfc_hba_free(phba);
10798 return error;
10799}
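The while (true) block in the probe path above validates each interrupt mode with a live test: enable at the requested level, wait 50ms, and check whether interrupts actually fired; if not, disable and retry one level lower (MSI-X -> MSI -> INTx, where INTx always terminates the loop). A minimal sketch of that downgrade loop, with the active-interrupt test stubbed out:

#include <stdio.h>

/* Stubbed active-interrupt test; pretend only MSI (1) and INTx (0) pass. */
static int intr_test_passes(int mode) { return mode <= 1; }

int main(void)
{
	int cfg_mode = 2;	/* start at MSI-X, as the probe loop does */

	for (;;) {
		int mode = cfg_mode;	/* "enable" at the requested level */

		if (intr_test_passes(mode)) {
			printf("settled on interrupt mode %d\n", mode);
			break;
		}
		printf("mode %d failed active test, downgrading\n", mode);
		cfg_mode = --mode;	/* retry one level lower */
	}
	return 0;
}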
10800
10801/**
10802 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
10803 * @pdev: pointer to PCI device
10804 *
 10805  * This routine is to be called to detach a device with SLI-3 interface
10806 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10807 * removed from PCI bus, it performs all the necessary cleanup for the HBA
10808 * device to be removed from the PCI subsystem properly.
10809 **/
6f039790 10810static void
da0436e9
JS
10811lpfc_pci_remove_one_s3(struct pci_dev *pdev)
10812{
10813 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10814 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10815 struct lpfc_vport **vports;
10816 struct lpfc_hba *phba = vport->phba;
10817 int i;
da0436e9
JS
10818
10819 spin_lock_irq(&phba->hbalock);
10820 vport->load_flag |= FC_UNLOADING;
10821 spin_unlock_irq(&phba->hbalock);
10822
10823 lpfc_free_sysfs_attr(vport);
10824
10825 /* Release all the vports against this physical port */
10826 vports = lpfc_create_vport_work_array(phba);
10827 if (vports != NULL)
587a37f6
JS
10828 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10829 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10830 continue;
da0436e9 10831 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 10832 }
da0436e9
JS
10833 lpfc_destroy_vport_work_array(phba, vports);
10834
10835 /* Remove FC host and then SCSI host with the physical port */
10836 fc_remove_host(shost);
10837 scsi_remove_host(shost);
d613b6a7 10838
da0436e9
JS
10839 lpfc_cleanup(vport);
10840
10841 /*
 10842 	 * Bring down the SLI Layer. This step disables all interrupts,
10843 * clears the rings, discards all mailbox commands, and resets
10844 * the HBA.
10845 */
10846
48e34d0f 10847 /* HBA interrupt will be disabled after this call */
da0436e9
JS
10848 lpfc_sli_hba_down(phba);
10849 /* Stop kthread signal shall trigger work_done one more time */
10850 kthread_stop(phba->worker_thread);
10851 /* Final cleanup of txcmplq and reset the HBA */
10852 lpfc_sli_brdrestart(phba);
10853
72859909
JS
10854 kfree(phba->vpi_bmask);
10855 kfree(phba->vpi_ids);
10856
da0436e9
JS
10857 lpfc_stop_hba_timers(phba);
10858 spin_lock_irq(&phba->hbalock);
10859 list_del_init(&vport->listentry);
10860 spin_unlock_irq(&phba->hbalock);
10861
10862 lpfc_debugfs_terminate(vport);
10863
912e3acd
JS
10864 /* Disable SR-IOV if enabled */
10865 if (phba->cfg_sriov_nr_virtfn)
10866 pci_disable_sriov(pdev);
10867
da0436e9
JS
10868 /* Disable interrupt */
10869 lpfc_sli_disable_intr(phba);
10870
da0436e9
JS
10871 scsi_host_put(shost);
10872
10873 /*
10874 * Call scsi_free before mem_free since scsi bufs are released to their
10875 * corresponding pools here.
10876 */
10877 lpfc_scsi_free(phba);
10878 lpfc_mem_free_all(phba);
10879
10880 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
10881 phba->hbqslimp.virt, phba->hbqslimp.phys);
10882
10883 /* Free resources associated with SLI2 interface */
10884 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
10885 phba->slim2p.virt, phba->slim2p.phys);
10886
10887 /* unmap adapter SLIM and Control Registers */
10888 iounmap(phba->ctrl_regs_memmap_p);
10889 iounmap(phba->slim_memmap_p);
10890
10891 lpfc_hba_free(phba);
10892
e0c0483c 10893 pci_release_mem_regions(pdev);
da0436e9
JS
10894 pci_disable_device(pdev);
10895}
10896
10897/**
10898 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
10899 * @pdev: pointer to PCI device
10900 * @msg: power management message
10901 *
10902 * This routine is to be called from the kernel's PCI subsystem to support
10903 * system Power Management (PM) to device with SLI-3 interface spec. When
10904 * PM invokes this method, it quiesces the device by stopping the driver's
 10905  * worker thread for the device, turning off the device's interrupt and DMA,
 10906  * and bringing the device offline. Note that the driver implements only the
 10907  * minimum PM requirements of a power-aware driver for suspend/resume: all
 10908  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
 10909  * method call are treated as SUSPEND, and the driver fully reinitializes its
 10910  * device during the resume() method call. The driver therefore sets the
 10911  * device to the PCI_D3hot state in PCI config space instead of setting it
 10912  * according to the @msg provided by the PM.
10913 *
10914 * Return code
10915 * 0 - driver suspended the device
10916 * Error otherwise
10917 **/
10918static int
10919lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
10920{
10921 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10922 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10923
10924 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10925 "0473 PCI device Power Management suspend.\n");
10926
10927 /* Bring down the device */
618a5230 10928 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
da0436e9
JS
10929 lpfc_offline(phba);
10930 kthread_stop(phba->worker_thread);
10931
10932 /* Disable interrupt from device */
10933 lpfc_sli_disable_intr(phba);
10934
10935 /* Save device state to PCI config space */
10936 pci_save_state(pdev);
10937 pci_set_power_state(pdev, PCI_D3hot);
10938
10939 return 0;
10940}
10941
10942/**
10943 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
10944 * @pdev: pointer to PCI device
10945 *
10946 * This routine is to be called from the kernel's PCI subsystem to support
10947 * system Power Management (PM) to device with SLI-3 interface spec. When PM
10948 * invokes this method, it restores the device's PCI config space state and
 10949  * fully reinitializes the device and brings it online. Note that the driver
 10950  * implements only the minimum PM requirements of a power-aware driver for
 10951  * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 10952  * passed to the suspend() method call are treated as SUSPEND, and the
 10953  * driver fully reinitializes its device during the resume() method call.
 10954  * The device is therefore set to PCI_D0 directly in PCI config space before
 10955  * restoring the state.
10956 *
10957 * Return code
 10958  * 0 - driver resumed the device
10959 * Error otherwise
10960 **/
10961static int
10962lpfc_pci_resume_one_s3(struct pci_dev *pdev)
10963{
10964 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10965 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10966 uint32_t intr_mode;
10967 int error;
10968
10969 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10970 "0452 PCI device Power Management resume.\n");
10971
10972 /* Restore device state from PCI config space */
10973 pci_set_power_state(pdev, PCI_D0);
10974 pci_restore_state(pdev);
0d878419 10975
1dfb5a47
JS
10976 /*
10977 * As the new kernel behavior of pci_restore_state() API call clears
10978 * device saved_state flag, need to save the restored state again.
10979 */
10980 pci_save_state(pdev);
10981
da0436e9
JS
10982 if (pdev->is_busmaster)
10983 pci_set_master(pdev);
10984
10985 /* Startup the kernel thread for this host adapter. */
10986 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10987 "lpfc_worker_%d", phba->brd_no);
10988 if (IS_ERR(phba->worker_thread)) {
10989 error = PTR_ERR(phba->worker_thread);
10990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10991 "0434 PM resume failed to start worker "
10992 "thread: error=x%x.\n", error);
10993 return error;
10994 }
10995
10996 /* Configure and enable interrupt */
10997 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10998 if (intr_mode == LPFC_INTR_ERROR) {
10999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11000 "0430 PM resume Failed to enable interrupt\n");
11001 return -EIO;
11002 } else
11003 phba->intr_mode = intr_mode;
11004
11005 /* Restart HBA and bring it online */
11006 lpfc_sli_brdrestart(phba);
11007 lpfc_online(phba);
11008
11009 /* Log the current active interrupt mode */
11010 lpfc_log_intr_mode(phba, phba->intr_mode);
11011
11012 return 0;
11013}
11014
891478a2
JS
11015/**
11016 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
11017 * @phba: pointer to lpfc hba data structure.
11018 *
 11019  * This routine is called to prepare the SLI3 device for PCI slot recovery. It
e2af0d2e 11020 * aborts all the outstanding SCSI I/Os to the pci device.
891478a2
JS
11021 **/
11022static void
11023lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
11024{
11025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11026 "2723 PCI channel I/O abort preparing for recovery\n");
e2af0d2e
JS
11027
11028 /*
 11029 	 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
 11030 	 * and let the SCSI mid-layer retry them to recover.
11031 */
db55fba8 11032 lpfc_sli_abort_fcp_rings(phba);
891478a2
JS
11033}
11034
0d878419
JS
11035/**
11036 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
11037 * @phba: pointer to lpfc hba data structure.
11038 *
11039 * This routine is called to prepare the SLI3 device for PCI slot reset. It
11040 * disables the device interrupt and pci device, and aborts the internal FCP
11041 * pending I/Os.
11042 **/
11043static void
11044lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
11045{
0d878419 11046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
891478a2 11047 "2710 PCI channel disable preparing for reset\n");
e2af0d2e 11048
75baf696 11049 /* Block any management I/Os to the device */
618a5230 11050 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
75baf696 11051
e2af0d2e
JS
11052 /* Block all SCSI devices' I/Os on the host */
11053 lpfc_scsi_dev_block(phba);
11054
ea714f3d
JS
11055 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11056 lpfc_sli_flush_fcp_rings(phba);
11057
e2af0d2e
JS
11058 /* stop all timers */
11059 lpfc_stop_hba_timers(phba);
11060
0d878419
JS
11061 /* Disable interrupt and pci device */
11062 lpfc_sli_disable_intr(phba);
11063 pci_disable_device(phba->pcidev);
0d878419
JS
11064}
11065
11066/**
11067 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
11068 * @phba: pointer to lpfc hba data structure.
11069 *
11070 * This routine is called to prepare the SLI3 device for PCI slot permanently
11071 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
11072 * pending I/Os.
11073 **/
11074static void
75baf696 11075lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
0d878419
JS
11076{
11077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
891478a2 11078 "2711 PCI channel permanent disable for failure\n");
e2af0d2e
JS
11079 /* Block all SCSI devices' I/Os on the host */
11080 lpfc_scsi_dev_block(phba);
11081
11082 /* stop all timers */
11083 lpfc_stop_hba_timers(phba);
11084
0d878419
JS
11085 /* Clean up all driver's outstanding SCSI I/Os */
11086 lpfc_sli_flush_fcp_rings(phba);
11087}
11088
da0436e9
JS
11089/**
11090 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
11091 * @pdev: pointer to PCI device.
11092 * @state: the current PCI connection state.
11093 *
11094 * This routine is called from the PCI subsystem for I/O error handling to
11095 * device with SLI-3 interface spec. This function is called by the PCI
11096 * subsystem after a PCI bus error affecting this device has been detected.
11097 * When this function is invoked, it will need to stop all the I/Os and
11098 * interrupt(s) to the device. Once that is done, it will return
11099 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
11100 * as desired.
11101 *
11102 * Return codes
0d878419 11103 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
11104 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11105 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11106 **/
11107static pci_ers_result_t
11108lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
11109{
11110 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11111 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
da0436e9 11112
11113 switch (state) {
11114 case pci_channel_io_normal:
11115 /* Non-fatal error, prepare for recovery */
11116 lpfc_sli_prep_dev_for_recover(phba);
11117 return PCI_ERS_RESULT_CAN_RECOVER;
11118 case pci_channel_io_frozen:
11119 /* Fatal error, prepare for slot reset */
11120 lpfc_sli_prep_dev_for_reset(phba);
11121 return PCI_ERS_RESULT_NEED_RESET;
11122 case pci_channel_io_perm_failure:
11123 /* Permanent failure, prepare for device down */
75baf696 11124 lpfc_sli_prep_dev_for_perm_failure(phba);
da0436e9 11125 return PCI_ERS_RESULT_DISCONNECT;
11126 default:
11127 /* Unknown state, prepare and request slot reset */
11128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11129 "0472 Unknown PCI error state: x%x\n", state);
11130 lpfc_sli_prep_dev_for_reset(phba);
11131 return PCI_ERS_RESULT_NEED_RESET;
da0436e9 11132 }
11133}
11134
11135/**
11136 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
11137 * @pdev: pointer to PCI device.
11138 *
11139 * This routine is called from the PCI subsystem for error handling on
11140 * devices with the SLI-3 interface spec. It is called after the PCI bus has
11141 * been reset to restart the PCI card from scratch, as if from a cold-boot.
11142 * During PCI subsystem error recovery, after the driver returns
11143 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
11144 * recovery and then calls this routine before calling the .resume method
11145 * to recover the device. This function initializes the HBA device and
11146 * enables its interrupt, but it just puts the HBA into an offline state
11147 * without passing any I/O traffic.
11148 *
11149 * Return codes
11150 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11151 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11152 */
11153static pci_ers_result_t
11154lpfc_io_slot_reset_s3(struct pci_dev *pdev)
11155{
11156 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11157 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11158 struct lpfc_sli *psli = &phba->sli;
11159 uint32_t intr_mode;
11160
11161 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
11162 if (pci_enable_device_mem(pdev)) {
11163 printk(KERN_ERR "lpfc: Cannot re-enable "
11164 "PCI device after reset.\n");
11165 return PCI_ERS_RESULT_DISCONNECT;
11166 }
11167
11168 pci_restore_state(pdev);
11169
11170 /*
11171 * pci_restore_state() clears the device's saved_state flag, so the
11172 * restored state needs to be saved again.
11173 */
11174 pci_save_state(pdev);
11175
11176 if (pdev->is_busmaster)
11177 pci_set_master(pdev);
11178
11179 spin_lock_irq(&phba->hbalock);
11180 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
11181 spin_unlock_irq(&phba->hbalock);
11182
11183 /* Configure and enable interrupt */
11184 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
11185 if (intr_mode == LPFC_INTR_ERROR) {
11186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11187 "0427 Cannot re-enable interrupt after "
11188 "slot reset.\n");
11189 return PCI_ERS_RESULT_DISCONNECT;
11190 } else
11191 phba->intr_mode = intr_mode;
11192
75baf696 11193 /* Take device offline, it will perform cleanup */
618a5230 11194 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11195 lpfc_offline(phba);
11196 lpfc_sli_brdrestart(phba);
11197
11198 /* Log the current active interrupt mode */
11199 lpfc_log_intr_mode(phba, phba->intr_mode);
11200
11201 return PCI_ERS_RESULT_RECOVERED;
11202}
11203
11204/**
11205 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
11206 * @pdev: pointer to PCI device
11207 *
11208 * This routine is called from the PCI subsystem for error handling on devices
11209 * with the SLI-3 interface spec. It is called when kernel error recovery tells
11210 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
11211 * error recovery. After this call, traffic can start to flow from this device
11212 * again.
11213 */
11214static void
11215lpfc_io_resume_s3(struct pci_dev *pdev)
11216{
11217 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11218 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3772a991 11219
e2af0d2e 11220 /* Bring device online; it will be a no-op for non-fatal error resume */
da0436e9 11221 lpfc_online(phba);
11222
11223 /* Clean up Advanced Error Reporting (AER) if needed */
11224 if (phba->hba_flag & HBA_AER_ENABLED)
11225 pci_cleanup_aer_uncorrect_error_status(pdev);
da0436e9 11226}
3772a991 11227
11228/**
11229 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
11230 * @phba: pointer to lpfc hba data structure.
11231 *
11232 * Returns the number of ELS/CT IOCBs to reserve.
11233 **/
11234int
11235lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
11236{
11237 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
11238
11239 if (phba->sli_rev == LPFC_SLI_REV4) {
11240 if (max_xri <= 100)
6a9c52cf 11241 return 10;
f1126688 11242 else if (max_xri <= 256)
6a9c52cf 11243 return 25;
f1126688 11244 else if (max_xri <= 512)
6a9c52cf 11245 return 50;
f1126688 11246 else if (max_xri <= 1024)
6a9c52cf 11247 return 100;
8a9d2e80 11248 else if (max_xri <= 1536)
6a9c52cf 11249 return 150;
11250 else if (max_xri <= 2048)
11251 return 200;
11252 else
11253 return 250;
11254 } else
11255 return 0;
11256}
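/*
 * [Editorial sketch] The tier mapping above is easiest to sanity-check in
 * isolation. Below is a minimal, self-contained userspace rendering of the
 * same max_xri -> reserved-count tiers (an illustration only, not driver
 * code; remember the real routine also returns 0 for non-SLI4 revisions).
 */
#include <assert.h>

static int els_iocb_cnt_sketch(int max_xri)
{
	static const struct { int xri_limit; int iocb_cnt; } tier[] = {
		{  100,  10 }, {  256,  25 }, {  512,  50 },
		{ 1024, 100 }, { 1536, 150 }, { 2048, 200 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tier) / sizeof(tier[0]); i++)
		if (max_xri <= tier[i].xri_limit)
			return tier[i].iocb_cnt;
	return 250;		/* more than 2048 XRIs */
}

int main(void)
{
	assert(els_iocb_cnt_sketch(100)  == 10);	/* tier boundaries are inclusive */
	assert(els_iocb_cnt_sketch(101)  == 25);
	assert(els_iocb_cnt_sketch(800)  == 100);
	assert(els_iocb_cnt_sketch(4096) == 250);
	return 0;
}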
11257
11258/**
11259 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
11260 * @phba: pointer to lpfc hba data structure.
11261 *
f358dd0c 11262 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
11263 **/
11264int
11265lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
11266{
11267 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
11268
11269 if (phba->nvmet_support)
11270 max_xri += LPFC_NVMET_BUF_POST;
11271 return max_xri;
11272}
11273
11274
11275/**
11276 * lpfc_write_firmware - attempt to write a firmware image to the port
52d52440 11277 * @fw: pointer to firmware image returned from request_firmware.
ce396282 11278 * @phba: pointer to lpfc hba data structure.
52d52440 11279 *
52d52440 11280 **/
11281static void
11282lpfc_write_firmware(const struct firmware *fw, void *context)
52d52440 11283{
ce396282 11284 struct lpfc_hba *phba = (struct lpfc_hba *)context;
6b5151fd 11285 char fwrev[FW_REV_STR_SIZE];
ce396282 11286 struct lpfc_grp_hdr *image;
11287 struct list_head dma_buffer_list;
11288 int i, rc = 0;
11289 struct lpfc_dmabuf *dmabuf, *next;
11290 uint32_t offset = 0, temp_offset = 0;
6b6ef5db 11291 uint32_t magic_number, ftype, fid, fsize;
52d52440 11292
c71ab861 11293 /* fw can be NULL in no-wait mode, sanity check */
11294 if (!fw) {
11295 rc = -ENXIO;
11296 goto out;
11297 }
11298 image = (struct lpfc_grp_hdr *)fw->data;
11299
11300 magic_number = be32_to_cpu(image->magic_number);
11301 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
11302 fid = bf_get_be32(lpfc_grp_hdr_id, image);
11303 fsize = be32_to_cpu(image->size);
11304
52d52440 11305 INIT_LIST_HEAD(&dma_buffer_list);
11306 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
11307 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
11308 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
11309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11310 "3022 Invalid FW image found. "
efe583c6 11311 "Magic:%x Type:%x ID:%x Size %d %zd\n",
6b6ef5db 11312 magic_number, ftype, fid, fsize, fw->size);
11313 rc = -EINVAL;
11314 goto release_out;
11315 }
11316 lpfc_decode_firmware_rev(phba, fwrev, 1);
88a2cfbb 11317 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
52d52440 11318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
ce396282 11319 "3023 Updating Firmware, Current Version:%s "
52d52440 11320 "New Version:%s\n",
88a2cfbb 11321 fwrev, image->revision);
11322 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
11323 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
11324 GFP_KERNEL);
11325 if (!dmabuf) {
11326 rc = -ENOMEM;
ce396282 11327 goto release_out;
11328 }
11329 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
11330 SLI4_PAGE_SIZE,
11331 &dmabuf->phys,
11332 GFP_KERNEL);
11333 if (!dmabuf->virt) {
11334 kfree(dmabuf);
11335 rc = -ENOMEM;
ce396282 11336 goto release_out;
11337 }
11338 list_add_tail(&dmabuf->list, &dma_buffer_list);
11339 }
11340 while (offset < fw->size) {
11341 temp_offset = offset;
11342 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
079b5c91 11343 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
11344 memcpy(dmabuf->virt,
11345 fw->data + temp_offset,
11346 fw->size - temp_offset);
11347 temp_offset = fw->size;
11348 break;
11349 }
11350 memcpy(dmabuf->virt, fw->data + temp_offset,
11351 SLI4_PAGE_SIZE);
88a2cfbb 11352 temp_offset += SLI4_PAGE_SIZE;
11353 }
11354 rc = lpfc_wr_object(phba, &dma_buffer_list,
11355 (fw->size - offset), &offset);
11356 if (rc)
11357 goto release_out;
11358 }
11359 rc = offset;
11360 }
11361
11362release_out:
11363 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
11364 list_del(&dmabuf->list);
11365 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
11366 dmabuf->virt, dmabuf->phys);
11367 kfree(dmabuf);
11368 }
11369 release_firmware(fw);
11370out:
11371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
c71ab861 11372 "3024 Firmware update done: %d.\n", rc);
ce396282 11373 return;
11374}
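/*
 * [Editorial sketch] The download loop above repeatedly refills up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE page-sized DMA buffers from the image and lets
 * lpfc_wr_object() advance `offset` past whatever was written. A minimal,
 * self-contained userspace model of that chunking arithmetic follows
 * (hypothetical names and sizes; the "write" here simply consumes
 * everything staged, whereas the real lpfc_wr_object() decides how far
 * offset moves):
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ  4096	/* stands in for SLI4_PAGE_SIZE */
#define NUM_BUFS 4	/* stands in for LPFC_MBX_WR_CONFIG_MAX_BDE */

static void stage_and_write_sketch(const unsigned char *img, size_t size)
{
	static unsigned char bufs[NUM_BUFS][PAGE_SZ];
	size_t offset = 0, temp_offset;
	int i;

	while (offset < size) {
		temp_offset = offset;
		for (i = 0; i < NUM_BUFS && temp_offset < size; i++) {
			size_t chunk = size - temp_offset;

			if (chunk > PAGE_SZ)
				chunk = PAGE_SZ;	/* final partial page */
			memcpy(bufs[i], img + temp_offset, chunk);
			temp_offset += chunk;
		}
		printf("write %zu bytes at offset %zu\n",
		       temp_offset - offset, offset);
		offset = temp_offset;
	}
}

int main(void)
{
	static unsigned char image[3 * PAGE_SZ + 100];

	stage_and_write_sketch(image, sizeof(image));
	return 0;
}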
11375
11376/**
11377 * lpfc_sli4_request_firmware_update - Request Linux generic firmware upgrade
11378 * @phba: pointer to lpfc hba data structure.
11379 * @fw_upgrade: INT_FW_UPGRADE (asynchronous) or RUN_FW_UPGRADE (synchronous)
11380 *
11381 * This routine is called to perform a Linux generic firmware upgrade on
11382 * devices that support this feature.
11382 **/
11383int
11384lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
11385{
11386 uint8_t file_name[ELX_MODEL_NAME_SIZE];
11387 int ret;
11388 const struct firmware *fw;
11389
11390 /* Only supported on SLI4 interface type 2 for now */
27d6ac0a 11391 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
11392 LPFC_SLI_INTF_IF_TYPE_2)
11393 return -EPERM;
11394
11395 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
11396
11397 if (fw_upgrade == INT_FW_UPGRADE) {
11398 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
11399 file_name, &phba->pcidev->dev,
11400 GFP_KERNEL, (void *)phba,
11401 lpfc_write_firmware);
11402 } else if (fw_upgrade == RUN_FW_UPGRADE) {
11403 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
11404 if (!ret)
11405 lpfc_write_firmware(fw, (void *)phba);
11406 } else {
11407 ret = -EINVAL;
11408 }
11409
11410 return ret;
11411}
11412
3772a991 11413/**
da0436e9 11414 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
11415 * @pdev: pointer to PCI device
11416 * @pid: pointer to PCI device identifier
11417 *
11418 * This routine is called from the kernel's PCI subsystem for devices with
11419 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991 11420 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
11421 * information of the device and driver to see if the driver can
11422 * support this kind of device. If the match is successful, the driver
11423 * core invokes this routine. If this routine determines it can claim the HBA,
11424 * it does all the initialization that it needs to handle the HBA
11425 * properly.
11426 *
11427 * Return code
11428 * 0 - driver can claim the device
11429 * negative value - driver can not claim the device
11430 **/
6f039790 11431static int
da0436e9 11432lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11433{
11434 struct lpfc_hba *phba;
11435 struct lpfc_vport *vport = NULL;
6669f9bb 11436 struct Scsi_Host *shost = NULL;
6c621a22 11437 int error;
11438 uint32_t cfg_mode, intr_mode;
11439
11440 /* Allocate memory for HBA structure */
11441 phba = lpfc_hba_alloc(pdev);
11442 if (!phba)
11443 return -ENOMEM;
11444
11445 /* Perform generic PCI device enabling operation */
11446 error = lpfc_enable_pci_dev(phba);
079b5c91 11447 if (error)
3772a991 11448 goto out_free_phba;
3772a991 11449
11450 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
11451 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
11452 if (error)
11453 goto out_disable_pci_dev;
11454
11455 /* Set up SLI-4 specific device PCI memory space */
11456 error = lpfc_sli4_pci_mem_setup(phba);
11457 if (error) {
11458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11459 "1410 Failed to set up pci memory space.\n");
3772a991
JS
11460 goto out_disable_pci_dev;
11461 }
11462
11463 /* Set up SLI-4 Specific device driver resources */
11464 error = lpfc_sli4_driver_resource_setup(phba);
11465 if (error) {
11466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11467 "1412 Failed to set up driver resource.\n");
11468 goto out_unset_pci_mem_s4;
11469 }
11470
19ca7609 11471 INIT_LIST_HEAD(&phba->active_rrq_list);
7d791df7 11472 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
19ca7609 11473
11474 /* Set up common device driver resources */
11475 error = lpfc_setup_driver_resource_phase2(phba);
11476 if (error) {
11477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11478 "1414 Failed to set up driver resource.\n");
6c621a22 11479 goto out_unset_driver_resource_s4;
11480 }
11481
11482 /* Get the default values for Model Name and Description */
11483 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11484
11485 /* Create SCSI host to the physical port */
11486 error = lpfc_create_shost(phba);
11487 if (error) {
11488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11489 "1415 Failed to create scsi host.\n");
11490 goto out_unset_driver_resource;
11491 }
9399627f 11492
5b75da2f 11493 /* Configure sysfs attributes */
11494 vport = phba->pport;
11495 error = lpfc_alloc_sysfs_attr(vport);
11496 if (error) {
9399627f 11497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11498 "1416 Failed to allocate sysfs attr\n");
3772a991 11499 goto out_destroy_shost;
98c9ea5c 11500 }
875fbdfe 11501
6669f9bb 11502 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
3772a991 11503 /* Now, trying to enable interrupt and bring up the device */
5b75da2f 11504 cfg_mode = phba->cfg_use_msi;
5b75da2f 11505
11506 /* Put device to a known state before enabling interrupt */
11507 lpfc_stop_port(phba);
895427bd 11508
11509 /* Configure and enable interrupt */
11510 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
11511 if (intr_mode == LPFC_INTR_ERROR) {
11512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11513 "0426 Failed to enable interrupt.\n");
11514 error = -ENODEV;
11515 goto out_free_sysfs_attr;
11516 }
11517 /* Default to single EQ for non-MSI-X */
11518 if (phba->intr_type != MSIX) {
11519 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
11520 phba->cfg_fcp_io_channel = 1;
2d7dbc4c 11521 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd 11522 phba->cfg_nvme_io_channel = 1;
11523 if (phba->nvmet_support)
11524 phba->cfg_nvmet_mrq = 1;
11525 }
11526 phba->io_channel_irqs = 1;
11527 }
11528
11529 /* Set up SLI-4 HBA */
11530 if (lpfc_sli4_hba_setup(phba)) {
11531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11532 "1421 Failed to set up hba\n");
11533 error = -ENODEV;
11534 goto out_disable_intr;
98c9ea5c 11535 }
858c9f6c 11536
11537 /* Log the current active interrupt mode */
11538 phba->intr_mode = intr_mode;
11539 lpfc_log_intr_mode(phba, intr_mode);
11540
11541 /* Perform post initialization setup */
11542 lpfc_post_init_setup(phba);
dea3101e 11543
11544 /* NVME support in FW earlier in the driver load corrects the
11545 * FC4 type, making a check for nvme_support unnecessary.
11546 */
11547 if ((phba->nvmet_support == 0) &&
11548 (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
11549 /* Create NVME binding with nvme_fc_transport. This
11550 * ensures the vport is initialized. If the localport
11551 * create fails, it should not unload the driver to
11552 * support field issues.
11553 */
11554 error = lpfc_nvme_create_localport(vport);
11555 if (error) {
11556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11557 "6004 NVME registration failed, "
11558 "error x%x\n",
11559 error);
11560 }
11561 }
895427bd 11562
11563 /* check for firmware upgrade or downgrade */
11564 if (phba->cfg_request_firmware_upgrade)
db6f1c2f 11565 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
52d52440 11566
11567 /* Check if there are static vports to be created. */
11568 lpfc_create_static_vport(phba);
dea3101e 11569 return 0;
11570
11571out_disable_intr:
11572 lpfc_sli4_disable_intr(phba);
11573out_free_sysfs_attr:
11574 lpfc_free_sysfs_attr(vport);
11575out_destroy_shost:
11576 lpfc_destroy_shost(phba);
11577out_unset_driver_resource:
11578 lpfc_unset_driver_resource_phase2(phba);
11579out_unset_driver_resource_s4:
11580 lpfc_sli4_driver_resource_unset(phba);
11581out_unset_pci_mem_s4:
11582 lpfc_sli4_pci_mem_unset(phba);
11583out_disable_pci_dev:
11584 lpfc_disable_pci_dev(phba);
11585 if (shost)
11586 scsi_host_put(shost);
2e0fef85 11587out_free_phba:
3772a991 11588 lpfc_hba_free(phba);
dea3101e 11589 return error;
11590}
11591
e59058c4 11592/**
da0436e9 11593 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
11594 * @pdev: pointer to PCI device
11595 *
11596 * This routine is called from the kernel's PCI subsystem for devices with
11597 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
11598 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
11599 * device to be removed from the PCI subsystem properly.
e59058c4 11600 **/
6f039790 11601static void
da0436e9 11602lpfc_pci_remove_one_s4(struct pci_dev *pdev)
dea3101e 11603{
da0436e9 11604 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2e0fef85 11605 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
eada272d 11606 struct lpfc_vport **vports;
da0436e9 11607 struct lpfc_hba *phba = vport->phba;
eada272d 11608 int i;
8a4df120 11609
da0436e9 11610 /* Mark the device unloading flag */
549e55cd 11611 spin_lock_irq(&phba->hbalock);
51ef4c26 11612 vport->load_flag |= FC_UNLOADING;
549e55cd 11613 spin_unlock_irq(&phba->hbalock);
2e0fef85 11614
da0436e9 11615 /* Free the HBA sysfs attributes */
11616 lpfc_free_sysfs_attr(vport);
11617
11618 /* Release all the vports against this physical port */
11619 vports = lpfc_create_vport_work_array(phba);
11620 if (vports != NULL)
11621 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11622 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
11623 continue;
eada272d 11624 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 11625 }
11626 lpfc_destroy_vport_work_array(phba, vports);
11627
11628 /* Remove FC host and then SCSI host with the physical port */
11629 fc_remove_host(shost);
11630 scsi_remove_host(shost);
da0436e9 11631
11632 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
11633 * localports are destroyed afterwards to clean up all transport memory.
895427bd 11634 */
87af33fe 11635 lpfc_cleanup(vport);
11636 lpfc_nvmet_destroy_targetport(phba);
11637 lpfc_nvme_destroy_localport(vport);
87af33fe 11638
11639 /*
11640 * Bring down the SLI Layer. This step disables all interrupts,
11641 * clears the rings, discards all mailbox commands, and resets
11642 * the HBA FCoE function.
11643 */
11644 lpfc_debugfs_terminate(vport);
11645 lpfc_sli4_hba_unset(phba);
a257bf90 11646
1901762f 11647 lpfc_stop_hba_timers(phba);
11648 spin_lock_irq(&phba->hbalock);
11649 list_del_init(&vport->listentry);
11650 spin_unlock_irq(&phba->hbalock);
11651
3677a3a7 11652 /* Perform scsi free before driver resource_unset since scsi
da0436e9 11653 * buffers are released to their corresponding pools here.
11654 */
11655 lpfc_scsi_free(phba);
895427bd 11656 lpfc_nvme_free(phba);
01649561 11657 lpfc_free_iocb_list(phba);
67d12733 11658
da0436e9 11659 lpfc_sli4_driver_resource_unset(phba);
ed957684 11660
11661 /* Unmap adapter Control and Doorbell registers */
11662 lpfc_sli4_pci_mem_unset(phba);
2e0fef85 11663
11664 /* Release PCI resources and disable device's PCI function */
11665 scsi_host_put(shost);
11666 lpfc_disable_pci_dev(phba);
2e0fef85 11667
da0436e9 11668 /* Finally, free the driver's device data structure */
3772a991 11669 lpfc_hba_free(phba);
2e0fef85 11670
da0436e9 11671 return;
dea3101e 11672}
11673
3a55b532 11674/**
da0436e9 11675 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
11676 * @pdev: pointer to PCI device
11677 * @msg: power management message
11678 *
11679 * This routine is called from the kernel's PCI subsystem to support system
11680 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
11681 * invokes this method, it quiesces the device by stopping the driver's
11682 * worker thread for the device, turning off the device's interrupt and DMA,
11683 * and bringing the device offline. Note that the driver implements only the
11684 * minimum PM requirements of a power-aware driver's suspend/resume support:
11685 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
11686 * method call are treated as SUSPEND, and the driver fully reinitializes
11687 * its device during the resume() method call. Accordingly, the driver sets
11688 * the device to the PCI_D3hot state in PCI config space instead of setting
3772a991 11689 * it according to the @msg provided by the PM.
11690 *
11691 * Return code
11692 * 0 - driver suspended the device
11693 * Error otherwise
11694 **/
11695static int
da0436e9 11696lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
11697{
11698 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11699 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11700
11701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
75baf696 11702 "2843 PCI device Power Management suspend.\n");
11703
11704 /* Bring down the device */
618a5230 11705 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11706 lpfc_offline(phba);
11707 kthread_stop(phba->worker_thread);
11708
11709 /* Disable interrupt from device */
da0436e9 11710 lpfc_sli4_disable_intr(phba);
5350d872 11711 lpfc_sli4_queue_destroy(phba);
11712
11713 /* Save device state to PCI config space */
11714 pci_save_state(pdev);
11715 pci_set_power_state(pdev, PCI_D3hot);
11716
11717 return 0;
11718}
11719
11720/**
da0436e9 11721 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
11722 * @pdev: pointer to PCI device
11723 *
11724 * This routine is called from the kernel's PCI subsystem to support system
11725 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
11726 * invokes this method, it restores the device's PCI config space state and
11727 * fully reinitializes the device and brings it online. Note that the driver
11728 * implements only the minimum PM requirements of a power-aware driver's
11729 * suspend/resume support: all possible PM messages (SUSPEND, HIBERNATE,
11730 * FREEZE) to the suspend() method call are treated as SUSPEND, and the
11731 * driver fully reinitializes its device during the resume() method call.
11732 * The device is therefore set to PCI_D0 directly in PCI config space
11733 * before restoring the state.
11734 *
11735 * Return code
11736 * 0 - driver resumed the device
11737 * Error otherwise
11738 **/
11739static int
da0436e9 11740lpfc_pci_resume_one_s4(struct pci_dev *pdev)
11741{
11742 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11743 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
5b75da2f 11744 uint32_t intr_mode;
11745 int error;
11746
11747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
da0436e9 11748 "0292 PCI device Power Management resume.\n");
11749
11750 /* Restore device state from PCI config space */
11751 pci_set_power_state(pdev, PCI_D0);
11752 pci_restore_state(pdev);
11753
11754 /*
11755 * pci_restore_state() clears the device's saved_state flag, so the
11756 * restored state needs to be saved again.
11757 */
11758 pci_save_state(pdev);
11759
11760 if (pdev->is_busmaster)
11761 pci_set_master(pdev);
11762
da0436e9 11763 /* Startup the kernel thread for this host adapter. */
11764 phba->worker_thread = kthread_run(lpfc_do_work, phba,
11765 "lpfc_worker_%d", phba->brd_no);
11766 if (IS_ERR(phba->worker_thread)) {
11767 error = PTR_ERR(phba->worker_thread);
11768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11769 "0293 PM resume failed to start worker "
11770 "thread: error=x%x.\n", error);
11771 return error;
11772 }
11773
5b75da2f 11774 /* Configure and enable interrupt */
da0436e9 11775 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
5b75da2f 11776 if (intr_mode == LPFC_INTR_ERROR) {
3a55b532 11777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 11778 "0294 PM resume Failed to enable interrupt\n");
11779 return -EIO;
11780 } else
11781 phba->intr_mode = intr_mode;
11782
11783 /* Restart HBA and bring it online */
11784 lpfc_sli_brdrestart(phba);
11785 lpfc_online(phba);
11786
11787 /* Log the current active interrupt mode */
11788 lpfc_log_intr_mode(phba, phba->intr_mode);
11789
11790 return 0;
11791}
11792
11793/**
11794 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for PCI slot recovery
11795 * @phba: pointer to lpfc hba data structure.
11796 *
11797 * This routine is called to prepare the SLI4 device for PCI slot recovery. It
11798 * aborts all outstanding SCSI I/Os to the PCI device.
11799 **/
11800static void
11801lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
11802{
11803 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11804 "2828 PCI channel I/O abort preparing for recovery\n");
11805 /*
11806 * There may be errored I/Os through the HBA; abort all I/Os on the
11807 * txcmplq and let the SCSI mid-layer retry them to recover.
11808 */
db55fba8 11809 lpfc_sli_abort_fcp_rings(phba);
11810}
11811
11812/**
11813 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
11814 * @phba: pointer to lpfc hba data structure.
11815 *
11816 * This routine is called to prepare the SLI4 device for PCI slot reset. It
11817 * disables the device interrupt and pci device, and aborts the internal FCP
11818 * pending I/Os.
11819 **/
11820static void
11821lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
11822{
11823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11824 "2826 PCI channel disable preparing for reset\n");
11825
11826 /* Block any management I/Os to the device */
618a5230 11827 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
11828
11829 /* Block all SCSI devices' I/Os on the host */
11830 lpfc_scsi_dev_block(phba);
11831
11832 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11833 lpfc_sli_flush_fcp_rings(phba);
11834
11835 /* Flush the outstanding NVME IOs if fc4 type enabled. */
11836 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11837 lpfc_sli_flush_nvme_rings(phba);
11838
11839 /* stop all timers */
11840 lpfc_stop_hba_timers(phba);
11841
11842 /* Disable interrupt and pci device */
11843 lpfc_sli4_disable_intr(phba);
5350d872 11844 lpfc_sli4_queue_destroy(phba);
75baf696 11845 pci_disable_device(phba->pcidev);
11846}
11847
11848/**
11849 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
11850 * @phba: pointer to lpfc hba data structure.
11851 *
11852 * This routine is called to prepare the SLI4 device for permanent disabling
11853 * of the PCI slot. It blocks SCSI transport layer traffic and flushes the
11854 * pending FCP I/Os.
11855 **/
11856static void
11857lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
11858{
11859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11860 "2827 PCI channel permanent disable for failure\n");
11861
11862 /* Block all SCSI devices' I/Os on the host */
11863 lpfc_scsi_dev_block(phba);
11864
11865 /* stop all timers */
11866 lpfc_stop_hba_timers(phba);
11867
11868 /* Clean up all driver's outstanding SCSI I/Os */
11869 lpfc_sli_flush_fcp_rings(phba);
11870
11871 /* Flush the outstanding NVME IOs if fc4 type enabled. */
11872 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11873 lpfc_sli_flush_nvme_rings(phba);
11874}
11875
8d63f375 11876/**
da0436e9 11877 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
11878 * @pdev: pointer to PCI device.
11879 * @state: the current PCI connection state.
8d63f375 11880 *
11881 * This routine is called from the PCI subsystem for error handling on devices
11882 * with the SLI-4 interface spec. It is invoked by the PCI subsystem
11883 * after a PCI bus error affecting this device has been detected. When this
11884 * function is invoked, it needs to stop all I/O and interrupt(s)
11885 * to the device. Once that is done, it returns the recovery action (e.g.
11886 * PCI_ERS_RESULT_NEED_RESET) for the PCI subsystem to perform as desired.
11887 *
11888 * Return codes
11889 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11890 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
e59058c4 11891 **/
3772a991 11892static pci_ers_result_t
da0436e9 11893lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8d63f375 11894{
11895 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11896 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11897
11898 switch (state) {
11899 case pci_channel_io_normal:
11900 /* Non-fatal error, prepare for recovery */
11901 lpfc_sli4_prep_dev_for_recover(phba);
11902 return PCI_ERS_RESULT_CAN_RECOVER;
11903 case pci_channel_io_frozen:
11904 /* Fatal error, prepare for slot reset */
11905 lpfc_sli4_prep_dev_for_reset(phba);
11906 return PCI_ERS_RESULT_NEED_RESET;
11907 case pci_channel_io_perm_failure:
11908 /* Permanent failure, prepare for device down */
11909 lpfc_sli4_prep_dev_for_perm_failure(phba);
11910 return PCI_ERS_RESULT_DISCONNECT;
11911 default:
11912 /* Unknown state, prepare and request slot reset */
11913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11914 "2825 Unknown PCI error state: x%x\n", state);
11915 lpfc_sli4_prep_dev_for_reset(phba);
11916 return PCI_ERS_RESULT_NEED_RESET;
11917 }
11918}
11919
11920/**
da0436e9 11921 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
11922 * @pdev: pointer to PCI device.
11923 *
11924 * This routine is called from the PCI subsystem for error handling on devices
11925 * with the SLI-4 interface spec. It is called after the PCI bus has been reset to
11926 * restart the PCI card from scratch, as if from a cold-boot. During the
11927 * PCI subsystem error recovery, after the driver returns
3772a991 11928 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
11929 * recovery and then call this routine before calling the .resume method to
11930 * recover the device. This function will initialize the HBA device, enable
11931 * the interrupt, but it will just put the HBA to offline state without
11932 * passing any I/O traffic.
8d63f375 11933 *
e59058c4 11934 * Return codes
11935 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11936 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8d63f375 11937 */
3772a991 11938static pci_ers_result_t
da0436e9 11939lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8d63f375 11940{
11941 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11942 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11943 struct lpfc_sli *psli = &phba->sli;
11944 uint32_t intr_mode;
11945
11946 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
11947 if (pci_enable_device_mem(pdev)) {
11948 printk(KERN_ERR "lpfc: Cannot re-enable "
11949 "PCI device after reset.\n");
11950 return PCI_ERS_RESULT_DISCONNECT;
11951 }
11952
11953 pci_restore_state(pdev);
11954
11955 /*
11956 * pci_restore_state() clears the device's saved_state flag, so the
11957 * restored state needs to be saved again.
11958 */
11959 pci_save_state(pdev);
11960
11961 if (pdev->is_busmaster)
11962 pci_set_master(pdev);
11963
11964 spin_lock_irq(&phba->hbalock);
11965 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
11966 spin_unlock_irq(&phba->hbalock);
11967
11968 /* Configure and enable interrupt */
11969 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
11970 if (intr_mode == LPFC_INTR_ERROR) {
11971 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11972 "2824 Cannot re-enable interrupt after "
11973 "slot reset.\n");
11974 return PCI_ERS_RESULT_DISCONNECT;
11975 } else
11976 phba->intr_mode = intr_mode;
11977
11978 /* Log the current active interrupt mode */
11979 lpfc_log_intr_mode(phba, phba->intr_mode);
11980
11981 return PCI_ERS_RESULT_RECOVERED;
11982}
11983
11984/**
da0436e9 11985 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
e59058c4 11986 * @pdev: pointer to PCI device
8d63f375 11987 *
3772a991 11988 * This routine is called from the PCI subsystem for error handling to device
da0436e9 11989 * with SLI-4 interface spec. It is called when kernel error recovery tells
11990 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
11991 * error recovery. After this call, traffic can start to flow from this device
11992 * again.
da0436e9 11993 **/
3772a991 11994static void
da0436e9 11995lpfc_io_resume_s4(struct pci_dev *pdev)
8d63f375 11996{
11997 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11998 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11999
12000 /*
12001 * In case of slot reset, since the function reset is performed through
12002 * a mailbox command, which needs DMA to be enabled, this operation
12003 * has to be moved to the io resume phase. Taking the device offline
12004 * will perform the necessary cleanup.
12005 */
12006 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
12007 /* Perform device reset */
618a5230 12008 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12009 lpfc_offline(phba);
12010 lpfc_sli_brdrestart(phba);
12011 /* Bring the device back online */
12012 lpfc_online(phba);
12013 }
12014
12015 /* Clean up Advanced Error Reporting (AER) if needed */
12016 if (phba->hba_flag & HBA_AER_ENABLED)
12017 pci_cleanup_aer_uncorrect_error_status(pdev);
12018}
12019
12020/**
12021 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
12022 * @pdev: pointer to PCI device
12023 * @pid: pointer to PCI device identifier
12024 *
12025 * This routine is to be registered to the kernel's PCI subsystem. When an
12026 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
12027 * at PCI device-specific information of the device and driver to see if the
12028 * driver can support this kind of device. If the match is
12029 * successful, the driver core invokes this routine. This routine dispatches
12030 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
12031 * do all the initialization that it needs to do to handle the HBA device
12032 * properly.
12033 *
12034 * Return code
12035 * 0 - driver can claim the device
12036 * negative value - driver can not claim the device
12037 **/
6f039790 12038static int
12039lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
12040{
12041 int rc;
8fa38513 12042 struct lpfc_sli_intf intf;
3772a991 12043
28baac74 12044 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
12045 return -ENODEV;
12046
8fa38513 12047 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
28baac74 12048 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
da0436e9 12049 rc = lpfc_pci_probe_one_s4(pdev, pid);
8fa38513 12050 else
3772a991 12051 rc = lpfc_pci_probe_one_s3(pdev, pid);
8fa38513 12052
12053 return rc;
12054}
12055
12056/**
12057 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
12058 * @pdev: pointer to PCI device
12059 *
12060 * This routine is to be registered to the kernel's PCI subsystem. When an
12061 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
12062 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
12063 * remove routine, which will perform all the necessary cleanup for the
12064 * device to be removed from the PCI subsystem properly.
12065 **/
6f039790 12066static void
12067lpfc_pci_remove_one(struct pci_dev *pdev)
12068{
12069 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12070 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12071
12072 switch (phba->pci_dev_grp) {
12073 case LPFC_PCI_DEV_LP:
12074 lpfc_pci_remove_one_s3(pdev);
12075 break;
12076 case LPFC_PCI_DEV_OC:
12077 lpfc_pci_remove_one_s4(pdev);
12078 break;
12079 default:
12080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12081 "1424 Invalid PCI device group: 0x%x\n",
12082 phba->pci_dev_grp);
12083 break;
12084 }
12085 return;
12086}
12087
12088/**
12089 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
12090 * @pdev: pointer to PCI device
12091 * @msg: power management message
12092 *
12093 * This routine is to be registered to the kernel's PCI subsystem to support
12094 * system Power Management (PM). When PM invokes this method, it dispatches
12095 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
12096 * suspend the device.
12097 *
12098 * Return code
12099 * 0 - driver suspended the device
12100 * Error otherwise
12101 **/
12102static int
12103lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
12104{
12105 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12106 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12107 int rc = -ENODEV;
12108
12109 switch (phba->pci_dev_grp) {
12110 case LPFC_PCI_DEV_LP:
12111 rc = lpfc_pci_suspend_one_s3(pdev, msg);
12112 break;
12113 case LPFC_PCI_DEV_OC:
12114 rc = lpfc_pci_suspend_one_s4(pdev, msg);
12115 break;
12116 default:
12117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12118 "1425 Invalid PCI device group: 0x%x\n",
12119 phba->pci_dev_grp);
12120 break;
12121 }
12122 return rc;
12123}
12124
12125/**
12126 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
12127 * @pdev: pointer to PCI device
12128 *
12129 * This routine is to be registered to the kernel's PCI subsystem to support
12130 * system Power Management (PM). When PM invokes this method, it dispatches
12131 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
12132 * resume the device.
12133 *
12134 * Return code
12135 * 0 - driver resumed the device
12136 * Error otherwise
12137 **/
12138static int
12139lpfc_pci_resume_one(struct pci_dev *pdev)
12140{
12141 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12142 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12143 int rc = -ENODEV;
12144
12145 switch (phba->pci_dev_grp) {
12146 case LPFC_PCI_DEV_LP:
12147 rc = lpfc_pci_resume_one_s3(pdev);
12148 break;
12149 case LPFC_PCI_DEV_OC:
12150 rc = lpfc_pci_resume_one_s4(pdev);
12151 break;
12152 default:
12153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12154 "1426 Invalid PCI device group: 0x%x\n",
12155 phba->pci_dev_grp);
12156 break;
12157 }
12158 return rc;
12159}
12160
12161/**
12162 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
12163 * @pdev: pointer to PCI device.
12164 * @state: the current PCI connection state.
12165 *
12166 * This routine is registered to the PCI subsystem for error handling. This
12167 * function is called by the PCI subsystem after a PCI bus error affecting
12168 * this device has been detected. When this routine is invoked, it dispatches
12169 * the action to the proper SLI-3 or SLI-4 device error detected handling
12170 * routine, which will perform the proper error detected operation.
12171 *
12172 * Return codes
12173 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12174 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12175 **/
12176static pci_ers_result_t
12177lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12178{
12179 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12180 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12181 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
12182
12183 switch (phba->pci_dev_grp) {
12184 case LPFC_PCI_DEV_LP:
12185 rc = lpfc_io_error_detected_s3(pdev, state);
12186 break;
12187 case LPFC_PCI_DEV_OC:
12188 rc = lpfc_io_error_detected_s4(pdev, state);
12189 break;
12190 default:
12191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12192 "1427 Invalid PCI device group: 0x%x\n",
12193 phba->pci_dev_grp);
12194 break;
12195 }
12196 return rc;
12197}
12198
12199/**
12200 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
12201 * @pdev: pointer to PCI device.
12202 *
12203 * This routine is registered to the PCI subsystem for error handling. This
12204 * function is called after PCI bus has been reset to restart the PCI card
12205 * from scratch, as if from a cold-boot. When this routine is invoked, it
12206 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
12207 * routine, which will perform the proper device reset.
12208 *
12209 * Return codes
12210 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
12211 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12212 **/
12213static pci_ers_result_t
12214lpfc_io_slot_reset(struct pci_dev *pdev)
12215{
12216 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12217 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12218 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
12219
12220 switch (phba->pci_dev_grp) {
12221 case LPFC_PCI_DEV_LP:
12222 rc = lpfc_io_slot_reset_s3(pdev);
12223 break;
12224 case LPFC_PCI_DEV_OC:
12225 rc = lpfc_io_slot_reset_s4(pdev);
12226 break;
12227 default:
12228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12229 "1428 Invalid PCI device group: 0x%x\n",
12230 phba->pci_dev_grp);
12231 break;
12232 }
12233 return rc;
12234}
12235
12236/**
12237 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
12238 * @pdev: pointer to PCI device
12239 *
12240 * This routine is registered to the PCI subsystem for error handling. It
12241 * is called when kernel error recovery tells the lpfc driver that it is
12242 * OK to resume normal PCI operation after PCI bus error recovery. When
12243 * this routine is invoked, it dispatches the action to the proper SLI-3
12244 * or SLI-4 device io_resume routine, which will resume the device operation.
12245 **/
12246static void
12247lpfc_io_resume(struct pci_dev *pdev)
12248{
12249 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12250 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12251
12252 switch (phba->pci_dev_grp) {
12253 case LPFC_PCI_DEV_LP:
12254 lpfc_io_resume_s3(pdev);
12255 break;
12256 case LPFC_PCI_DEV_OC:
12257 lpfc_io_resume_s4(pdev);
12258 break;
12259 default:
12260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12261 "1429 Invalid PCI device group: 0x%x\n",
12262 phba->pci_dev_grp);
12263 break;
12264 }
12265 return;
12266}
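/*
 * [Editorial sketch] The dispatchers above all switch on phba->pci_dev_grp
 * to select the SLI-3 (_s3) or SLI-4 (_s4) variant. The same dispatch could
 * be expressed as a per-group ops table, in the spirit of the SLI API jump
 * table set up by lpfc_api_table_setup(); this is an illustration of that
 * alternative only, not code from the driver:
 */
struct lpfc_pci_grp_ops {
	void (*remove)(struct pci_dev *pdev);
	int  (*suspend)(struct pci_dev *pdev, pm_message_t msg);
	int  (*resume)(struct pci_dev *pdev);
	pci_ers_result_t (*error_detected)(struct pci_dev *pdev,
					   pci_channel_state_t state);
	pci_ers_result_t (*slot_reset)(struct pci_dev *pdev);
	void (*io_resume)(struct pci_dev *pdev);
};

static const struct lpfc_pci_grp_ops lpfc_grp_ops_sketch[] = {
	[LPFC_PCI_DEV_LP] = {
		lpfc_pci_remove_one_s3, lpfc_pci_suspend_one_s3,
		lpfc_pci_resume_one_s3, lpfc_io_error_detected_s3,
		lpfc_io_slot_reset_s3,  lpfc_io_resume_s3,
	},
	[LPFC_PCI_DEV_OC] = {
		lpfc_pci_remove_one_s4, lpfc_pci_suspend_one_s4,
		lpfc_pci_resume_one_s4, lpfc_io_error_detected_s4,
		lpfc_io_slot_reset_s4,  lpfc_io_resume_s4,
	},
};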
12267
12268/**
12269 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
12270 * @phba: pointer to lpfc hba data structure.
12271 *
12272 * This routine checks to see if OAS is supported for this adapter. If
12273 * supported, the Flash Optimized Fabric (fof) configuration flag is set.
12274 * Otherwise, the OAS enable flag is cleared and the pool created for OAS device data
12275 * is destroyed.
12276 *
12277 **/
12278void
12279lpfc_sli4_oas_verify(struct lpfc_hba *phba)
12280{
12281
12282 if (!phba->cfg_EnableXLane)
12283 return;
12284
12285 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
12286 phba->cfg_fof = 1;
12287 } else {
f38fa0bb 12288 phba->cfg_fof = 0;
1ba981fd
JS
12289 if (phba->device_data_mem_pool)
12290 mempool_destroy(phba->device_data_mem_pool);
12291 phba->device_data_mem_pool = NULL;
12292 }
12293
12294 return;
12295}
12296
12297/**
12298 * lpfc_fof_queue_setup - Set up all the fof queues
12299 * @phba: pointer to lpfc hba data structure.
12300 *
12301 * This routine is invoked to set up all the fof queues for the FC HBA
12302 * operation.
12303 *
12304 * Return codes
12305 * 0 - successful
12306 * -ENOMEM - No available memory
12307 **/
12308int
12309lpfc_fof_queue_setup(struct lpfc_hba *phba)
12310{
895427bd 12311 struct lpfc_sli_ring *pring;
12312 int rc;
12313
12314 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
12315 if (rc)
12316 return -ENOMEM;
12317
f38fa0bb 12318 if (phba->cfg_fof) {
12319
12320 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
12321 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
12322 if (rc)
12323 goto out_oas_cq;
12324
12325 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
12326 phba->sli4_hba.oas_cq, LPFC_FCP);
12327 if (rc)
12328 goto out_oas_wq;
12329
12330 /* Bind this CQ/WQ to the OAS ring */
12331 pring = phba->sli4_hba.oas_wq->pring;
12332 pring->sli.sli4.wqp =
12333 (void *)phba->sli4_hba.oas_wq;
12334 phba->sli4_hba.oas_cq->pring = pring;
12335 }
12336
12337 return 0;
12338
12339out_oas_wq:
f38fa0bb 12340 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
12341out_oas_cq:
12342 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
12343 return rc;
12344
12345}
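/*
 * [Editorial sketch] Note the unwind labels above: each label undoes the
 * step that had already succeeded before the failure (so "out_oas_wq"
 * destroys the CQ and "out_oas_cq" destroys the EQ). A self-contained
 * userspace model of that create/unwind pairing, with hypothetical names:
 */
#include <stdio.h>

static int create(const char *q)   { printf("create %s\n", q); return 0; }
static void destroy(const char *q) { printf("destroy %s\n", q); }

static int fof_setup_sketch(void)
{
	int rc;

	rc = create("fof_eq");
	if (rc)
		return rc;
	rc = create("oas_cq");
	if (rc)
		goto out_eq;	/* CQ create failed: only the EQ exists */
	rc = create("oas_wq");
	if (rc)
		goto out_cq;	/* WQ create failed: tear down CQ, then EQ */
	return 0;

out_cq:
	destroy("oas_cq");
out_eq:
	destroy("fof_eq");
	return rc;
}

int main(void)
{
	return fof_setup_sketch();
}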
12346
12347/**
12348 * lpfc_fof_queue_create - Create all the fof queues
12349 * @phba: pointer to lpfc hba data structure.
12350 *
12351 * This routine is invoked to allocate all the fof queues for the FC HBA
12352 * operation. For each SLI4 queue type, the parameters such as queue entry
12353 * count (queue depth) shall be taken from the module parameter. For now,
12354 * we just use some constant number as a placeholder.
12355 *
12356 * Return codes
12357 * 0 - successful
12358 * -ENOMEM - No available memory
12359 * -EIO - The mailbox failed to complete successfully.
12360 **/
12361int
12362lpfc_fof_queue_create(struct lpfc_hba *phba)
12363{
12364 struct lpfc_queue *qdesc;
c176ffa0 12365 uint32_t wqesize;
12366
12367 /* Create FOF EQ */
12368 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
12369 phba->sli4_hba.eq_esize,
12370 phba->sli4_hba.eq_ecount);
12371 if (!qdesc)
12372 goto out_error;
12373
12374 phba->sli4_hba.fof_eq = qdesc;
12375
f38fa0bb 12376 if (phba->cfg_fof) {
12377
12378 /* Create OAS CQ */
c176ffa0 12379 if (phba->enab_exp_wqcq_pages)
12380 qdesc = lpfc_sli4_queue_alloc(phba,
12381 LPFC_EXPANDED_PAGE_SIZE,
12382 phba->sli4_hba.cq_esize,
12383 LPFC_CQE_EXP_COUNT);
12384 else
12385 qdesc = lpfc_sli4_queue_alloc(phba,
12386 LPFC_DEFAULT_PAGE_SIZE,
12387 phba->sli4_hba.cq_esize,
12388 phba->sli4_hba.cq_ecount);
12389 if (!qdesc)
12390 goto out_error;
12391
12392 phba->sli4_hba.oas_cq = qdesc;
12393
12394 /* Create OAS WQ */
12395 if (phba->enab_exp_wqcq_pages) {
12396 wqesize = (phba->fcp_embed_io) ?
12397 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
12398 qdesc = lpfc_sli4_queue_alloc(phba,
12399 LPFC_EXPANDED_PAGE_SIZE,
c176ffa0 12400 wqesize,
a51e41b6 12401 LPFC_WQE_EXP_COUNT);
c176ffa0 12402 } else
12403 qdesc = lpfc_sli4_queue_alloc(phba,
12404 LPFC_DEFAULT_PAGE_SIZE,
12405 phba->sli4_hba.wq_esize,
12406 phba->sli4_hba.wq_ecount);
c176ffa0 12407
12408 if (!qdesc)
12409 goto out_error;
12410
12411 phba->sli4_hba.oas_wq = qdesc;
895427bd 12412 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
12413
12414 }
12415 return 0;
12416
12417out_error:
12418 lpfc_fof_queue_destroy(phba);
12419 return -ENOMEM;
12420}
12421
12422/**
12423 * lpfc_fof_queue_destroy - Destroy all the fof queues
12424 * @phba: pointer to lpfc hba data structure.
12425 *
12426 * This routine is invoked to release all the SLI4 queues used for the FC HBA
12427 * operation.
12428 *
12429 * Return codes
12430 * 0 - successful
12431 **/
12432int
12433lpfc_fof_queue_destroy(struct lpfc_hba *phba)
12434{
12435 /* Release FOF Event queue */
12436 if (phba->sli4_hba.fof_eq != NULL) {
12437 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
12438 phba->sli4_hba.fof_eq = NULL;
12439 }
12440
12441 /* Release OAS Completion queue */
12442 if (phba->sli4_hba.oas_cq != NULL) {
12443 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
12444 phba->sli4_hba.oas_cq = NULL;
12445 }
12446
12447 /* Release OAS Work queue */
12448 if (phba->sli4_hba.oas_wq != NULL) {
12449 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
12450 phba->sli4_hba.oas_wq = NULL;
12451 }
12452 return 0;
12453}
12454
dea3101e 12455MODULE_DEVICE_TABLE(pci, lpfc_id_table);
12456
a55b2d21 12457static const struct pci_error_handlers lpfc_err_handler = {
12458 .error_detected = lpfc_io_error_detected,
12459 .slot_reset = lpfc_io_slot_reset,
12460 .resume = lpfc_io_resume,
12461};
12462
dea3101e 12463static struct pci_driver lpfc_driver = {
12464 .name = LPFC_DRIVER_NAME,
12465 .id_table = lpfc_id_table,
12466 .probe = lpfc_pci_probe_one,
6f039790 12467 .remove = lpfc_pci_remove_one,
85e8a239 12468 .shutdown = lpfc_pci_remove_one,
3a55b532 12469 .suspend = lpfc_pci_suspend_one,
3772a991 12470 .resume = lpfc_pci_resume_one,
2e0fef85 12471 .err_handler = &lpfc_err_handler,
dea3101e 12472};
12473
3ef6d24c 12474static const struct file_operations lpfc_mgmt_fop = {
858feacd 12475 .owner = THIS_MODULE,
12476};
12477
12478static struct miscdevice lpfc_mgmt_dev = {
12479 .minor = MISC_DYNAMIC_MINOR,
12480 .name = "lpfcmgmt",
12481 .fops = &lpfc_mgmt_fop,
12482};
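/*
 * [Editorial sketch] With only .owner set in its file_operations, the
 * /dev/lpfcmgmt misc device mainly gives management tools a handle that
 * pins the module while it is held open. A hypothetical userspace
 * presence check:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/lpfcmgmt", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/lpfcmgmt (lpfc loaded?)");
		return 1;
	}
	puts("lpfc management device present");
	close(fd);
	return 0;
}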
12483
e59058c4 12484/**
3621a710 12485 * lpfc_init - lpfc module initialization routine
12486 *
12487 * This routine is to be invoked when the lpfc module is loaded into the
12488 * kernel. The special kernel macro module_init() is used to indicate the
12489 * role of this routine to the kernel as lpfc module entry point.
12490 *
12491 * Return codes
12492 * 0 - successful
12493 * -ENOMEM - FC attach transport failed
12494 * all others - failed
12495 */
dea3101e 12496static int __init
12497lpfc_init(void)
12498{
12499 int error = 0;
12500
12501 printk(LPFC_MODULE_DESC "\n");
c44ce173 12502 printk(LPFC_COPYRIGHT "\n");
dea3101e 12503
12504 error = misc_register(&lpfc_mgmt_dev);
12505 if (error)
12506 printk(KERN_ERR "Could not register lpfcmgmt device, "
12507 "misc_register returned with status %d", error);
12508
12509 lpfc_transport_functions.vport_create = lpfc_vport_create;
12510 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
dea3101e 12511 lpfc_transport_template =
12512 fc_attach_transport(&lpfc_transport_functions);
7ee5d43e 12513 if (lpfc_transport_template == NULL)
dea3101e 12514 return -ENOMEM;
12515 lpfc_vport_transport_template =
12516 fc_attach_transport(&lpfc_vport_transport_functions);
12517 if (lpfc_vport_transport_template == NULL) {
12518 fc_release_transport(lpfc_transport_template);
12519 return -ENOMEM;
7ee5d43e 12520 }
12521
12522 /* Initialize in case vector mapping is needed */
b246de17 12523 lpfc_used_cpu = NULL;
2ea259ee 12524 lpfc_present_cpu = num_present_cpus();
7bb03bbf 12525
dea3101e 12526 error = pci_register_driver(&lpfc_driver);
92d7f7b0 12527 if (error) {
dea3101e 12528 fc_release_transport(lpfc_transport_template);
458c083e 12529 fc_release_transport(lpfc_vport_transport_template);
92d7f7b0 12530 }
dea3101e 12531
12532 return error;
12533}
12534
e59058c4 12535/**
3621a710 12536 * lpfc_exit - lpfc module removal routine
12537 *
12538 * This routine is invoked when the lpfc module is removed from the kernel.
12539 * The special kernel macro module_exit() is used to indicate the role of
12540 * this routine to the kernel as lpfc module exit point.
12541 */
dea3101e 12542static void __exit
12543lpfc_exit(void)
12544{
3ef6d24c 12545 misc_deregister(&lpfc_mgmt_dev);
dea3101e 12546 pci_unregister_driver(&lpfc_driver);
12547 fc_release_transport(lpfc_transport_template);
458c083e 12548 fc_release_transport(lpfc_vport_transport_template);
81301a9b 12549 if (_dump_buf_data) {
12550 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
12551 "_dump_buf_data at 0x%p\n",
12552 (1L << _dump_buf_data_order), _dump_buf_data);
12553 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
12554 }
12555
12556 if (_dump_buf_dif) {
12557 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
12558 "_dump_buf_dif at 0x%p\n",
12559 (1L << _dump_buf_dif_order), _dump_buf_dif);
12560 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
12561 }
b246de17 12562 kfree(lpfc_used_cpu);
7973967f 12563 idr_destroy(&lpfc_hba_index);
dea3101e 12564}
12565
12566module_init(lpfc_init);
12567module_exit(lpfc_exit);
12568MODULE_LICENSE("GPL");
12569MODULE_DESCRIPTION(LPFC_MODULE_DESC);
d080abe0 12570MODULE_AUTHOR("Broadcom");
dea3101e 12571MODULE_VERSION("0:" LPFC_DRIVER_VERSION);