/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types & its description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}

/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			udelay(delay);
	}
}

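/*
 * Illustrative usage sketch (not part of the original driver): the serial
 * flash helpers below use csio_hw_wait_op_done_val() to poll the SF_OP_A
 * register's BUSY bit low, with SF_ATTEMPTS iterations and a 10us delay
 * between polls:
 *
 *	uint32_t val;
 *	int ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0,
 *					   SF_ATTEMPTS, 10, &val);
 *	if (ret == 0)
 *		;	// operation completed; val holds the final register
 */
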
/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the HW module
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3

/*
 *	csio_hw_seeprom_read - read a serial EEPROM location
 *	@hw: hw to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}

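/*
 * Example (hedged sketch, not in the original source): reading the first
 * 32-bit word of the VPD area via the helper above.  VPD_BASE and the
 * 4-byte alignment requirement come from the function itself:
 *
 *	uint32_t word;
 *	if (csio_hw_seeprom_read(hw, VPD_BASE, &word) == 0 &&
 *	    (word & 0xff) == 0x82)
 *		;	// VPD starts with an ID string tag (0x82), the same
 *			// check csio_hw_get_vpd_params() performs below
 */
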
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 *	csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				      the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the value of the information field keyword or
 *	-EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}

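/*
 * Example (sketch only): looking up the "SN" (serial number) keyword in a
 * buffered VPD image.  A non-negative return is the offset of the field's
 * value bytes; the byte at offset - VPD_INFO_FLD_HDR_SIZE + 2 is the field
 * length, exactly as csio_hw_get_vpd_params() uses it below:
 *
 *	int sn = csio_hw_get_vpd_keyword_val(v, "SN");
 *	if (sn >= 0) {
 *		int sn_len = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
 *		// vpd + sn points at sn_len bytes of serial number
 *	}
 */
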
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}

/*
 *	csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 *	@hw: HW module
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}

/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);

	return ret;
}

/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}

/*
 *	csio_hw_flash_wait_op - wait for a flash operation to complete
 *	@hw: the HW module
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/*
 *	csio_hw_read_flash - read words from serial flash
 *	@hw: the HW module
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}
	return 0;
}

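/*
 * Example (sketch, mirrors csio_hw_get_fw_version() below): reading one
 * 32-bit word of the firmware header from flash in native byte order:
 *
 *	uint32_t fw_ver;
 *	int ret = csio_hw_read_flash(hw, FLASH_FW_START +
 *				     offsetof(struct fw_hdr, fw_ver),
 *				     1, &fw_ver, 0);
 */
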
/*
 *	csio_hw_write_flash - write up to a page of data to the serial flash
 *	@hw: the hw
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}

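/*
 * Example (sketch only): writing a single 256-byte page.  The address and
 * length must stay within one page (offset + n <= SF_PAGE_SIZE), and the
 * routine reads the page back to verify, so no extra verification pass is
 * needed by the caller:
 *
 *	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, page_buf);
 */
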
/*
 *	csio_hw_flash_erase_sectors - erase a range of flash sectors
 *	@hw: the HW module
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return 0;
}

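/*
 * Example (sketch, same arithmetic as csio_hw_fw_dload() below): erasing
 * exactly the sectors an image of @size bytes will occupy, where
 * sf_sec_size = sf_size / sf_nsec (64KB sectors on all supported parts):
 *
 *	uint32_t nsec = DIV_ROUND_UP(size, sf_sec_size);
 *	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
 *					  FLASH_FW_START_SEC + nsec - 1);
 */
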
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 *	csio_hw_get_fw_version - read the firmware version
 *	@hw: HW module
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 *	csio_hw_get_tp_version - read the TP microcode version
 *	@hw: HW module
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
			offsetof(struct fw_hdr, tp_microcode_ver), 1,
			vers, 0);
}

/*
 *	csio_hw_fw_dload - download firmware.
 *	@hw: HW module.
 *	@fw_data: firmware image to write.
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}

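/*
 * Note on the image checksum above (illustrative): a valid firmware image
 * is built so that the sum of all of its big-endian 32-bit words equals
 * 0xffffffff.  A host-side sanity check therefore looks like:
 *
 *	const __be32 *p = (const __be32 *)fw_data;
 *	uint32_t csum = 0, i;
 *	for (i = 0; i < size / sizeof(csum); i++)
 *		csum += ntohl(p[i]);
 *	// csum == 0xffffffff for an intact image
 */
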
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are
	 * left to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc {
		u32 vendor_and_model_id;
		u32 size_mb;
	} supported_flash[] = {
		{ 0x150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	u32 part, manufacturer;
	u32 density, size = 0;
	u32 flashid = 0;
	int ret;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret)
		return ret;

	/* Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			hw->params.sf_size = supported_flash[part].size_mb;
			hw->params.sf_nsec =
				hw->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/* Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/* This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14 ... 0x19: /* 1MB - 32MB */
			size = 1 << density;
			break;
		case 0x20: /* 64MB */
			size = 1 << 26;
			break;
		case 0x21: /* 128MB */
			size = 1 << 27;
			break;
		case 0x22: /* 256MB */
			size = 1 << 28;
			break;
		}
		break;
	}
	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/* This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: /* 32 MB */
			size = 1 << 25;
			break;
		case 0x17: /* 64MB */
			size = 1 << 26;
			break;
		}
		break;
	}
	case 0xc2: /* Macronix */
	case 0xef: /* Winbond */ {
		/* This Density -> Size decoding table is taken from
		 * Macronix and Winbond Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: /* 8MB */
			size = 1 << 23;
			break;
		case 0x18: /* 16MB */
			size = 1 << 24;
			break;
		}
		break;
	}
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
			  flashid);
		size = 1 << 22;
	}

	/* Store decoded Flash size */
	hw->params.sf_size = size;
	hw->params.sf_nsec = size / SF_SEC_SIZE;

found:
	if (hw->params.sf_size < FLASH_MIN_SIZE)
		csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			  flashid, hw->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}

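/*
 * Worked example of the JEDEC decode above (illustrative): a Winbond part
 * returning flashid 0x1840ef has manufacturer (flashid & 0xff) = 0xef and
 * density ((flashid >> 16) & 0xff) = 0x18, so size = 1 << 24 = 16MB and
 * sf_nsec = size / SF_SEC_SIZE given the 64KB sectors.
 */
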
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;
	int src_pf;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		src_pf = SOURCEPF_G(reg);
	else
		src_pf = T6_SOURCEPF_G(reg);

	if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
			   (src_pf >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = src_pf;

	return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb	*mbp;
	int	rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly.  (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (pcie_fw & PCIE_FW_ERR_F) {
				*state = CSIO_DEV_STATE_ERR;
				rv = -ETIMEDOUT;
			} else if (pcie_fw & PCIE_FW_INIT_F)
				*state = CSIO_DEV_STATE_INIT;

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}

/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}

/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_M).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb	*mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}

/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}

/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}

static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	enum cc_fec cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}

static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
	fw_port_cap32_t fw_pause = 0;

	if (cc_pause & PAUSE_RX)
		fw_pause |= FW_PORT_CAP32_FC_RX;
	if (cc_pause & PAUSE_TX)
		fw_pause |= FW_PORT_CAP32_FC_TX;

	return fw_pause;
}

static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
{
	fw_port_cap32_t fw_fec = 0;

	if (cc_fec & FEC_RS)
		fw_fec |= FW_PORT_CAP32_FEC_RS;
	if (cc_fec & FEC_BASER_RS)
		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

	return fw_fec;
}

/**
 *	fwcap_to_fwspeed - return highest speed in Port Capabilities
 *	@acaps: advertised Port Capabilities
 *
 *	Get the highest speed for the port from the advertised Port
 *	Capabilities.
 */
fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
{
	#define TEST_SPEED_RETURN(__caps_speed) \
		do { \
			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
				return FW_PORT_CAP32_SPEED_##__caps_speed; \
		} while (0)

	TEST_SPEED_RETURN(400G);
	TEST_SPEED_RETURN(200G);
	TEST_SPEED_RETURN(100G);
	TEST_SPEED_RETURN(50G);
	TEST_SPEED_RETURN(40G);
	TEST_SPEED_RETURN(25G);
	TEST_SPEED_RETURN(10G);
	TEST_SPEED_RETURN(1G);
	TEST_SPEED_RETURN(100M);

	#undef TEST_SPEED_RETURN

	return 0;
}

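/*
 * Example (sketch): because the macro tests speeds from highest to lowest,
 * an advertisement of both 40G and 10G resolves to the 40G capability bit:
 *
 *	fw_port_cap32_t speed = fwcap_to_fwspeed(FW_PORT_CAP32_SPEED_40G |
 *						 FW_PORT_CAP32_SPEED_10G);
 *	// speed == FW_PORT_CAP32_SPEED_40G
 */
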
/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
 */
fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}

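/*
 * Example (sketch): a 16-bit capability word advertising 10G plus RX
 * pause-frame support expands to the matching 32-bit bits via the macro
 * above:
 *
 *	fw_port_cap32_t caps32 = fwcaps16_to_caps32(FW_PORT_CAP_SPEED_10G |
 *						    FW_PORT_CAP_FC_RX);
 *	// caps32 == (FW_PORT_CAP32_SPEED_10G | FW_PORT_CAP32_FC_RX)
 */
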
/**
 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 *	32-bit Port Capabilities value.
 */
fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
{
	fw_port_cap32_t linkattr = 0;

	/* The format of the Link Status in the old
	 * 16-bit Port Information message isn't the same as the
	 * 16-bit Port Capabilities bitfield used everywhere else.
	 */
	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_TX;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}

/**
 *	csio_init_link_config - initialize a link's SW state
 *	@lc: pointer to structure holding the link state
 *	@pcaps: link Port Capabilities
 *	@acaps: link current Advertised Port Capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
				  fw_port_cap32_t acaps)
{
	lc->pcaps = pcaps;
	lc->def_acaps = acaps;
	lc->lpacaps = 0;
	lc->speed_caps = 0;
	lc->speed = 0;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	lc->fc = lc->requested_fc;

	/*
	 * For Forward Error Control, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	lc->requested_fec = FEC_AUTO;
	lc->fec = fwcap_to_cc_fec(lc->def_acaps);

	/* If the Port is capable of Auto-Negotiation, initialize it as
	 * "enabled" and copy over all of the Physical Port Capabilities
	 * to the Advertised Port Capabilities.  Otherwise mark it as
	 * Auto-Negotiate disabled and select the highest supported speed
	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
	 * and t4_handle_get_port_info().
	 */
	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		lc->acaps = lc->pcaps & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->acaps = 0;
		lc->autoneg = AUTONEG_DISABLE;
		lc->speed_caps = fwcap_to_fwspeed(acaps);
	}
}

static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
			    uint32_t *rcaps)
{
	unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
	fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap;

	lc->link_ok = 0;

	/*
	 * Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/*
	 * Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	/* Figure out what our Requested Port Capabilities are going to be.
	 * Note parallel structure in t4_handle_get_port_info() and
	 * init_link_config().
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else {
		lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
	}

	*rcaps = lrcap;
}

/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb  *mbp;
	u16 fw_caps = FW_CAPS_UNKNOWN;
	enum fw_retval retval;
	uint8_t portid;
	fw_port_cap32_t pcaps, acaps, rcaps;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		if (fw_caps == FW_CAPS_UNKNOWN) {
			u32 param, val;

			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
			val = 1;

			csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
				       hw->pfn, 0, 1, &param, &val, false,
				       NULL);

			if (csio_mb_issue(hw, mbp)) {
				csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n",
					 portid);
				mempool_free(mbp, hw->mb_mempool);
				return -EINVAL;
			}

			csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
							&val);
			if (retval != FW_SUCCESS) {
				csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
					 portid, retval);
				mempool_free(mbp, hw->mb_mempool);
				return -EINVAL;
			}

			fw_caps = val;
		}

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, fw_caps, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps,
					      &pcaps, &acaps);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps);

		csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps);

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     true, rcaps, fw_caps, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb  *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;
	const char *fw_cfg_file;

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_cfg_file = FW_CFG_NAME_T5;
	else
		fw_cfg_file = FW_CFG_NAME_T6;

	if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 fw_cfg_file, ret);
		return -ENOENT;
	}

	if (cf->size%4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}

/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb	*mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file on the host,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware.  A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

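/*
 * Example (sketch): two headers count as compatible when they are for the
 * same chip and agree on every per-interface version, even if their overall
 * fw_ver fields differ:
 *
 *	if (fw_compatible(&fw_info->fw_hdr, card_fw))
 *		;	// safe to keep running the card's firmware
 */
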
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW_CFG_NAME_T6,
		.fw_mod_name = FW_FNAME_T6,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
			   const u8 *fw_data, unsigned int fw_size,
			   struct fw_hdr *card_fw, enum csio_dev_state state,
			   int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = csio_hw_read_flash(hw, FLASH_FW_START,
				 sizeof(*card_fw) / sizeof(uint32_t),
				 (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		csio_err(hw,
			 "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
		   csio_should_install_fs_fw(hw, card_fw_usable,
					     be32_to_cpu(fs_fw->fw_ver),
					     be32_to_cpu(card_fw->fw_ver))) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
					 fw_size, 0);
		if (ret != 0) {
			csio_err(hw,
				 "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		csio_err(hw, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

2367 * Returns -EINVAL if attempts to flash the firmware failed
2369 * if flashing was not attempted because the card had the
2370 * latest firmware ECANCELED is returned
2373 csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2375 int ret = -ECANCELED;
2376 const struct firmware *fw;
2377 struct fw_info *fw_info;
2378 struct fw_hdr *card_fw;
2379 struct pci_dev *pci_dev = hw->pdev;
2380 struct device *dev = &pci_dev->dev;
2381 const u8 *fw_data = NULL;
2382 unsigned int fw_size = 0;
2383 const char *fw_bin_file;
2385 /* This is the firmware whose headers the driver was compiled against */
2388 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
2389 if (fw_info == NULL) {
2391 "unable to get firmware info for chip %d.\n",
2392 CHELSIO_CHIP_VERSION(hw->chip_id));
2396 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
2397 fw_bin_file = FW_FNAME_T5;
2399 fw_bin_file = FW_FNAME_T6;
2401 if (request_firmware(&fw, fw_bin_file, dev) < 0) {
2402 csio_err(hw, "could not find firmware image %s, err: %d\n",
2409 /* allocate memory to read the header of the firmware on the card */
2412 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2414 /* upgrade FW logic */
2415 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
2416 hw->fw_state, reset);
2420 release_firmware(fw);
2425 static int csio_hw_check_fwver(struct csio_hw *hw)
2427 if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
2428 (hw->fwrev < CSIO_MIN_T6_FW)) {
2429 csio_hw_print_fw_version(hw, "T6 unsupported fw");
2437 * csio_hw_configure - Configure HW
2442 csio_hw_configure(struct csio_hw *hw)
2448 rv = csio_hw_dev_ready(hw);
2450 CSIO_INC_STATS(hw, n_err_fatal);
2451 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2456 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
2458 /* Needed for FW download */
2459 rv = csio_hw_get_flash_params(hw);
2461 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
2462 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2466 /* Set PCIe completion timeout to 4 seconds */
2467 if (pci_is_pcie(hw->pdev))
2468 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
2469 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
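/*
 * Note: 0xd here is an encoding of the PCIe Completion Timeout Value
 * field in the Device Control 2 register, selecting the 4 s - 13 s
 * range; it is not a timeout expressed directly in seconds.
 */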
2471 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
2473 rv = csio_hw_get_fw_version(hw, &hw->fwrev);
2477 csio_hw_print_fw_version(hw, "Firmware revision");
2479 rv = csio_do_hello(hw, &hw->fw_state);
2481 CSIO_INC_STATS(hw, n_err_fatal);
2482 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2487 rv = csio_hw_get_vpd_params(hw, &hw->vpd);
2491 csio_hw_get_fw_version(hw, &hw->fwrev);
2492 csio_hw_get_tp_version(hw, &hw->tp_vers);
2493 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2495 /* Do firmware update */
2496 spin_unlock_irq(&hw->lock);
2497 rv = csio_hw_flash_fw(hw, &reset);
2498 spin_lock_irq(&hw->lock);
2503 rv = csio_hw_check_fwver(hw);
2507 /* If the firmware doesn't support Configuration Files, bail out. */
2510 rv = csio_hw_check_fwconfig(hw, param);
2512 csio_info(hw, "Firmware doesn't support "
2513 "Firmware Configuration files\n");
2517 /* The firmware provides us with a memory buffer where we can
2518 * load a Configuration File from the host if we want to
2519 * override the Configuration File in flash.
2521 rv = csio_hw_use_fwconfig(hw, reset, param);
2522 if (rv == -ENOENT) {
2523 csio_info(hw, "Could not initialize "
2524 "adapter, error %d\n", rv);
2528 csio_info(hw, "Could not initialize "
2529 "adapter, error %d\n", rv);
2534 rv = csio_hw_check_fwver(hw);
2538 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
2540 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2542 /* device parameters */
2543 rv = csio_get_device_params(hw);
2547 /* Get device capabilities */
2548 rv = csio_config_device_caps(hw);
2553 csio_wr_sge_init(hw);
2555 /* Post event to notify completion of configuration */
2556 csio_post_event(&hw->sm, CSIO_HWE_INIT);
2559 } /* if not master */
2566 * csio_hw_initialize - Initialize HW
2571 csio_hw_initialize(struct csio_hw *hw)
2573 struct csio_mb *mbp;
2574 enum fw_retval retval;
2578 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2579 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
2583 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
2585 if (csio_mb_issue(hw, mbp)) {
2586 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
2590 retval = csio_mb_fw_retval(mbp);
2591 if (retval != FW_SUCCESS) {
2592 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
2597 mempool_free(mbp, hw->mb_mempool);
2600 rv = csio_get_fcoe_resinfo(hw);
2602 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
2606 spin_unlock_irq(&hw->lock);
2607 rv = csio_config_queues(hw);
2608 spin_lock_irq(&hw->lock);
2611 csio_err(hw, "Config of queues failed!: %d\n", rv);
2615 for (i = 0; i < hw->num_pports; i++)
2616 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
2618 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2619 rv = csio_enable_ports(hw);
2621 csio_err(hw, "Failed to enable ports: %d\n", rv);
2626 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
2630 mempool_free(mbp, hw->mb_mempool);
2635 #define PF_INTR_MASK (PFSW_F | PFCIM_F)
2638 * csio_hw_intr_enable - Enable HW interrupts
2639 * @hw: Pointer to HW module.
2641 * Enable interrupts in HW registers.
2644 csio_hw_intr_enable(struct csio_hw *hw)
2646 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
2648 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
2650 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
2651 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2653 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2656 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
2657 * by FW, so do nothing for INTX.
2659 if (hw->intr_mode == CSIO_IM_MSIX)
2660 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2661 AIVEC_V(AIVEC_M), vec);
2662 else if (hw->intr_mode == CSIO_IM_MSI)
2663 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2664 AIVEC_V(AIVEC_M), 0);
2666 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
2668 /* Turn on MB interrupts - this will internally flush PIO as well */
2669 csio_mb_intr_enable(hw);
2671 /* These are common registers - only a master can modify them */
2672 if (csio_is_hw_master(hw)) {
2674 * Disable the Serial FLASH interrupt, if enabled!
2677 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
2679 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
2680 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
2681 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
2682 ERR_DATA_CPL_ON_HIGH_QID1_F |
2683 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2684 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2685 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2686 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
2688 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
2691 hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
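/*
 * csio_set_reg_field() above is used as a read-modify-write helper:
 * clear the bits selected by 'mask', then set the new value within
 * that field. A rough equivalent, as a sketch only (the driver's
 * actual helper may differ slightly):
 *
 *	uint32_t v = csio_rd_reg32(hw, reg);
 *	v = (v & ~mask) | (val & mask);
 *	csio_wr_reg32(hw, v, reg);
 */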
2696 * csio_hw_intr_disable - Disable HW interrupts
2697 * @hw: Pointer to HW module.
2699 * Turn off Mailbox and PCI_PF_CFG interrupts.
2702 csio_hw_intr_disable(struct csio_hw *hw)
2706 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
2707 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2709 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2711 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
2714 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
2716 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
2717 if (csio_is_hw_master(hw))
2718 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
2720 /* Turn off MB interrupts */
2721 csio_mb_intr_disable(hw);
2726 csio_hw_fatal_err(struct csio_hw *hw)
2728 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
2729 csio_hw_intr_disable(hw);
2731 /* Do not reset HW, we may need FW state for debugging */
2732 csio_fatal(hw, "HW Fatal error encountered!\n");
2735 /*****************************************************************************/
2737 /*****************************************************************************/
2739 * csio_hws_uninit - Uninit state
2745 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
2747 hw->prev_evt = hw->cur_evt;
2749 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2753 csio_set_state(&hw->sm, csio_hws_configuring);
2754 csio_hw_configure(hw);
2758 CSIO_INC_STATS(hw, n_evt_unexp);
2764 * csio_hws_configuring - Configuring state
2770 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
2772 hw->prev_evt = hw->cur_evt;
2774 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2778 csio_set_state(&hw->sm, csio_hws_initializing);
2779 csio_hw_initialize(hw);
2782 case CSIO_HWE_INIT_DONE:
2783 csio_set_state(&hw->sm, csio_hws_ready);
2784 /* Fan out event to all lnode SMs */
2785 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2788 case CSIO_HWE_FATAL:
2789 csio_set_state(&hw->sm, csio_hws_uninit);
2792 case CSIO_HWE_PCI_REMOVE:
2796 CSIO_INC_STATS(hw, n_evt_unexp);
2802 * csio_hws_initializing - Initializing state
2808 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
2810 hw->prev_evt = hw->cur_evt;
2812 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2815 case CSIO_HWE_INIT_DONE:
2816 csio_set_state(&hw->sm, csio_hws_ready);
2818 /* Fan out event to all lnode SMs */
2819 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2821 /* Enable interrupts */
2822 csio_hw_intr_enable(hw);
2825 case CSIO_HWE_FATAL:
2826 csio_set_state(&hw->sm, csio_hws_uninit);
2829 case CSIO_HWE_PCI_REMOVE:
2834 CSIO_INC_STATS(hw, n_evt_unexp);
2840 * csio_hws_ready - Ready state
2846 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
2848 /* Remember the event */
2851 hw->prev_evt = hw->cur_evt;
2853 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2856 case CSIO_HWE_HBA_RESET:
2857 case CSIO_HWE_FW_DLOAD:
2858 case CSIO_HWE_SUSPEND:
2859 case CSIO_HWE_PCI_REMOVE:
2860 case CSIO_HWE_PCIERR_DETECTED:
2861 csio_set_state(&hw->sm, csio_hws_quiescing);
2862 /* cleanup all outstanding cmds */
2863 if (evt == CSIO_HWE_HBA_RESET ||
2864 evt == CSIO_HWE_PCIERR_DETECTED)
2865 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
2867 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
2869 csio_hw_intr_disable(hw);
2870 csio_hw_mbm_cleanup(hw);
2872 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
2873 csio_evtq_flush(hw);
2874 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
2875 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
2878 case CSIO_HWE_FATAL:
2879 csio_set_state(&hw->sm, csio_hws_uninit);
2883 CSIO_INC_STATS(hw, n_evt_unexp);
2889 * csio_hws_quiescing - Quiescing state
2895 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
2897 hw->prev_evt = hw->cur_evt;
2899 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2902 case CSIO_HWE_QUIESCED:
2903 switch (hw->evtflag) {
2904 case CSIO_HWE_FW_DLOAD:
2905 csio_set_state(&hw->sm, csio_hws_resetting);
2906 /* Download firmware */
2909 case CSIO_HWE_HBA_RESET:
2910 csio_set_state(&hw->sm, csio_hws_resetting);
2911 /* Start reset of the HBA */
2912 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
2913 csio_wr_destroy_queues(hw, false);
2914 csio_do_reset(hw, false);
2915 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
2918 case CSIO_HWE_PCI_REMOVE:
2919 csio_set_state(&hw->sm, csio_hws_removing);
2920 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
2921 csio_wr_destroy_queues(hw, true);
2922 /* Now send the bye command */
2926 case CSIO_HWE_SUSPEND:
2927 csio_set_state(&hw->sm, csio_hws_quiesced);
2930 case CSIO_HWE_PCIERR_DETECTED:
2931 csio_set_state(&hw->sm, csio_hws_pcierr);
2932 csio_wr_destroy_queues(hw, false);
2936 CSIO_INC_STATS(hw, n_evt_unexp);
2943 CSIO_INC_STATS(hw, n_evt_unexp);
2949 * csio_hws_quiesced - Quiesced state
2955 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2957 hw->prev_evt = hw->cur_evt;
2959 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2962 case CSIO_HWE_RESUME:
2963 csio_set_state(&hw->sm, csio_hws_configuring);
2964 csio_hw_configure(hw);
2968 CSIO_INC_STATS(hw, n_evt_unexp);
2974 * csio_hws_resetting - HW Resetting state
2980 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
2982 hw->prev_evt = hw->cur_evt;
2984 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2987 case CSIO_HWE_HBA_RESET_DONE:
2988 csio_evtq_start(hw);
2989 csio_set_state(&hw->sm, csio_hws_configuring);
2990 csio_hw_configure(hw);
2994 CSIO_INC_STATS(hw, n_evt_unexp);
3000 * csio_hws_removing - PCI Hotplug removing state
3006 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
3008 hw->prev_evt = hw->cur_evt;
3010 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3013 case CSIO_HWE_HBA_RESET:
3014 if (!csio_is_hw_master(hw))
3017 * The BYE should have already been issued, so we can't
3018 * use the mailbox interface. Hence we use the PL_RST
3019 * register directly.
3021 csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
3022 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
3026 /* Should never receive any new events */
3028 CSIO_INC_STATS(hw, n_evt_unexp);
3035 * csio_hws_pcierr - PCI Error state
3041 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
3043 hw->prev_evt = hw->cur_evt;
3045 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3048 case CSIO_HWE_PCIERR_SLOT_RESET:
3049 csio_evtq_start(hw);
3050 csio_set_state(&hw->sm, csio_hws_configuring);
3051 csio_hw_configure(hw);
3055 CSIO_INC_STATS(hw, n_evt_unexp);
3060 /*****************************************************************************/
3062 /*****************************************************************************/
3065 * csio_handle_intr_status - table driven interrupt handler
3067 * @reg: the interrupt status register to process
3068 * @acts: table of interrupt actions
3070 * A table driven interrupt handler that applies a set of masks to an
3071 * interrupt status word and performs the corresponding actions if the
3072 * interrupts described by the mask have occurred. The actions include
3073 * optionally emitting a warning or alert message. The table is terminated
3074 * by an entry specifying mask 0. Returns the number of fatal interrupt conditions encountered.
3078 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3079 const struct intr_info *acts)
3082 unsigned int mask = 0;
3083 unsigned int status = csio_rd_reg32(hw, reg);
3085 for ( ; acts->mask; ++acts) {
3086 if (!(status & acts->mask))
3090 csio_fatal(hw, "Fatal %s (0x%x)\n",
3091 acts->msg, status & acts->mask);
3092 } else if (acts->msg)
3093 csio_info(hw, "%s (0x%x)\n",
3094 acts->msg, status & acts->mask);
3098 if (status) /* clear processed interrupts */
3099 csio_wr_reg32(hw, status, reg);
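/*
 * The intr_info entries used throughout the handlers below are of the
 * form { mask, msg, stat_idx, fatal }: 'mask' selects the status bits
 * of interest, 'msg' is the message to log (NULL stays silent),
 * 'stat_idx' is a statistics index (-1 when unused here), and a
 * non-zero 'fatal' marks the condition as fatal. This mirrors the
 * cxgb4 intr_info layout; treat it as a sketch of the structure, not
 * its authoritative definition.
 */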
3104 * TP interrupt handler.
3106 static void csio_tp_intr_handler(struct csio_hw *hw)
3108 static struct intr_info tp_intr_info[] = {
3109 { 0x3fffffff, "TP parity error", -1, 1 },
3110 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
3114 if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
3115 csio_hw_fatal_err(hw);
3119 * SGE interrupt handler.
3121 static void csio_sge_intr_handler(struct csio_hw *hw)
3125 static struct intr_info sge_intr_info[] = {
3126 { ERR_CPL_EXCEED_IQE_SIZE_F,
3127 "SGE received CPL exceeding IQE size", -1, 1 },
3128 { ERR_INVALID_CIDX_INC_F,
3129 "SGE GTS CIDX increment too large", -1, 0 },
3130 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
3131 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
3132 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
3133 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3134 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
3136 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
3138 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
3140 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
3142 { ERR_ING_CTXT_PRIO_F,
3143 "SGE too many priority ingress contexts", -1, 0 },
3144 { ERR_EGR_CTXT_PRIO_F,
3145 "SGE too many priority egress contexts", -1, 0 },
3146 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
3147 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
3151 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
3152 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
3154 csio_fatal(hw, "SGE parity error (%#llx)\n",
3155 (unsigned long long)v);
3156 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
3158 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
3161 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
3163 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
3165 csio_hw_fatal_err(hw);
3168 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
3169 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
3170 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
3171 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
3174 * CIM interrupt handler.
3176 static void csio_cim_intr_handler(struct csio_hw *hw)
3178 static struct intr_info cim_intr_info[] = {
3179 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
3180 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
3181 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
3182 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
3183 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
3184 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
3185 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
3188 static struct intr_info cim_upintr_info[] = {
3189 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
3190 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
3191 { ILLWRINT_F, "CIM illegal write", -1, 1 },
3192 { ILLRDINT_F, "CIM illegal read", -1, 1 },
3193 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
3194 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
3195 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
3196 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
3197 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
3198 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
3199 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
3200 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
3201 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
3202 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
3203 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
3204 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
3205 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
3206 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
3207 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
3208 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
3209 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
3210 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
3211 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
3212 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
3213 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
3214 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
3215 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
3216 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
3222 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
3224 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
3227 csio_hw_fatal_err(hw);
3231 * ULP RX interrupt handler.
3233 static void csio_ulprx_intr_handler(struct csio_hw *hw)
3235 static struct intr_info ulprx_intr_info[] = {
3236 { 0x1800000, "ULPRX context error", -1, 1 },
3237 { 0x7fffff, "ULPRX parity error", -1, 1 },
3241 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
3242 csio_hw_fatal_err(hw);
3246 * ULP TX interrupt handler.
3248 static void csio_ulptx_intr_handler(struct csio_hw *hw)
3250 static struct intr_info ulptx_intr_info[] = {
3251 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
3253 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
3255 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
3257 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
3259 { 0xfffffff, "ULPTX parity error", -1, 1 },
3263 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
3264 csio_hw_fatal_err(hw);
3268 * PM TX interrupt handler.
3270 static void csio_pmtx_intr_handler(struct csio_hw *hw)
3272 static struct intr_info pmtx_intr_info[] = {
3273 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
3274 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
3275 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
3276 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
3277 { 0xffffff0, "PMTX framing error", -1, 1 },
3278 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
3279 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
3281 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
3282 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
3286 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
3287 csio_hw_fatal_err(hw);
3291 * PM RX interrupt handler.
3293 static void csio_pmrx_intr_handler(struct csio_hw *hw)
3295 static struct intr_info pmrx_intr_info[] = {
3296 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
3297 { 0x3ffff0, "PMRX framing error", -1, 1 },
3298 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
3299 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
3301 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
3302 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
3306 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
3307 csio_hw_fatal_err(hw);
3311 * CPL switch interrupt handler.
3313 static void csio_cplsw_intr_handler(struct csio_hw *hw)
3315 static struct intr_info cplsw_intr_info[] = {
3316 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
3317 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
3318 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
3319 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
3320 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
3321 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
3325 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
3326 csio_hw_fatal_err(hw);
3330 * LE interrupt handler.
3332 static void csio_le_intr_handler(struct csio_hw *hw)
3334 enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
3336 static struct intr_info le_intr_info[] = {
3337 { LIPMISS_F, "LE LIP miss", -1, 0 },
3338 { LIP0_F, "LE 0 LIP error", -1, 0 },
3339 { PARITYERR_F, "LE parity error", -1, 1 },
3340 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3341 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
3345 static struct intr_info t6_le_intr_info[] = {
3346 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
3347 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
3348 { TCAMINTPERR_F, "LE parity error", -1, 1 },
3349 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3350 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
3354 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
3355 (chip == CHELSIO_T5) ?
3356 le_intr_info : t6_le_intr_info))
3357 csio_hw_fatal_err(hw);
3361 * MPS interrupt handler.
3363 static void csio_mps_intr_handler(struct csio_hw *hw)
3365 static struct intr_info mps_rx_intr_info[] = {
3366 { 0xffffff, "MPS Rx parity error", -1, 1 },
3369 static struct intr_info mps_tx_intr_info[] = {
3370 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
3371 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
3372 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
3374 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
3376 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
3377 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
3378 { FRMERR_F, "MPS Tx framing error", -1, 1 },
3381 static struct intr_info mps_trc_intr_info[] = {
3382 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
3383 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
3385 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
3388 static struct intr_info mps_stat_sram_intr_info[] = {
3389 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
3392 static struct intr_info mps_stat_tx_intr_info[] = {
3393 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
3396 static struct intr_info mps_stat_rx_intr_info[] = {
3397 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
3400 static struct intr_info mps_cls_intr_info[] = {
3401 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
3402 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
3403 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
3409 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
3411 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
3413 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
3414 mps_trc_intr_info) +
3415 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
3416 mps_stat_sram_intr_info) +
3417 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
3418 mps_stat_tx_intr_info) +
3419 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
3420 mps_stat_rx_intr_info) +
3421 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
3424 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
3425 csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
3427 csio_hw_fatal_err(hw);
3430 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
3434 * EDC/MC interrupt handler.
3436 static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
3438 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
3440 unsigned int addr, cnt_addr, v;
3442 if (idx <= MEM_EDC1) {
3443 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3444 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
3446 addr = MC_INT_CAUSE_A;
3447 cnt_addr = MC_ECC_STATUS_A;
3450 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
3451 if (v & PERR_INT_CAUSE_F)
3452 csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
3453 if (v & ECC_CE_INT_CAUSE_F) {
3454 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
3456 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
3457 csio_warn(hw, "%u %s correctable ECC data error%s\n",
3458 cnt, name[idx], cnt > 1 ? "s" : "");
3460 if (v & ECC_UE_INT_CAUSE_F)
3461 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
3463 csio_wr_reg32(hw, v, addr);
3464 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
3465 csio_hw_fatal_err(hw);
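/*
 * csio_mem_intr_handler() above uses a count-and-clear pattern for
 * correctable ECC errors: read the count field out of the ECC status
 * register, then write the field back at its maximum value, which
 * (assuming write-to-clear semantics for this register) resets the
 * counter for the next interval.
 */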
3469 * MA interrupt handler.
3471 static void csio_ma_intr_handler(struct csio_hw *hw)
3473 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
3475 if (status & MEM_PERR_INT_CAUSE_F)
3476 csio_fatal(hw, "MA parity error, parity status %#x\n",
3477 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
3478 if (status & MEM_WRAP_INT_CAUSE_F) {
3479 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
3481 "MA address wrap-around error by client %u to address %#x\n",
3482 MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
3484 csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
3485 csio_hw_fatal_err(hw);
3489 * SMB interrupt handler.
3491 static void csio_smb_intr_handler(struct csio_hw *hw)
3493 static struct intr_info smb_intr_info[] = {
3494 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
3495 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
3496 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
3500 if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
3501 csio_hw_fatal_err(hw);
3505 * NC-SI interrupt handler.
3507 static void csio_ncsi_intr_handler(struct csio_hw *hw)
3509 static struct intr_info ncsi_intr_info[] = {
3510 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
3511 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
3512 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
3513 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
3517 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
3518 csio_hw_fatal_err(hw);
3522 * XGMAC interrupt handler.
3524 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3526 uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
3528 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
3532 if (v & TXFIFO_PRTY_ERR_F)
3533 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3534 if (v & RXFIFO_PRTY_ERR_F)
3535 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3536 csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
3537 csio_hw_fatal_err(hw);
3541 * PL interrupt handler.
3543 static void csio_pl_intr_handler(struct csio_hw *hw)
3545 static struct intr_info pl_intr_info[] = {
3546 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
3547 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
3551 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
3552 csio_hw_fatal_err(hw);
3556 * csio_hw_slow_intr_handler - control path interrupt handler
3559 * Interrupt handler for non-data global interrupt events, e.g., errors.
3560 * The designation 'slow' is because it involves register reads, while
3561 * data interrupts typically don't involve any MMIOs.
3564 csio_hw_slow_intr_handler(struct csio_hw *hw)
3566 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
3568 if (!(cause & CSIO_GLBL_INTR_MASK)) {
3569 CSIO_INC_STATS(hw, n_plint_unexp);
3573 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
3575 CSIO_INC_STATS(hw, n_plint_cnt);
3578 csio_cim_intr_handler(hw);
3581 csio_mps_intr_handler(hw);
3584 csio_ncsi_intr_handler(hw);
3587 csio_pl_intr_handler(hw);
3590 csio_smb_intr_handler(hw);
3592 if (cause & XGMAC0_F)
3593 csio_xgmac_intr_handler(hw, 0);
3595 if (cause & XGMAC1_F)
3596 csio_xgmac_intr_handler(hw, 1);
3598 if (cause & XGMAC_KR0_F)
3599 csio_xgmac_intr_handler(hw, 2);
3601 if (cause & XGMAC_KR1_F)
3602 csio_xgmac_intr_handler(hw, 3);
3605 hw->chip_ops->chip_pcie_intr_handler(hw);
3608 csio_mem_intr_handler(hw, MEM_MC);
3611 csio_mem_intr_handler(hw, MEM_EDC0);
3614 csio_mem_intr_handler(hw, MEM_EDC1);
3617 csio_le_intr_handler(hw);
3620 csio_tp_intr_handler(hw);
3623 csio_ma_intr_handler(hw);
3625 if (cause & PM_TX_F)
3626 csio_pmtx_intr_handler(hw);
3628 if (cause & PM_RX_F)
3629 csio_pmrx_intr_handler(hw);
3631 if (cause & ULP_RX_F)
3632 csio_ulprx_intr_handler(hw);
3634 if (cause & CPL_SWITCH_F)
3635 csio_cplsw_intr_handler(hw);
3638 csio_sge_intr_handler(hw);
3640 if (cause & ULP_TX_F)
3641 csio_ulptx_intr_handler(hw);
3643 /* Clear the interrupts just processed for which we are the master. */
3644 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
3645 csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
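/*
 * The trailing read-back above is the usual MMIO posted-write flush:
 * reading the same register forces the preceding write to complete
 * before the handler returns. The pattern, as a sketch:
 *
 *	csio_wr_reg32(hw, val, reg);	// posted write
 *	(void)csio_rd_reg32(hw, reg);	// read back to flush
 */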
3650 /*****************************************************************************
3651 * HW <--> mailbox interfacing routines.
3652 ****************************************************************************/
3654 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
3656 * @data: Private data pointer.
3658 * Called from worker thread context.
3661 csio_mberr_worker(void *data)
3663 struct csio_hw *hw = (struct csio_hw *)data;
3664 struct csio_mbm *mbm = &hw->mbm;
3666 struct csio_mb *mbp_next;
3669 del_timer_sync(&mbm->timer);
3671 spin_lock_irq(&hw->lock);
3672 if (list_empty(&mbm->cbfn_q)) {
3673 spin_unlock_irq(&hw->lock);
3677 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
3678 mbm->stats.n_cbfnq = 0;
3680 /* Try to start waiting mailboxes */
3681 if (!list_empty(&mbm->req_q)) {
3682 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
3683 list_del_init(&mbp_next->list);
3685 rv = csio_mb_issue(hw, mbp_next);
3687 list_add_tail(&mbp_next->list, &mbm->req_q);
3689 CSIO_DEC_STATS(mbm, n_activeq);
3691 spin_unlock_irq(&hw->lock);
3693 /* Now callback completions */
3694 csio_mb_completions(hw, &cbfn_q);
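/*
 * csio_mberr_worker() uses the common drain pattern: splice the shared
 * list onto a local head while holding the lock, then run the
 * (potentially slow) completion callbacks with the lock dropped.
 * Minimal sketch of the core of it:
 *
 *	LIST_HEAD(cbfn_q);
 *	spin_lock_irq(&hw->lock);
 *	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
 *	spin_unlock_irq(&hw->lock);
 *	csio_mb_completions(hw, &cbfn_q);	// no lock held here
 */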
3698 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
3700 * @t: expired timer; the owning mailbox module is recovered via from_timer().
3704 csio_hw_mb_timer(struct timer_list *t)
3706 struct csio_mbm *mbm = from_timer(mbm, t, timer);
3707 struct csio_hw *hw = mbm->hw;
3708 struct csio_mb *mbp = NULL;
3710 spin_lock_irq(&hw->lock);
3711 mbp = csio_mb_tmo_handler(hw);
3712 spin_unlock_irq(&hw->lock);
3714 /* Call back the function for the timed-out Mailbox */
3716 mbp->mb_cbfn(hw, mbp);
3721 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
3724 * Called with lock held, should exit with lock held.
3725 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
3726 * into a local queue. Drops the lock, calls the completions, then re-acquires the lock.
3730 csio_hw_mbm_cleanup(struct csio_hw *hw)
3734 csio_mb_cancel_all(hw, &cbfn_q);
3736 spin_unlock_irq(&hw->lock);
3737 csio_mb_completions(hw, &cbfn_q);
3738 spin_lock_irq(&hw->lock);
3741 /*****************************************************************************
3743 ****************************************************************************/
3745 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3748 struct csio_evt_msg *evt_entry = NULL;
3750 if (type >= CSIO_EVT_MAX)
3753 if (len > CSIO_EVT_MSG_SIZE)
3756 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3759 if (list_empty(&hw->evt_free_q)) {
3760 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3765 evt_entry = list_first_entry(&hw->evt_free_q,
3766 struct csio_evt_msg, list);
3767 list_del_init(&evt_entry->list);
3769 /* copy event msg and queue the event */
3770 evt_entry->type = type;
3771 memcpy((void *)evt_entry->data, evt_msg, len);
3772 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3774 CSIO_DEC_STATS(hw, n_evt_freeq);
3775 CSIO_INC_STATS(hw, n_evt_activeq);
3781 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3782 uint16_t len, bool msg_sg)
3784 struct csio_evt_msg *evt_entry = NULL;
3785 struct csio_fl_dma_buf *fl_sg;
3787 unsigned long flags;
3790 if (type >= CSIO_EVT_MAX)
3793 if (len > CSIO_EVT_MSG_SIZE)
3796 spin_lock_irqsave(&hw->lock, flags);
3797 if (hw->flags & CSIO_HWF_FWEVT_STOP) {
3802 if (list_empty(&hw->evt_free_q)) {
3803 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3809 evt_entry = list_first_entry(&hw->evt_free_q,
3810 struct csio_evt_msg, list);
3811 list_del_init(&evt_entry->list);
3813 /* copy event msg and queue the event */
3814 evt_entry->type = type;
3816 /* If the payload is in an SG list */
3818 fl_sg = (struct csio_fl_dma_buf *) evt_msg;
3819 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
3820 memcpy((void *)((uintptr_t)evt_entry->data + off),
3821 fl_sg->flbufs[n].vaddr,
3822 fl_sg->flbufs[n].len);
3823 off += fl_sg->flbufs[n].len;
3826 memcpy((void *)evt_entry->data, evt_msg, len);
3828 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3829 CSIO_DEC_STATS(hw, n_evt_freeq);
3830 CSIO_INC_STATS(hw, n_evt_activeq);
3832 spin_unlock_irqrestore(&hw->lock, flags);
3837 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3840 spin_lock_irq(&hw->lock);
3841 list_del_init(&evt_entry->list);
3842 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3843 CSIO_DEC_STATS(hw, n_evt_activeq);
3844 CSIO_INC_STATS(hw, n_evt_freeq);
3845 spin_unlock_irq(&hw->lock);
3850 csio_evtq_flush(struct csio_hw *hw)
3854 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3855 spin_unlock_irq(&hw->lock);
3857 spin_lock_irq(&hw->lock);
3860 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
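/*
 * csio_evtq_flush() above polls for CSIO_HWF_FWEVT_PENDING to clear,
 * dropping hw->lock on every iteration so the event worker can make
 * progress between checks. 'count' bounds the wait, so a stuck worker
 * cannot hang the caller; the assertion documents that a timeout here
 * is considered a bug rather than a recoverable condition.
 */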
3864 csio_evtq_stop(struct csio_hw *hw)
3866 hw->flags |= CSIO_HWF_FWEVT_STOP;
3870 csio_evtq_start(struct csio_hw *hw)
3872 hw->flags &= ~CSIO_HWF_FWEVT_STOP;
3876 csio_evtq_cleanup(struct csio_hw *hw)
3878 struct list_head *evt_entry, *next_entry;
3880 /* Release outstanding events from activeq to freeq */
3881 if (!list_empty(&hw->evt_active_q))
3882 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
3884 hw->stats.n_evt_activeq = 0;
3885 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3887 /* Free up event entries */
3888 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
3890 CSIO_DEC_STATS(hw, n_evt_freeq);
3893 hw->stats.n_evt_freeq = 0;
3898 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3899 struct csio_fl_dma_buf *flb, void *priv)
3903 uint32_t msg_len = 0;
3906 op = ((struct rss_header *) wr)->opcode;
3907 if (op == CPL_FW6_PLD) {
3908 CSIO_INC_STATS(hw, n_cpl_fw6_pld);
3909 if (!flb || !flb->totlen) {
3910 CSIO_INC_STATS(hw, n_cpl_unexp);
3915 msg_len = flb->totlen;
3917 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
3919 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
3920 /* skip RSS header */
3921 msg = (void *)((uintptr_t)wr + sizeof(__be64));
3922 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
3923 sizeof(struct cpl_fw4_msg);
3925 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3926 CSIO_INC_STATS(hw, n_cpl_unexp);
3931 * Enqueue event to EventQ. Event processing happens
3932 * in Event worker thread context
3934 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
3935 (uint16_t)msg_len, msg_sg))
3936 CSIO_INC_STATS(hw, n_evt_drop);
3940 csio_evtq_worker(struct work_struct *work)
3942 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
3943 struct list_head *evt_entry, *next_entry;
3945 struct csio_evt_msg *evt_msg;
3946 struct cpl_fw6_msg *msg;
3947 struct csio_rnode *rn;
3949 uint8_t evtq_stop = 0;
3951 csio_dbg(hw, "event worker thread active evts#%d\n",
3952 hw->stats.n_evt_activeq);
3954 spin_lock_irq(&hw->lock);
3955 while (!list_empty(&hw->evt_active_q)) {
3956 list_splice_tail_init(&hw->evt_active_q, &evt_q);
3957 spin_unlock_irq(&hw->lock);
3959 list_for_each_safe(evt_entry, next_entry, &evt_q) {
3960 evt_msg = (struct csio_evt_msg *) evt_entry;
3962 /* Drop events if queue is STOPPED */
3963 spin_lock_irq(&hw->lock);
3964 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3966 spin_unlock_irq(&hw->lock);
3968 CSIO_INC_STATS(hw, n_evt_drop);
3972 switch (evt_msg->type) {
3974 msg = (struct cpl_fw6_msg *)(evt_msg->data);
3976 if ((msg->opcode == CPL_FW6_MSG ||
3977 msg->opcode == CPL_FW4_MSG) &&
3979 rv = csio_mb_fwevt_handler(hw,
3983 /* Handle any remaining fw events */
3984 csio_fcoe_fwevt_handler(hw,
3985 msg->opcode, msg->data);
3986 } else if (msg->opcode == CPL_FW6_PLD) {
3988 csio_fcoe_fwevt_handler(hw,
3989 msg->opcode, msg->data);
3992 "Unhandled FW msg op %x type %x\n",
3993 msg->opcode, msg->type);
3994 CSIO_INC_STATS(hw, n_evt_drop);
3999 csio_mberr_worker(hw);
4002 case CSIO_EVT_DEV_LOSS:
4003 memcpy(&rn, evt_msg->data, sizeof(rn));
4004 csio_rnode_devloss_handler(rn);
4008 csio_warn(hw, "Unhandled event %x on evtq\n",
4010 CSIO_INC_STATS(hw, n_evt_unexp);
4014 csio_free_evt(hw, evt_msg);
4017 spin_lock_irq(&hw->lock);
4019 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
4020 spin_unlock_irq(&hw->lock);
4024 csio_fwevtq_handler(struct csio_hw *hw)
4028 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
4029 CSIO_INC_STATS(hw, n_int_stray);
4033 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
4034 csio_process_fwevtq_entry, NULL);
4038 /****************************************************************************
4040 ****************************************************************************/
4042 /* Management module */
4044 * csio_mgmt_req_lookup - Look up the given IO request in the active queue.
4045 * @mgmtm: mgmt module
4046 * @io_req: IO request
4048 * Return: 0 if the given IO request exists in the active queue,
4049 * -EINVAL if the lookup fails.
4052 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
4054 struct list_head *tmp;
4056 /* Lookup ioreq in the ACTIVEQ */
4057 list_for_each(tmp, &mgmtm->active_q) {
4058 if (io_req == (struct csio_ioreq *)tmp)
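/*
 * The cast from 'tmp' to struct csio_ioreq above relies on the list
 * node being the first member of struct csio_ioreq. A safer,
 * offset-independent form would use the list iterator helpers, e.g.
 * (sketch; the member name 'sm.sm_list' is inferred from the dequeue
 * code in this file and may not match the actual definition):
 *
 *	struct csio_ioreq *req;
 *
 *	list_for_each_entry(req, &mgmtm->active_q, sm.sm_list) {
 *		if (req == io_req)
 *			return 0;
 *	}
 *	return -EINVAL;
 */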
4064 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
4067 * csio_mgmt_tmo_handler - MGMT IO timeout handler.
4068 * @t: expired timer; the owning mgmt module is recovered via from_timer().
4073 csio_mgmt_tmo_handler(struct timer_list *t)
4075 struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
4076 struct list_head *tmp;
4077 struct csio_ioreq *io_req;
4079 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
4081 spin_lock_irq(&mgmtm->hw->lock);
4083 list_for_each(tmp, &mgmtm->active_q) {
4084 io_req = (struct csio_ioreq *) tmp;
4085 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
4088 /* Dequeue the request from the active queue. */
4089 tmp = csio_list_prev(tmp);
4090 list_del_init(&io_req->sm.sm_list);
4091 if (io_req->io_cbfn) {
4092 /* io_req will be freed by completion handler */
4093 io_req->wr_status = -ETIMEDOUT;
4094 io_req->io_cbfn(mgmtm->hw, io_req);
4101 /* If the active queue is not empty, re-arm the timer */
4102 if (!list_empty(&mgmtm->active_q))
4103 mod_timer(&mgmtm->mgmt_timer,
4104 jiffies + msecs_to_jiffies(ECM_MIN_TMO));
4105 spin_unlock_irq(&mgmtm->hw->lock);
4109 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
4111 struct csio_hw *hw = mgmtm->hw;
4112 struct csio_ioreq *io_req;
4113 struct list_head *tmp;
4117 /* Wait for all outstanding requests to complete gracefully */
4118 while ((!list_empty(&mgmtm->active_q)) && count--) {
4119 spin_unlock_irq(&hw->lock);
4121 spin_lock_irq(&hw->lock);
4124 /* Release outstanding requests from the active queue */
4125 list_for_each(tmp, &mgmtm->active_q) {
4126 io_req = (struct csio_ioreq *) tmp;
4127 tmp = csio_list_prev(tmp);
4128 list_del_init(&io_req->sm.sm_list);
4129 mgmtm->stats.n_active--;
4130 if (io_req->io_cbfn) {
4131 /* io_req will be freed by completion handler */
4132 io_req->wr_status = -ETIMEDOUT;
4133 io_req->io_cbfn(mgmtm->hw, io_req);
4139 * csio_mgmt_init - Mgmt module init entry point
4140 * @mgmtm: mgmt module
4143 * Initialize mgmt timer, resource wait queue, active queue,
4144 * completion q. Allocate Egress and Ingress
4145 * WR queues and save off the queue index returned by the WR
4146 * module for future use. Allocate and save off mgmt reqs in the
4147 * mgmt_req_freelist for future use. Make sure their SM is initialized to uninit state.
4149 * Returns: 0 - on success
4150 * -ENOMEM - on error.
4153 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
4155 timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0);
4157 INIT_LIST_HEAD(&mgmtm->active_q);
4158 INIT_LIST_HEAD(&mgmtm->cbfn_q);
4161 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
4167 * csio_mgmtm_exit - MGMT module exit entry point
4168 * @mgmtm: mgmt module
4170 * This function is called during MGMT module uninit.
4171 * Stop timers, free ioreqs allocated.
4176 csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
4178 del_timer_sync(&mgmtm->mgmt_timer);
4183 * csio_hw_start - Kicks off the HW State machine
4184 * @hw: Pointer to HW module.
4186 * It is assumed that the initialization is a synchronous operation.
4187 * So when we return after posting the event, the HW SM should be in
4188 * the ready state, if there were no errors during init.
4191 csio_hw_start(struct csio_hw *hw)
4193 spin_lock_irq(&hw->lock);
4194 csio_post_event(&hw->sm, CSIO_HWE_CFG);
4195 spin_unlock_irq(&hw->lock);
4197 if (csio_is_hw_ready(hw))
4199 else if (csio_match_state(hw, csio_hws_uninit))
4206 csio_hw_stop(struct csio_hw *hw)
4208 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
4210 if (csio_is_hw_removing(hw))
4216 /* Max reset retries */
4217 #define CSIO_MAX_RESET_RETRIES 3
4220 * csio_hw_reset - Reset the hardware
4223 * Caller should hold lock across this function.
4226 csio_hw_reset(struct csio_hw *hw)
4228 if (!csio_is_hw_master(hw))
4231 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
4232 csio_dbg(hw, "Max HW reset attempts reached\n");
4237 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
4239 if (csio_is_hw_ready(hw)) {
4240 hw->rst_retries = 0;
4241 hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
4248 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
4252 csio_hw_get_device_id(struct csio_hw *hw)
4254 /* Is the adapter device id cached already? */
4255 if (csio_is_dev_id_cached(hw))
4258 /* Get the PCI vendor & device id */
4259 pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
4260 &hw->params.pci.vendor_id);
4261 pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
4262 &hw->params.pci.device_id);
4264 csio_dev_id_cached(hw);
4265 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
4267 } /* csio_hw_get_device_id */
4270 * csio_hw_set_description - Set the model, description of the hw.
4272 * @ven_id: PCI Vendor ID
4273 * @dev_id: PCI Device ID
4276 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
4278 uint32_t adap_type, prot_type;
4280 if (ven_id == CSIO_VENDOR_ID) {
4281 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
4282 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
4284 if (prot_type == CSIO_T5_FCOE_ASIC) {
4286 csio_t5_fcoe_adapters[adap_type].model_no, 16);
4287 memcpy(hw->model_desc,
4288 csio_t5_fcoe_adapters[adap_type].description,
4291 char tempName[32] = "Chelsio FCoE Controller";
4292 memcpy(hw->model_desc, tempName, 32);
4295 } /* csio_hw_set_description */
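/*
 * Device-ID decomposition used above, as an illustrative sketch (the
 * masks are defined elsewhere in the driver): the protocol bits select
 * the FCoE personality and the type bits index the adapter table.
 *
 *	prot_type = dev_id & CSIO_ASIC_DEVID_PROTO_MASK;
 *	adap_type = dev_id & CSIO_ASIC_DEVID_TYPE_MASK;
 *	if (prot_type == CSIO_T5_FCOE_ASIC)
 *		desc = csio_t5_fcoe_adapters[adap_type].description;
 *
 * Note this assumes adap_type is always a valid index into
 * csio_t5_fcoe_adapters[]; out-of-range device IDs would need a
 * bounds check.
 */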
4298 * csio_hw_init - Initialize HW module.
4299 * @hw: Pointer to HW module.
4301 * Initialize the members of the HW module.
4304 csio_hw_init(struct csio_hw *hw)
4308 uint16_t ven_id, dev_id;
4309 struct csio_evt_msg *evt_entry;
4311 INIT_LIST_HEAD(&hw->sm.sm_list);
4312 csio_init_state(&hw->sm, csio_hws_uninit);
4313 spin_lock_init(&hw->lock);
4314 INIT_LIST_HEAD(&hw->sln_head);
4316 /* Get the PCI vendor & device id */
4317 csio_hw_get_device_id(hw);
4319 strcpy(hw->name, CSIO_HW_NAME);
4321 /* Initialize the HW chip ops with the T5-specific ops */
4322 hw->chip_ops = &t5_ops;
4324 /* Set the model & its description */
4326 ven_id = hw->params.pci.vendor_id;
4327 dev_id = hw->params.pci.device_id;
4329 csio_hw_set_description(hw, ven_id, dev_id);
4331 /* Initialize default log level */
4332 hw->params.log_level = (uint32_t) csio_dbg_level;
4334 csio_set_fwevt_intr_idx(hw, -1);
4335 csio_set_nondata_intr_idx(hw, -1);
4337 /* Init all the modules: Mailbox, WorkRequest and Transport */
4338 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
4341 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
4345 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
4349 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
4351 goto err_scsim_exit;
4352 /* Pre-allocate evtq and initialize them */
4353 INIT_LIST_HEAD(&hw->evt_active_q);
4354 INIT_LIST_HEAD(&hw->evt_free_q);
4355 for (i = 0; i < csio_evtq_sz; i++) {
4357 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
4360 csio_err(hw, "Failed to initialize eventq\n");
4361 goto err_evtq_cleanup;
4364 list_add_tail(&evt_entry->list, &hw->evt_free_q);
4365 CSIO_INC_STATS(hw, n_evt_freeq);
4368 hw->dev_num = dev_num;
4374 csio_evtq_cleanup(hw);
4375 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
4377 csio_scsim_exit(csio_hw_to_scsim(hw));
4379 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
4381 csio_mbm_exit(csio_hw_to_mbm(hw));
4387 * csio_hw_exit - Un-initialize HW module.
4388 * @hw: Pointer to HW module.
4392 csio_hw_exit(struct csio_hw *hw)
4394 csio_evtq_cleanup(hw);
4395 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
4396 csio_scsim_exit(csio_hw_to_scsim(hw));
4397 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
4398 csio_mbm_exit(csio_hw_to_mbm(hw));