/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
207 static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216 mutex_lock(&adapter->mdio_lock);
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
227 static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
246 static const struct mdio_ops mi1_mdio_ops = {
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
255 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 * MI1 read/write operations for indirect-addressed PHYs.
271 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
276 mutex_lock(&adapter->mdio_lock);
277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
285 mutex_unlock(&adapter->mdio_lock);
289 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
294 mutex_lock(&adapter->mdio_lock);
295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
302 mutex_unlock(&adapter->mdio_lock);
306 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
346 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
362 } while (ctl && --wait);
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
375 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
378 unsigned int val = 0;
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
411 * t3_set_phy_speed_duplex - force PHY speed and duplex
412 * @phy: the PHY to operate on
413 * @speed: requested PHY speed
414 * @duplex: requested PHY duplex
416 * Force a 10/100/1000 PHY's speed and duplex. This also disables
417 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
419 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
424 err = mdio_read(phy, 0, MII_BMCR, &ctl);
429 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
430 if (speed == SPEED_100)
431 ctl |= BMCR_SPEED100;
432 else if (speed == SPEED_1000)
433 ctl |= BMCR_SPEED1000;
436 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
437 if (duplex == DUPLEX_FULL)
438 ctl |= BMCR_FULLDPLX;
440 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
441 ctl |= BMCR_ANENABLE;
442 return mdio_write(phy, 0, MII_BMCR, ctl);
445 int t3_phy_lasi_intr_enable(struct cphy *phy)
447 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
450 int t3_phy_lasi_intr_disable(struct cphy *phy)
452 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
455 int t3_phy_lasi_intr_clear(struct cphy *phy)
459 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
462 int t3_phy_lasi_intr_handler(struct cphy *phy)
465 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
469 return (status & 1) ? cphy_cause_link_change : 0;
472 static const struct adapter_info t3_adap_info[] = {
474 F_GPIO2_OEN | F_GPIO4_OEN |
475 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
476 &mi1_mdio_ops, "Chelsio PE9000"},
478 F_GPIO2_OEN | F_GPIO4_OEN |
479 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
480 &mi1_mdio_ops, "Chelsio T302"},
482 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
483 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
484 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
485 &mi1_mdio_ext_ops, "Chelsio T310"},
487 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
488 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
489 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
490 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
491 &mi1_mdio_ext_ops, "Chelsio T320"},
495 * Return the adapter_info structure with a given index. Out-of-range indices
498 const struct adapter_info *t3_get_adapter_info(unsigned int id)
500 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Per-PHY-type setup hook, indexed by the VPD port type. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
508 static const struct port_type_info port_types[] = {
510 { t3_ael1002_phy_prep },
511 { t3_vsc8211_phy_prep },
513 { t3_xaui_direct_phy_prep },
515 { t3_qt2045_phy_prep },
516 { t3_ael1006_phy_prep },
520 #define VPD_ENTRY(name, len) \
521 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
524 * Partial EEPROM Vital Product Data structure. Includes only the ID and
533 VPD_ENTRY(pn, 16); /* part number */
534 VPD_ENTRY(ec, 16); /* EC level */
535 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
536 VPD_ENTRY(na, 12); /* MAC address base */
537 VPD_ENTRY(cclk, 6); /* core clock */
538 VPD_ENTRY(mclk, 6); /* mem clock */
539 VPD_ENTRY(uclk, 6); /* uP clk */
540 VPD_ENTRY(mdc, 6); /* MDIO clk */
541 VPD_ENTRY(mt, 2); /* mem timing */
542 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
543 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
544 VPD_ENTRY(port0, 2); /* PHY0 complex */
545 VPD_ENTRY(port1, 2); /* PHY1 complex */
546 VPD_ENTRY(port2, 2); /* PHY2 complex */
547 VPD_ENTRY(port3, 2); /* PHY3 complex */
548 VPD_ENTRY(rv, 1); /* csum */
549 u32 pad; /* for multiple-of-4 sizing and alignment */
552 #define EEPROM_MAX_POLL 4
553 #define EEPROM_STAT_ADDR 0x4000
554 #define VPD_BASE 0xc00
557 * t3_seeprom_read - read a VPD EEPROM location
558 * @adapter: adapter to read
559 * @addr: EEPROM address
560 * @data: where to store the read data
562 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
563 * VPD ROM capability. A zero is written to the flag bit when the
564 * addres is written to the control register. The hardware device will
565 * set the flag to 1 when 4 bytes have been read into the data register.
567 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
570 int attempts = EEPROM_MAX_POLL;
572 unsigned int base = adapter->params.pci.vpd_cap_addr;
574 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
577 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
580 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
581 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
583 if (!(val & PCI_VPD_ADDR_F)) {
584 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
587 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
588 *data = cpu_to_le32(v);
593 * t3_seeprom_write - write a VPD EEPROM location
594 * @adapter: adapter to write
595 * @addr: EEPROM address
596 * @data: value to write
598 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
599 * VPD ROM capability.
601 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
604 int attempts = EEPROM_MAX_POLL;
605 unsigned int base = adapter->params.pci.vpd_cap_addr;
607 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
610 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
612 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
613 addr | PCI_VPD_ADDR_F);
616 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
617 } while ((val & PCI_VPD_ADDR_F) && --attempts);
619 if (val & PCI_VPD_ADDR_F) {
620 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
627 * t3_seeprom_wp - enable/disable EEPROM write protection
628 * @adapter: the adapter
629 * @enable: 1 to enable write protection, 0 to disable it
631 * Enables or disables write protection on the serial EEPROM.
633 int t3_seeprom_wp(struct adapter *adapter, int enable)
635 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.
 * Assumes @c is a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F');
 * other input yields an unspecified value.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
647 * get_vpd_params - read VPD parameters from VPD EEPROM
648 * @adapter: adapter to read
649 * @p: where to store the parameters
651 * Reads card parameters stored in VPD EEPROM.
653 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
659 * Card information is normally at VPD_BASE but some early cards had
662 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
665 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
667 for (i = 0; i < sizeof(vpd); i += 4) {
668 ret = t3_seeprom_read(adapter, addr + i,
669 (__le32 *)((u8 *)&vpd + i));
674 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
675 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
676 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
677 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
678 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
679 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
681 /* Old eeproms didn't have port information */
682 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
683 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
684 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
686 p->port_type[0] = hex2int(vpd.port0_data[0]);
687 p->port_type[1] = hex2int(vpd.port1_data[0]);
688 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
689 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
692 for (i = 0; i < 6; i++)
693 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
694 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
718 * sf1_read - read data from the serial flash
719 * @adapter: the adapter
720 * @byte_cnt: number of bytes to read
721 * @cont: whether another operation will be chained
722 * @valp: where to store the read data
724 * Reads up to 4 bytes of data from the serial flash. The location of
725 * the read needs to be specified prior to calling this by issuing the
726 * appropriate commands to the serial flash.
728 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
733 if (!byte_cnt || byte_cnt > 4)
735 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
737 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
738 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
740 *valp = t3_read_reg(adapter, A_SF_DATA);
745 * sf1_write - write data to the serial flash
746 * @adapter: the adapter
747 * @byte_cnt: number of bytes to write
748 * @cont: whether another operation will be chained
749 * @val: value to write
751 * Writes up to 4 bytes of data to the serial flash. The location of
752 * the write needs to be specified prior to calling this by issuing the
753 * appropriate commands to the serial flash.
755 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
758 if (!byte_cnt || byte_cnt > 4)
760 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
762 t3_write_reg(adapter, A_SF_DATA, val);
763 t3_write_reg(adapter, A_SF_OP,
764 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
765 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
769 * flash_wait_op - wait for a flash operation to complete
770 * @adapter: the adapter
771 * @attempts: max number of polls of the status register
772 * @delay: delay between polls in ms
774 * Wait for a flash operation to complete by polling the status register.
776 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
782 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
783 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
795 * t3_read_flash - read words from serial flash
796 * @adapter: the adapter
797 * @addr: the start address for the read
798 * @nwords: how many 32-bit words to read
799 * @data: where to store the read data
800 * @byte_oriented: whether to store data as bytes or as words
802 * Read the specified number of 32-bit words from the serial flash.
803 * If @byte_oriented is set the read data is stored as a byte array
804 * (i.e., big-endian), otherwise as 32-bit words in the platform's
807 int t3_read_flash(struct adapter *adapter, unsigned int addr,
808 unsigned int nwords, u32 *data, int byte_oriented)
812 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
815 addr = swab32(addr) | SF_RD_DATA_FAST;
817 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
818 (ret = sf1_read(adapter, 1, 1, data)) != 0)
821 for (; nwords; nwords--, data++) {
822 ret = sf1_read(adapter, 4, nwords > 1, data);
826 *data = htonl(*data);
832 * t3_write_flash - write up to a page of data to the serial flash
833 * @adapter: the adapter
834 * @addr: the start address to write
835 * @n: length of data to write
836 * @data: the data to write
838 * Writes up to a page of data (256 bytes) to the serial flash starting
839 * at the given address.
841 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
842 unsigned int n, const u8 *data)
846 unsigned int i, c, left, val, offset = addr & 0xff;
848 if (addr + n > SF_SIZE || offset + n > 256)
851 val = swab32(addr) | SF_PROG_PAGE;
853 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
854 (ret = sf1_write(adapter, 4, 1, val)) != 0)
857 for (left = n; left; left -= c) {
859 for (val = 0, i = 0; i < c; ++i)
860 val = (val << 8) + *data++;
862 ret = sf1_write(adapter, c, c != left, val);
866 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
869 /* Read the page to verify the write succeeded */
870 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
874 if (memcmp(data - n, (u8 *) buf + offset, n))
880 * t3_get_tp_version - read the tp sram version
881 * @adapter: the adapter
882 * @vers: where to place the version
884 * Reads the protocol sram version from sram.
886 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
890 /* Get version loaded in SRAM */
891 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
892 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
897 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
903 * t3_check_tpsram_version - read the tp sram version
904 * @adapter: the adapter
905 * @must_load: set to 1 if loading a new microcode image is required
907 * Reads the protocol sram version from flash.
909 int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
913 unsigned int major, minor;
915 if (adapter->params.rev == T3_REV_A)
920 ret = t3_get_tp_version(adapter, &vers);
924 major = G_TP_VERSION_MAJOR(vers);
925 minor = G_TP_VERSION_MINOR(vers);
927 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
930 if (major != TP_VERSION_MAJOR)
931 CH_ERR(adapter, "found wrong TP version (%u.%u), "
932 "driver needs version %d.%d\n", major, minor,
933 TP_VERSION_MAJOR, TP_VERSION_MINOR);
936 CH_ERR(adapter, "found wrong TP version (%u.%u), "
937 "driver compiled for version %d.%d\n", major, minor,
938 TP_VERSION_MAJOR, TP_VERSION_MINOR);
944 * t3_check_tpsram - check if provided protocol SRAM
945 * is compatible with this driver
946 * @adapter: the adapter
947 * @tp_sram: the firmware image to write
950 * Checks if an adapter's tp sram is compatible with the driver.
951 * Returns 0 if the versions are compatible, a negative error otherwise.
953 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
958 const __be32 *p = (const __be32 *)tp_sram;
960 /* Verify checksum */
961 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
963 if (csum != 0xffffffff) {
964 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
/* Firmware image target chip, as encoded in the version word. */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
978 * t3_get_fw_version - read the firmware version
979 * @adapter: the adapter
980 * @vers: where to place the version
982 * Reads the FW version from flash.
984 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
986 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
990 * t3_check_fw_version - check if the FW is compatible with this driver
991 * @adapter: the adapter
992 * @must_load: set to 1 if loading a new FW image is required
994 * Checks if an adapter's FW is compatible with the driver. Returns 0
995 * if the versions are compatible, a negative error otherwise.
997 int t3_check_fw_version(struct adapter *adapter, int *must_load)
1001 unsigned int type, major, minor;
1004 ret = t3_get_fw_version(adapter, &vers);
1008 type = G_FW_VERSION_TYPE(vers);
1009 major = G_FW_VERSION_MAJOR(vers);
1010 minor = G_FW_VERSION_MINOR(vers);
1012 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1013 minor == FW_VERSION_MINOR)
1016 if (major != FW_VERSION_MAJOR)
1017 CH_ERR(adapter, "found wrong FW version(%u.%u), "
1018 "driver needs version %u.%u\n", major, minor,
1019 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1020 else if (minor < FW_VERSION_MINOR) {
1022 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1023 "driver compiled for version %u.%u\n", major, minor,
1024 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1026 CH_WARN(adapter, "found newer FW version(%u.%u), "
1027 "driver compiled for version %u.%u\n", major, minor,
1028 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1035 * t3_flash_erase_sectors - erase a range of flash sectors
1036 * @adapter: the adapter
1037 * @start: the first sector to erase
1038 * @end: the last sector to erase
1040 * Erases the sectors in the given range.
1042 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1044 while (start <= end) {
1047 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1048 (ret = sf1_write(adapter, 4, 0,
1049 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1050 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1058 * t3_load_fw - download firmware
1059 * @adapter: the adapter
1060 * @fw_data: the firmware image to write
1063 * Write the supplied firmware image to the card's serial flash.
1064 * The FW image has the following sections: @size - 8 bytes of code and
1065 * data, followed by 4 bytes of FW version, followed by the 32-bit
1066 * 1's complement checksum of the whole image.
1068 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1072 const __be32 *p = (const __be32 *)fw_data;
1073 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1075 if ((size & 3) || size < FW_MIN_SIZE)
1077 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1080 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1081 csum += ntohl(p[i]);
1082 if (csum != 0xffffffff) {
1083 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1088 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1092 size -= 8; /* trim off version and checksum */
1093 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1094 unsigned int chunk_size = min(size, 256U);
1096 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1101 fw_data += chunk_size;
1105 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1108 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1112 #define CIM_CTL_BASE 0x2000
1115 * t3_cim_ctl_blk_read - read a block from CIM control region
1117 * @adap: the adapter
1118 * @addr: the start address within the CIM control region
1119 * @n: number of words to read
1120 * @valp: where to store the result
1122 * Reads a block of 4-byte words from the CIM control region.
1124 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1125 unsigned int n, unsigned int *valp)
1129 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1132 for ( ; !ret && n--; addr += 4) {
1133 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1134 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1137 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1144 * t3_link_changed - handle interface link changes
1145 * @adapter: the adapter
1146 * @port_id: the port index that changed link state
1148 * Called when a port's link settings change to propagate the new values
1149 * to the associated PHY and MAC. After performing the common tasks it
1150 * invokes an OS-specific handler.
1152 void t3_link_changed(struct adapter *adapter, int port_id)
1154 int link_ok, speed, duplex, fc;
1155 struct port_info *pi = adap2pinfo(adapter, port_id);
1156 struct cphy *phy = &pi->phy;
1157 struct cmac *mac = &pi->mac;
1158 struct link_config *lc = &pi->link_config;
1160 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1162 if (lc->requested_fc & PAUSE_AUTONEG)
1163 fc &= lc->requested_fc;
1165 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1167 if (link_ok == lc->link_ok && speed == lc->speed &&
1168 duplex == lc->duplex && fc == lc->fc)
1169 return; /* nothing changed */
1171 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1172 uses_xaui(adapter)) {
1175 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1176 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1178 lc->link_ok = link_ok;
1179 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1180 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1182 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1183 /* Set MAC speed, duplex, and flow control to match PHY. */
1184 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1188 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1192 * t3_link_start - apply link configuration to MAC/PHY
1193 * @phy: the PHY to setup
1194 * @mac: the MAC to setup
1195 * @lc: the requested link configuration
1197 * Set up a port's MAC and PHY according to a desired link configuration.
1198 * - If the PHY can auto-negotiate first decide what to advertise, then
1199 * enable/disable auto-negotiation as desired, and reset.
1200 * - If the PHY does not auto-negotiate just reset it.
1201 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1202 * otherwise do it later based on the outcome of auto-negotiation.
1204 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1206 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1209 if (lc->supported & SUPPORTED_Autoneg) {
1210 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1212 lc->advertising |= ADVERTISED_Asym_Pause;
1214 lc->advertising |= ADVERTISED_Pause;
1216 phy->ops->advertise(phy, lc->advertising);
1218 if (lc->autoneg == AUTONEG_DISABLE) {
1219 lc->speed = lc->requested_speed;
1220 lc->duplex = lc->requested_duplex;
1221 lc->fc = (unsigned char)fc;
1222 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1224 /* Also disables autoneg */
1225 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1227 phy->ops->autoneg_enable(phy);
1229 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1230 lc->fc = (unsigned char)fc;
1231 phy->ops->reset(phy, 0);
1237 * t3_set_vlan_accel - control HW VLAN extraction
1238 * @adapter: the adapter
1239 * @ports: bitmap of adapter ports to operate on
1240 * @on: enable (1) or disable (0) HW VLAN extraction
1242 * Enables or disables HW extraction of VLAN tags for the given port.
1244 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1246 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1247 ports << S_VLANEXTRACTIONENABLE,
1248 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1259 * t3_handle_intr_status - table driven interrupt handler
1260 * @adapter: the adapter that generated the interrupt
1261 * @reg: the interrupt status register to process
1262 * @mask: a mask to apply to the interrupt status
1263 * @acts: table of interrupt actions
1264 * @stats: statistics counters tracking interrupt occurences
1266 * A table driven interrupt handler that applies a set of masks to an
1267 * interrupt status word and performs the corresponding actions if the
1268 * interrupts described by the mask have occured. The actions include
1269 * optionally printing a warning or alert message, and optionally
1270 * incrementing a stat counter. The table is terminated by an entry
1271 * specifying mask 0. Returns the number of fatal interrupt conditions.
1273 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1275 const struct intr_info *acts,
1276 unsigned long *stats)
1279 unsigned int status = t3_read_reg(adapter, reg) & mask;
1281 for (; acts->mask; ++acts) {
1282 if (!(status & acts->mask))
1286 CH_ALERT(adapter, "%s (0x%x)\n",
1287 acts->msg, status & acts->mask);
1288 } else if (acts->msg)
1289 CH_WARN(adapter, "%s (0x%x)\n",
1290 acts->msg, status & acts->mask);
1291 if (acts->stat_idx >= 0)
1292 stats[acts->stat_idx]++;
1294 if (status) /* clear processed interrupts */
1295 t3_write_reg(adapter, reg, status);
1299 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1300 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1301 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1302 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1303 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1304 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1306 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1307 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1309 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1310 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1311 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1312 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1313 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1314 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1315 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1316 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1317 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1318 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1319 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1320 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1321 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1322 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1323 F_TXPARERR | V_BISTERR(M_BISTERR))
1324 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1325 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1326 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1327 #define ULPTX_INTR_MASK 0xfc
1328 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1329 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1330 F_ZERO_SWITCH_ERROR)
1331 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1332 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1333 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1334 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1335 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1336 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1337 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1338 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1339 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1340 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1341 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1342 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1343 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1344 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1345 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1346 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1347 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1348 V_MCAPARERRENB(M_MCAPARERRENB))
1349 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1350 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1351 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1352 F_MPS0 | F_CPL_SWITCH)
1355 * Interrupt handler for the PCIX1 module.
1357 static void pci_intr_handler(struct adapter *adapter)
1359 static const struct intr_info pcix1_intr_info[] = {
1360 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1361 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1362 {F_RCVTARABT, "PCI received target abort", -1, 1},
1363 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1364 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1365 {F_DETPARERR, "PCI detected parity error", -1, 1},
1366 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1367 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1368 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1370 {F_DETCORECCERR, "PCI correctable ECC error",
1371 STAT_PCI_CORR_ECC, 0},
1372 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1373 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1374 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1376 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1378 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1380 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1385 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1386 pcix1_intr_info, adapter->irq_stats))
1387 t3_fatal_err(adapter);
1391 * Interrupt handler for the PCIE module.
1393 static void pcie_intr_handler(struct adapter *adapter)
1395 static const struct intr_info pcie_intr_info[] = {
1396 {F_PEXERR, "PCI PEX error", -1, 1},
1398 "PCI unexpected split completion DMA read error", -1, 1},
1400 "PCI unexpected split completion DMA command error", -1, 1},
1401 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1402 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1403 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1404 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1405 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1406 "PCI MSI-X table/PBA parity error", -1, 1},
1407 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1408 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1409 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1410 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1411 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1415 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1416 CH_ALERT(adapter, "PEX error code 0x%x\n",
1417 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1419 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1420 pcie_intr_info, adapter->irq_stats))
1421 t3_fatal_err(adapter);
1425 * TP interrupt handler.
1427 static void tp_intr_handler(struct adapter *adapter)
1429 static const struct intr_info tp_intr_info[] = {
1430 {0xffffff, "TP parity error", -1, 1},
1431 {0x1000000, "TP out of Rx pages", -1, 1},
1432 {0x2000000, "TP out of Tx pages", -1, 1},
1436 static struct intr_info tp_intr_info_t3c[] = {
1437 {0x1fffffff, "TP parity error", -1, 1},
1438 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1439 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1443 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1444 adapter->params.rev < T3_REV_C ?
1445 tp_intr_info : tp_intr_info_t3c, NULL))
1446 t3_fatal_err(adapter);
1450 * CIM interrupt handler.
1452 static void cim_intr_handler(struct adapter *adapter)
1454 static const struct intr_info cim_intr_info[] = {
1455 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1456 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1457 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1458 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1459 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1460 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1461 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1462 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1463 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1464 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1465 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1466 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1467 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1468 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1469 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1470 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1471 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1472 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1473 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1474 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1475 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1476 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1477 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1478 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1482 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1483 cim_intr_info, NULL))
1484 t3_fatal_err(adapter);
1488 * ULP RX interrupt handler.
1490 static void ulprx_intr_handler(struct adapter *adapter)
1492 static const struct intr_info ulprx_intr_info[] = {
1493 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1494 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1495 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1496 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1497 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1498 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1499 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1500 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1504 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1505 ulprx_intr_info, NULL))
1506 t3_fatal_err(adapter);
1510 * ULP TX interrupt handler.
1512 static void ulptx_intr_handler(struct adapter *adapter)
1514 static const struct intr_info ulptx_intr_info[] = {
1515 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1516 STAT_ULP_CH0_PBL_OOB, 0},
1517 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1518 STAT_ULP_CH1_PBL_OOB, 0},
1519 {0xfc, "ULP TX parity error", -1, 1},
1523 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1524 ulptx_intr_info, adapter->irq_stats))
1525 t3_fatal_err(adapter);
1528 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1529 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1530 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1531 F_ICSPI1_TX_FRAMING_ERROR)
1532 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1533 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1534 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1535 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1538 * PM TX interrupt handler.
1540 static void pmtx_intr_handler(struct adapter *adapter)
1542 static const struct intr_info pmtx_intr_info[] = {
1543 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1544 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1545 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1546 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1547 "PMTX ispi parity error", -1, 1},
1548 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1549 "PMTX ospi parity error", -1, 1},
1553 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1554 pmtx_intr_info, NULL))
1555 t3_fatal_err(adapter);
1558 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1559 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1560 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1561 F_IESPI1_TX_FRAMING_ERROR)
1562 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1563 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1564 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1565 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1568 * PM RX interrupt handler.
1570 static void pmrx_intr_handler(struct adapter *adapter)
1572 static const struct intr_info pmrx_intr_info[] = {
1573 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1574 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1575 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1576 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1577 "PMRX ispi parity error", -1, 1},
1578 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1579 "PMRX ospi parity error", -1, 1},
1583 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1584 pmrx_intr_info, NULL))
1585 t3_fatal_err(adapter);
1589 * CPL switch interrupt handler.
1591 static void cplsw_intr_handler(struct adapter *adapter)
1593 static const struct intr_info cplsw_intr_info[] = {
1594 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1595 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1596 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1597 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1598 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1599 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1603 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1604 cplsw_intr_info, NULL))
1605 t3_fatal_err(adapter);
1609 * MPS interrupt handler.
1611 static void mps_intr_handler(struct adapter *adapter)
1613 static const struct intr_info mps_intr_info[] = {
1614 {0x1ff, "MPS parity error", -1, 1},
1618 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1619 mps_intr_info, NULL))
1620 t3_fatal_err(adapter);
1623 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1626 * MC7 interrupt handler.
1628 static void mc7_intr_handler(struct mc7 *mc7)
1630 struct adapter *adapter = mc7->adapter;
1631 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1634 mc7->stats.corr_err++;
1635 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1636 "data 0x%x 0x%x 0x%x\n", mc7->name,
1637 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1638 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1639 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1640 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1644 mc7->stats.uncorr_err++;
1645 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1646 "data 0x%x 0x%x 0x%x\n", mc7->name,
1647 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1648 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1649 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1650 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1654 mc7->stats.parity_err++;
1655 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1656 mc7->name, G_PE(cause));
1662 if (adapter->params.rev > 0)
1663 addr = t3_read_reg(adapter,
1664 mc7->offset + A_MC7_ERR_ADDR);
1665 mc7->stats.addr_err++;
1666 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1670 if (cause & MC7_INTR_FATAL)
1671 t3_fatal_err(adapter);
1673 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1676 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1677 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1679 * XGMAC interrupt handler.
1681 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1683 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1684 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1686 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1687 mac->stats.tx_fifo_parity_err++;
1688 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1690 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1691 mac->stats.rx_fifo_parity_err++;
1692 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1694 if (cause & F_TXFIFO_UNDERRUN)
1695 mac->stats.tx_fifo_urun++;
1696 if (cause & F_RXFIFO_OVERFLOW)
1697 mac->stats.rx_fifo_ovfl++;
1698 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1699 mac->stats.serdes_signal_loss++;
1700 if (cause & F_XAUIPCSCTCERR)
1701 mac->stats.xaui_pcs_ctc_err++;
1702 if (cause & F_XAUIPCSALIGNCHANGE)
1703 mac->stats.xaui_pcs_align_change++;
1705 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1706 if (cause & XGM_INTR_FATAL)
1712 * Interrupt handler for PHY events.
1714 int t3_phy_intr_handler(struct adapter *adapter)
1716 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1718 for_each_port(adapter, i) {
1719 struct port_info *p = adap2pinfo(adapter, i);
1721 if (!(p->phy.caps & SUPPORTED_IRQ))
1724 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1725 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1727 if (phy_cause & cphy_cause_link_change)
1728 t3_link_changed(adapter, i);
1729 if (phy_cause & cphy_cause_fifo_error)
1730 p->phy.fifo_errors++;
1734 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1739 * T3 slow path (non-data) interrupt handler.
1741 int t3_slow_intr_handler(struct adapter *adapter)
1743 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1745 cause &= adapter->slow_intr_mask;
1748 if (cause & F_PCIM0) {
1749 if (is_pcie(adapter))
1750 pcie_intr_handler(adapter);
1752 pci_intr_handler(adapter);
1755 t3_sge_err_intr_handler(adapter);
1756 if (cause & F_MC7_PMRX)
1757 mc7_intr_handler(&adapter->pmrx);
1758 if (cause & F_MC7_PMTX)
1759 mc7_intr_handler(&adapter->pmtx);
1760 if (cause & F_MC7_CM)
1761 mc7_intr_handler(&adapter->cm);
1763 cim_intr_handler(adapter);
1765 tp_intr_handler(adapter);
1766 if (cause & F_ULP2_RX)
1767 ulprx_intr_handler(adapter);
1768 if (cause & F_ULP2_TX)
1769 ulptx_intr_handler(adapter);
1770 if (cause & F_PM1_RX)
1771 pmrx_intr_handler(adapter);
1772 if (cause & F_PM1_TX)
1773 pmtx_intr_handler(adapter);
1774 if (cause & F_CPL_SWITCH)
1775 cplsw_intr_handler(adapter);
1777 mps_intr_handler(adapter);
1779 t3_mc5_intr_handler(&adapter->mc5);
1780 if (cause & F_XGMAC0_0)
1781 mac_intr_handler(adapter, 0);
1782 if (cause & F_XGMAC0_1)
1783 mac_intr_handler(adapter, 1);
1784 if (cause & F_T3DBG)
1785 t3_os_ext_intr_handler(adapter);
1787 /* Clear the interrupts just processed. */
1788 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1789 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1793 static unsigned int calc_gpio_intr(struct adapter *adap)
1795 unsigned int i, gpi_intr = 0;
1797 for_each_port(adap, i)
1798 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1799 adapter_info(adap)->gpio_intr[i])
1800 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1805 * t3_intr_enable - enable interrupts
1806 * @adapter: the adapter whose interrupts should be enabled
1808 * Enable interrupts by setting the interrupt enable registers of the
1809 * various HW modules and then enabling the top-level interrupt
1812 void t3_intr_enable(struct adapter *adapter)
1814 static const struct addr_val_pair intr_en_avp[] = {
1815 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1816 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1817 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1819 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1821 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1822 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1823 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1824 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1825 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1826 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1829 adapter->slow_intr_mask = PL_INTR_MASK;
1831 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1832 t3_write_reg(adapter, A_TP_INT_ENABLE,
1833 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1835 if (adapter->params.rev > 0) {
1836 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1837 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1838 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1839 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1840 F_PBL_BOUND_ERR_CH1);
1842 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1843 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1846 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1848 if (is_pcie(adapter))
1849 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1851 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1852 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1853 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1857 * t3_intr_disable - disable a card's interrupts
1858 * @adapter: the adapter whose interrupts should be disabled
1860 * Disable interrupts. We only disable the top-level interrupt
1861 * concentrator and the SGE data interrupts.
1863 void t3_intr_disable(struct adapter *adapter)
1865 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1866 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1867 adapter->slow_intr_mask = 0;
1871 * t3_intr_clear - clear all interrupts
1872 * @adapter: the adapter whose interrupts should be cleared
1874 * Clears all interrupts.
1876 void t3_intr_clear(struct adapter *adapter)
1878 static const unsigned int cause_reg_addr[] = {
1880 A_SG_RSPQ_FL_STATUS,
1883 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1884 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1885 A_CIM_HOST_INT_CAUSE,
1898 /* Clear PHY and MAC interrupts for each port. */
1899 for_each_port(adapter, i)
1900 t3_port_intr_clear(adapter, i);
1902 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1903 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1905 if (is_pcie(adapter))
1906 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1907 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1908 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1912 * t3_port_intr_enable - enable port-specific interrupts
1913 * @adapter: associated adapter
1914 * @idx: index of port whose interrupts should be enabled
1916 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1919 void t3_port_intr_enable(struct adapter *adapter, int idx)
1921 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1923 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1924 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1925 phy->ops->intr_enable(phy);
1929 * t3_port_intr_disable - disable port-specific interrupts
1930 * @adapter: associated adapter
1931 * @idx: index of port whose interrupts should be disabled
1933 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1936 void t3_port_intr_disable(struct adapter *adapter, int idx)
1938 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1940 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1941 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1942 phy->ops->intr_disable(phy);
1946 * t3_port_intr_clear - clear port-specific interrupts
1947 * @adapter: associated adapter
1948 * @idx: index of port whose interrupts to clear
1950 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1953 void t3_port_intr_clear(struct adapter *adapter, int idx)
1955 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1957 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1958 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1959 phy->ops->intr_clear(phy);
1962 #define SG_CONTEXT_CMD_ATTEMPTS 100
1965 * t3_sge_write_context - write an SGE context
1966 * @adapter: the adapter
1967 * @id: the context id
1968 * @type: the context type
1970 * Program an SGE context with the values already loaded in the
1971 * CONTEXT_DATA? registers.
1973 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1979 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1980 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1981 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1982 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1983 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1986 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1989 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1990 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
1991 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
1992 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
1993 return t3_sge_write_context(adap, id, type);
1997 * t3_sge_init_ecntxt - initialize an SGE egress context
1998 * @adapter: the adapter to configure
1999 * @id: the context id
2000 * @gts_enable: whether to enable GTS for the context
2001 * @type: the egress context type
2002 * @respq: associated response queue
2003 * @base_addr: base address of queue
2004 * @size: number of queue entries
2006 * @gen: initial generation value for the context
2007 * @cidx: consumer pointer
2009 * Initialize an SGE egress context and make it ready for use. If the
2010 * platform allows concurrent context operations, the caller is
2011 * responsible for appropriate locking.
2013 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2014 enum sge_context_type type, int respq, u64 base_addr,
2015 unsigned int size, unsigned int token, int gen,
2018 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2020 if (base_addr & 0xfff) /* must be 4K aligned */
2022 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2026 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2027 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2028 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2029 V_EC_BASE_LO(base_addr & 0xffff));
2031 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2033 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2034 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2035 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2037 return t3_sge_write_context(adapter, id, F_EGRESS);
2041 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2042 * @adapter: the adapter to configure
2043 * @id: the context id
2044 * @gts_enable: whether to enable GTS for the context
2045 * @base_addr: base address of queue
2046 * @size: number of queue entries
2047 * @bsize: size of each buffer for this queue
2048 * @cong_thres: threshold to signal congestion to upstream producers
2049 * @gen: initial generation value for the context
2050 * @cidx: consumer pointer
2052 * Initialize an SGE free list context and make it ready for use. The
2053 * caller is responsible for ensuring only one context operation occurs
2056 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2057 int gts_enable, u64 base_addr, unsigned int size,
2058 unsigned int bsize, unsigned int cong_thres, int gen,
2061 if (base_addr & 0xfff) /* must be 4K aligned */
2063 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2067 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2069 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2070 V_FL_BASE_HI((u32) base_addr) |
2071 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2072 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2073 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2074 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2075 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2076 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2077 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2078 return t3_sge_write_context(adapter, id, F_FREELIST);
2082 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2083 * @adapter: the adapter to configure
2084 * @id: the context id
2085 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2086 * @base_addr: base address of queue
2087 * @size: number of queue entries
2088 * @fl_thres: threshold for selecting the normal or jumbo free list
2089 * @gen: initial generation value for the context
2090 * @cidx: consumer pointer
2092 * Initialize an SGE response queue context and make it ready for use.
2093 * The caller is responsible for ensuring only one context operation
2096 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2097 int irq_vec_idx, u64 base_addr, unsigned int size,
2098 unsigned int fl_thres, int gen, unsigned int cidx)
2100 unsigned int intr = 0;
2102 if (base_addr & 0xfff) /* must be 4K aligned */
2104 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2108 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2110 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2112 if (irq_vec_idx >= 0)
2113 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2114 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2115 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2116 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2117 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2121 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2122 * @adapter: the adapter to configure
2123 * @id: the context id
2124 * @base_addr: base address of queue
2125 * @size: number of queue entries
2126 * @rspq: response queue for async notifications
2127 * @ovfl_mode: CQ overflow mode
2128 * @credits: completion queue credits
2129 * @credit_thres: the credit threshold
2131 * Initialize an SGE completion queue context and make it ready for use.
2132 * The caller is responsible for ensuring only one context operation
2135 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2136 unsigned int size, int rspq, int ovfl_mode,
2137 unsigned int credits, unsigned int credit_thres)
2139 if (base_addr & 0xfff) /* must be 4K aligned */
2141 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2145 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2146 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2148 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2149 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2150 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2151 V_CQ_ERR(ovfl_mode));
2152 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2153 V_CQ_CREDIT_THRES(credit_thres));
2154 return t3_sge_write_context(adapter, id, F_CQ);
2158 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2159 * @adapter: the adapter
2160 * @id: the egress context id
2161 * @enable: enable (1) or disable (0) the context
2163 * Enable or disable an SGE egress context. The caller is responsible for
2164 * ensuring only one context operation occurs at a time.
2166 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2168 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2171 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2172 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2173 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2174 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2175 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2176 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2177 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2178 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2179 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2183 * t3_sge_disable_fl - disable an SGE free-buffer list
2184 * @adapter: the adapter
2185 * @id: the free list context id
2187 * Disable an SGE free-buffer list. The caller is responsible for
2188 * ensuring only one context operation occurs at a time.
/*
 * Disable a free-buffer list by writing a zero FL_SIZE: MASK2 selects the
 * FL_SIZE field only, DATA2 supplies 0, and the write command (opcode 1)
 * is issued for the FREELIST context type.
 */
2190 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
/* Only one context operation may be outstanding at a time. */
2192 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2195 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2196 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2197 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2198 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2199 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2200 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2201 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2202 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2203 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2207 * t3_sge_disable_rspcntxt - disable an SGE response queue
2208 * @adapter: the adapter
2209 * @id: the response queue context id
2211 * Disable an SGE response queue. The caller is responsible for
2212 * ensuring only one context operation occurs at a time.
/*
 * Disable a response queue by zeroing its CQ_SIZE field: MASK0 selects
 * CQ_SIZE, DATA0 supplies 0, then a context write (opcode 1) is issued for
 * the RESPONSEQ context type and the BUSY bit is polled for completion.
 */
2214 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2216 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2219 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE))
2220 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2221 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2222 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2223 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2224 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2225 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2226 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2227 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2231 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2232 * @adapter: the adapter
2233 * @id: the completion queue context id
2235 * Disable an SGE completion queue. The caller is responsible for
2236 * ensuring only one context operation occurs at a time.
/*
 * Disable a completion queue the same way a response queue is disabled:
 * zero the CQ_SIZE field (MASK0 = CQ_SIZE, DATA0 = 0) and issue a context
 * write (opcode 1) for the CQ context type.
 */
2238 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2240 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2243 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2244 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2245 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2246 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2247 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2248 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2249 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2250 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2251 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2255 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2256 * @adapter: the adapter
2257 * @id: the context id
2258 * @op: the operation to perform
2260 * Perform the selected operation on an SGE completion queue context.
2261 * The caller is responsible for ensuring only one context operation
/*
 * Issue CQ operation @op with @credits (credits go in bits 31:16 of DATA0),
 * then wait for completion.  For ops 2..6 the CQ index is returned: rev > 0
 * chips return it directly in the command-completion value; rev 0 chips
 * need an explicit context read (opcode 0) and the index is then extracted
 * from DATA0.
 */
2264 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2265 unsigned int credits)
2269 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2272 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2273 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2274 V_CONTEXT(id) | F_CQ);
2275 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2276 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/* Ops 2..6 report the current CQ index back to the caller. */
2279 if (op >= 2 && op < 7) {
2280 if (adapter->params.rev > 0)
2281 return G_CQ_INDEX(val);
/* Rev 0: re-read the context to fetch the index from DATA0. */
2283 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2284 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2285 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2286 F_CONTEXT_CMD_BUSY, 0,
2287 SG_CONTEXT_CMD_ATTEMPTS, 1))
2289 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2295 * t3_sge_read_context - read an SGE context
2296 * @type: the context type
2297 * @adapter: the adapter
2298 * @id: the context id
2299 * @data: holds the retrieved context
2301 * Read an SGE egress context. The caller is responsible for ensuring
2302 * only one context operation occurs at a time.
/*
 * Common helper for all SGE context reads: issue a read command (opcode 0)
 * for the given context @type and @id, wait for BUSY to clear, then copy
 * the four context words out of the DATA0..DATA3 registers into @data.
 */
2304 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2305 unsigned int id, u32 data[4])
2307 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2310 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2311 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2312 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2313 SG_CONTEXT_CMD_ATTEMPTS, 1))
2315 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2316 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2317 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2318 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2323 * t3_sge_read_ecntxt - read an SGE egress context
2324 * @adapter: the adapter
2325 * @id: the context id
2326 * @data: holds the retrieved context
2328 * Read an SGE egress context. The caller is responsible for ensuring
2329 * only one context operation occurs at a time.
/* Read an egress context via the shared context-read helper. */
2331 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2335 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2339 * t3_sge_read_cq - read an SGE CQ context
2340 * @adapter: the adapter
2341 * @id: the context id
2342 * @data: holds the retrieved context
2344 * Read an SGE CQ context. The caller is responsible for ensuring
2345 * only one context operation occurs at a time.
/* Read a completion-queue context via the shared context-read helper. */
2347 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2351 return t3_sge_read_context(F_CQ, adapter, id, data);
2355 * t3_sge_read_fl - read an SGE free-list context
2356 * @adapter: the adapter
2357 * @id: the context id
2358 * @data: holds the retrieved context
2360 * Read an SGE free-list context. The caller is responsible for ensuring
2361 * only one context operation occurs at a time.
/*
 * Read a free-list context.  There are two free lists per queue set,
 * hence the SGE_QSETS * 2 bound on @id.
 */
2363 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2365 if (id >= SGE_QSETS * 2)
2367 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2371 * t3_sge_read_rspq - read an SGE response queue context
2372 * @adapter: the adapter
2373 * @id: the context id
2374 * @data: holds the retrieved context
2376 * Read an SGE response queue context. The caller is responsible for
2377 * ensuring only one context operation occurs at a time.
/* Read a response-queue context; one response queue per queue set. */
2379 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2381 if (id >= SGE_QSETS)
2383 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2387 * t3_config_rss - configure Rx packet steering
2388 * @adapter: the adapter
2389 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2390 * @cpus: values for the CPU lookup table (0xff terminated)
2391 * @rspq: values for the response queue lookup table (0xffff terminated)
2393 * Programs the receive packet steering logic. @cpus and @rspq provide
2394 * the values for the CPU and response queue lookup tables. If they
2395 * provide fewer values than the size of the tables the supplied values
2396 * are used repeatedly until the tables are fully populated.
/*
 * Program the RSS CPU lookup table (two 6-bit CPU values packed per entry),
 * the response-queue map table, and finally the global RSS configuration
 * register.  Per the contract above, the 0xff / 0xffff terminators cause
 * the supplied values to be reused from the start until each table is full.
 */
2398 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2399 const u8 * cpus, const u16 *rspq)
2401 int i, j, cpu_idx = 0, q_idx = 0;
2404 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
/* Each lookup-table entry packs two CPU ids, 8 bits apart. */
2407 for (j = 0; j < 2; ++j) {
2408 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2409 if (cpus[cpu_idx] == 0xff)
2412 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2416 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
/* Map entry i (upper half of the word) to the next response queue. */
2417 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2418 (i << 16) | rspq[q_idx++]);
2419 if (rspq[q_idx] == 0xffff)
2423 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2427 * t3_read_rss - read the contents of the RSS tables
2428 * @adapter: the adapter
2429 * @lkup: holds the contents of the RSS lookup table
2430 * @map: holds the contents of the RSS map table
2432 * Reads the contents of the receive packet steering tables.
/*
 * Read back the RSS lookup and map tables.  A table entry is fetched by
 * writing its index to the table register and reading the register back;
 * bit 31 of the returned value acts as a valid flag, so its absence is
 * treated as an error.
 */
2434 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2440 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2441 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2443 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2444 if (!(val & 0x80000000))
2447 *lkup++ = (val >> 8);
2451 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2452 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2454 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2455 if (!(val & 0x80000000))
2463 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2464 * @adap: the adapter
2465 * @enable: 1 to select offload mode, 0 for regular NIC
2467 * Switches TP to NIC/offload mode.
/*
 * Toggle TP between NIC mode and offload mode by updating the NICMODE
 * field.  The guard means the switch to offload mode is attempted only on
 * offload-capable adapters, while switching back to NIC mode is always
 * allowed.
 */
2469 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2471 if (is_offload(adap) || !enable)
2472 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2473 V_NICMODE(!enable));
2477 * pm_num_pages - calculate the number of pages of the payload memory
2478 * @mem_size: the size of the payload memory
2479 * @pg_size: the size of each payload memory page
2481 * Calculate the number of pages, each of the given size, that fit in a
2482 * memory of the specified size, respecting the HW requirement that the
2483 * number of pages must be a multiple of 24.
/*
 * Number of whole pages of size @pg_size that fit in @mem_size; per the
 * contract above, the result is subsequently rounded down to a multiple
 * of 24 as the hardware requires.
 */
2485 static inline unsigned int pm_num_pages(unsigned int mem_size,
2486 unsigned int pg_size)
2488 unsigned int n = mem_size / pg_size;
/*
 * Carve a region of @size bytes at running offset @start: program the
 * region's base-address register, then advance @start past the region.
 * NOTE(review): the macro continues on an elided line — confirm the full
 * expansion against the complete source before relying on side effects.
 */
2493 #define mem_region(adap, start, size, reg) \
2494 t3_write_reg((adap), A_ ## reg, (start)); \
2498 * partition_mem - partition memory and configure TP memory settings
2499 * @adap: the adapter
2500 * @p: the TP parameters
2502 * Partitions context and payload memory and configures TP's memory
/*
 * Lay out context memory (CM) and payload memory (PM):  size the PM Tx/Rx
 * channels and page pools, size the pstruct pool, place the SGE context
 * regions, timers, and CM free lists at increasing offsets, hand the
 * remainder to the CIM, and finally shrink the MC5 server region if the
 * remaining CM cannot hold the full TID count.
 */
2505 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2507 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2508 unsigned int timers = 0, timers_shift = 22;
/* Rev > 0: scale the per-TID timer region with the TID count. */
2510 if (adap->params.rev > 0) {
2511 if (tids <= 16 * 1024) {
2514 } else if (tids <= 64 * 1024) {
2517 } else if (tids <= 256 * 1024) {
2523 t3_write_reg(adap, A_TP_PMM_SIZE,
2524 p->chan_rx_size | (p->chan_tx_size >> 16));
2526 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2527 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2528 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2529 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2530 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2532 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2533 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2534 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2536 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2537 /* Add a bit of headroom and make multiple of 24 */
2539 pstructs -= pstructs % 24;
2540 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* CM layout: TCBs first, then SGE contexts, timers, and free lists. */
2542 m = tids * TCB_SIZE;
2543 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2544 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2545 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2546 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2547 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2548 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2549 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2550 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Round up to a 4 KB boundary and give the rest of CM to the CIM. */
2552 m = (m + 4095) & ~0xfff;
2553 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2554 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* If CM can't hold all TIDs, convert the excess into server entries. */
2556 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2557 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2558 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2560 adap->params.mc5.nservers += m - tids;
/* Write @val to indirect TP register @addr via the PIO address/data pair. */
2563 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2566 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2567 t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * One-time TP (protocol engine) register configuration: checksum offloads,
 * TCP option defaults, delayed-ACK behavior, congestion-mode settings, and
 * per-revision tweaks.  The exact write order is part of the hardware
 * programming sequence — do not reorder.
 */
2570 static void tp_config(struct adapter *adap, const struct tp_params *p)
2572 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2573 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2574 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2575 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2576 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2577 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2578 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2579 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2580 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2581 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2582 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2583 F_IPV6ENABLE | F_NICMODE);
2584 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2585 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
/* ESND handling only exists on rev > 0 silicon. */
2586 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2587 adap->params.rev > 0 ? F_ENABLEESND :
2590 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2592 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2593 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2594 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2595 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2596 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* Two writes on purpose — presumably high/low proxy-flow thresholds;
 * confirm against the register manual. */
2597 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2598 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Rev > 0: automatic Tx pacing; otherwise fixed pacing. */
2600 if (adap->params.rev > 0) {
2601 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2602 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2604 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2605 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2607 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2609 if (adap->params.rev == T3_REV_C)
2610 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2611 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2612 V_TABLELATENCYDELTA(4));
2614 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2615 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2616 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2617 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2620 /* Desired TP timer resolution in usec */
2621 #define TP_TMR_RES 50
2623 /* TCP timer values in ms */
2624 #define TP_DACK_TIMER 50
/* Minimum TCP retransmission timeout, in ms. */
2625 #define TP_RTO_MIN 250
2628 * tp_set_timers - set TP timing parameters
2629 * @adap: the adapter to set
2630 * @core_clk: the core clock frequency in Hz
2632 * Set TP's timing parameters, such as the various timer resolutions and
2633 * the TCP timer values.
/*
 * Program TP's timer resolutions (derived from the core clock so each tick
 * is a fixed real-time interval), delayed-ACK timer, exponential backoff
 * tables, retransmit shift limits, and the standard TCP timer values.
 */
2635 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* tre: log2 divider giving ~TP_TMR_RES us per tick. */
2637 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2638 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2639 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
/* tps: main timer ticks per second. */
2640 unsigned int tps = core_clk >> tre;
2642 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2643 V_DELAYEDACKRESOLUTION(dack_re) |
2644 V_TIMESTAMPRESOLUTION(tstamp_re));
2645 t3_write_reg(adap, A_TP_DACK_TIMER,
2646 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Backoff multiplier tables, four 8-bit entries per register. */
2647 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2648 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2649 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2650 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2651 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2652 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2653 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* "n SECONDS" expands to "n * tps", i.e. n seconds in timer ticks. */
2656 #define SECONDS * tps
2658 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2659 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2660 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2661 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2662 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2663 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2664 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2665 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2666 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2672 * t3_tp_set_coalescing_size - set receive coalescing size
2673 * @adap: the adapter
2674 * @size: the receive coalescing size
2675 * @psh: whether a set PSH bit should deliver coalesced data
2677 * Set the receive coalescing size and PSH bit handling.
/*
 * Configure Rx payload coalescing: reject sizes above the hardware limit,
 * clear the enable/PSH bits in PARA_REG3, then re-enable with the
 * requested size (clamped) and PSH policy when a non-zero size is given.
 */
2679 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2683 if (size > MAX_RX_COALESCING_LEN)
2686 val = t3_read_reg(adap, A_TP_PARA_REG3);
2687 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2690 val |= F_RXCOALESCEENABLE;
2692 val |= F_RXCOALESCEPSHEN;
/* Defensive clamp; size was already range-checked above. */
2693 size = min(MAX_RX_COALESCING_LEN, size);
2694 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2695 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2697 t3_write_reg(adap, A_TP_PARA_REG3, val);
2702 * t3_tp_set_max_rxsize - set the max receive size
2703 * @adap: the adapter
2704 * @size: the max receive size
2706 * Set TP's max receive size. This is the limit that applies when
2707 * receive coalescing is disabled.
/* Apply @size as the max transfer length for both PM channels. */
2709 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2711 t3_write_reg(adap, A_TP_PARA_REG7,
2712 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/*
 * Populate the default MTU table.  The table body is elided from this
 * chunk; the rationale for the minimum value is given below.
 */
2715 static void init_mtus(unsigned short mtus[])
2718 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2719 * it can accomodate max size TCP/IP headers when SACK and timestamps
2720 * are enabled and still have at least 8 bytes of payload.
2741 * Initial congestion control parameters.
/*
 * Fill the default congestion-control alpha (@a) and beta (@b) tables.
 * Alpha starts at 1 for the smallest windows; beta steps up from 0 as the
 * window index grows.
 */
2743 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2745 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2770 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2773 b[13] = b[14] = b[15] = b[16] = 3;
2774 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2775 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2780 /* The minimum additive increment value for the congestion control table */
/* Unsigned so max() in t3_load_mtus compares like types. */
2781 #define CC_MIN_INCR 2U
2784 * t3_load_mtus - write the MTU and congestion control HW tables
2785 * @adap: the adapter
2786 * @mtus: the unrestricted values for the MTU table
2787 * @alphs: the values for the congestion control alpha parameter
2788 * @beta: the values for the congestion control beta parameter
2789 * @mtu_cap: the maximum permitted effective MTU
2791 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2792 * Update the high-speed congestion control table with the supplied alpha,
/*
 * Write each capped MTU, together with its (possibly rounded) log2, into
 * the HW MTU table, and for every congestion-control window compute the
 * additive increment from alpha and the expected packets-per-window,
 * flooring it at CC_MIN_INCR.
 */
2795 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2796 unsigned short alpha[NCCTRL_WIN],
2797 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Expected average packets per congestion window, per window index. */
2799 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2800 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2801 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2802 28672, 40960, 57344, 81920, 114688, 163840, 229376
2807 for (i = 0; i < NMTUS; ++i) {
2808 unsigned int mtu = min(mtus[i], mtu_cap);
2809 unsigned int log2 = fls(mtu);
/* Round log2 down when the MTU is below the quarter point. */
2811 if (!(mtu & ((1 << log2) >> 2))) /* round */
2813 t3_write_reg(adap, A_TP_MTU_TABLE,
2814 (i << 24) | (log2 << 16) | mtu);
2816 for (w = 0; w < NCCTRL_WIN; ++w) {
/* (mtu - 40) is the TCP payload after IP + TCP headers. */
2819 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2822 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2823 (w << 16) | (beta[w] << 13) | inc);
2829 * t3_read_hw_mtus - returns the values in the HW MTU table
2830 * @adap: the adapter
2831 * @mtus: where to store the HW MTU values
2833 * Reads the HW MTU table.
/*
 * Read each MTU table entry by writing 0xff000000 | index (read request)
 * and reading the register back; the MTU occupies the low 14 bits.
 */
2835 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2839 for (i = 0; i < NMTUS; ++i) {
2842 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2843 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2844 mtus[i] = val & 0x3fff;
2849 * t3_get_cong_cntl_tab - reads the congestion control table
2850 * @adap: the adapter
2851 * @incr: where to store the alpha values
2853 * Reads the additive increments programmed into the HW congestion
/*
 * Dump the congestion-control table: for every (mtu, window) pair, issue a
 * read request (0xffff0000 | index) and extract the additive increment
 * from the value read back.
 */
2856 void t3_get_cong_cntl_tab(struct adapter *adap,
2857 unsigned short incr[NMTUS][NCCTRL_WIN])
2859 unsigned int mtu, w;
2861 for (mtu = 0; mtu < NMTUS; ++mtu)
2862 for (w = 0; w < NCCTRL_WIN; ++w) {
2863 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2864 0xffff0000 | (mtu << 5) | w);
2865 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2871 * t3_tp_get_mib_stats - read TP's MIB counters
2872 * @adap: the adapter
2873 * @tps: holds the returned counter values
2875 * Returns the values of TP's MIB counters.
/*
 * Bulk-read TP's MIB counters through the indirect index/data register
 * pair straight into @tps, which must be laid out as an array of u32s.
 */
2877 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2879 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2880 sizeof(*tps) / sizeof(u32), 0);
/*
 * ulp_region / ulptx_region: program the lower and upper limit registers
 * of a named ULP RX / ULP TX memory region starting at @start with @len
 * bytes.  (ulp_region continues on an elided line, presumably advancing
 * @start — confirm against the complete source.)
 */
2883 #define ulp_region(adap, name, start, len) \
2884 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2885 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2886 (start) + (len) - 1); \
2889 #define ulptx_region(adap, name, start, len) \
2890 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2891 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2892 (start) + (len) - 1)
/*
 * Partition the Rx payload channel among the ULP users: 1/8 each for iSCSI
 * and TDDP, 1/4 each for TPT, STAG, RQ and the two PBL regions (one on the
 * ULP TX side, one on the ULP RX side).  The running offset @m is advanced
 * by the region macros.
 */
2894 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2896 unsigned int m = p->chan_rx_size;
2898 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2899 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2900 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2901 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2902 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2903 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2904 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
/* Match all tag bits for TDDP. */
2905 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2909 * t3_set_proto_sram - set the contents of the protocol sram
2910 * @adapter: the adapter
2911 * @data: the protocol image
2913 * Write the contents of the protocol SRAM.
2915 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2918 const __be32 *buf = (const __be32 *)data;
2920 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2921 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2922 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2923 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2924 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2925 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2927 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2928 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2931 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/*
 * Program one packet trace filter: pack the 5-tuple (plus VLAN and
 * interface) and its per-field mask into four key/mask word pairs, fold
 * the invert/enable flags into key[3], and write all eight words to the
 * Tx (index 0) or Rx (index 1) trace-key register bank via indirect TP
 * writes.  The trailing read flushes the last posted write.
 */
2936 void t3_config_trace_filter(struct adapter *adapter,
2937 const struct trace_params *tp, int filter_index,
2938 int invert, int enable)
2940 u32 addr, key[4], mask[4];
/* Source IP straddles key[0]/key[1]; same layout for the mask below. */
2942 key[0] = tp->sport | (tp->sip << 16);
2943 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2945 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2947 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2948 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2949 mask[2] = tp->dip_mask;
2950 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Control bits live in the top of key[3]: bit 29 and bit 28. */
2953 key[3] |= (1 << 29);
2955 key[3] |= (1 << 28);
2957 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2958 tp_wr_indirect(adapter, addr++, key[0]);
2959 tp_wr_indirect(adapter, addr++, mask[0]);
2960 tp_wr_indirect(adapter, addr++, key[1]);
2961 tp_wr_indirect(adapter, addr++, mask[1]);
2962 tp_wr_indirect(adapter, addr++, key[2]);
2963 tp_wr_indirect(adapter, addr++, mask[2]);
2964 tp_wr_indirect(adapter, addr++, key[3]);
2965 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted writes to the device. */
2966 t3_read_reg(adapter, A_TP_PIO_DATA);
2970 * t3_config_sched - configure a HW traffic scheduler
2971 * @adap: the adapter
2972 * @kbps: target rate in Kbps
2973 * @sched: the scheduler index
2975 * Configure a HW scheduler for the target rate
/*
 * Search all clocks-per-tick (cpt) / bytes-per-tick (bpt) pairs for the
 * combination whose achievable rate is closest to the requested @kbps,
 * then program the selected pair into the scheduler's half of the shared
 * rate-limit register (even schedulers use the low 16 bits, odd the high).
 */
2977 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2979 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2980 unsigned int clk = adap->params.vpd.cclk * 1000;
2981 unsigned int selected_cpt = 0, selected_bpt = 0;
2984 kbps *= 125; /* -> bytes */
/* Exhaustive search over the 8-bit cpt range for the best fit. */
2985 for (cpt = 1; cpt <= 255; cpt++) {
2987 bpt = (kbps + tps / 2) / tps;
2988 if (bpt > 0 && bpt <= 255) {
2990 delta = v >= kbps ? v - kbps : kbps - v;
2991 if (delta <= mindelta) {
/* Once a candidate exists, growing cpt can't improve the fit. */
2996 } else if (selected_cpt)
/* Two schedulers share each rate-limit word; select our half. */
3002 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3003 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3004 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3006 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3008 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3009 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * Final TP bring-up: enable VLAN acceleration, then either kick the
 * free-list initialization and wait for it (offload adapters) or simply
 * deassert TP reset (NIC-only adapters).
 */
3013 static int tp_init(struct adapter *adap, const struct tp_params *p)
3018 t3_set_vlan_accel(adap, 3, 0);
3020 if (is_offload(adap)) {
3021 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3022 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3023 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3026 CH_ERR(adap, "TP initialization timed out\n");
3030 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/*
 * Activate the ports named in @port_mask.  The mask is rejected if it
 * names a port beyond the adapter's port count; otherwise it is shifted
 * into the PORT0/PORT1 ACTIVE field of the MPS configuration register.
 */
3034 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3036 if (port_mask & ~((1 << adap->params.nports) - 1))
3038 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3039 port_mask << S_PORT0ACTIVE);
3044 * Perform the bits of HW initialization that are dependent on the number
3045 * of available ports.
/*
 * Port-count-dependent setup.  Single port: disable round-robin
 * arbitration in ULP RX/TX and give all Tx buffering to one channel.
 * Two ports: enable round-robin arbitration, split DMA weights and Tx
 * buffering evenly, activate both ports, and distribute the Tx
 * modulation queues across both channels.
 */
3047 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3052 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3053 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3054 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3055 F_PORT0ACTIVE | F_ENFORCEPKT);
3056 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
3058 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3059 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3060 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3061 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3062 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3063 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3065 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3066 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3067 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3068 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* Alternate TOS-to-queue table entries between the two channels. */
3069 for (i = 0; i < 16; i++)
3070 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3071 (i << 16) | 0x1010);
/*
 * Calibrate the MAC's impedance settings.  XAUI: retry up to 5 times —
 * trigger calibration, and if neither fault nor busy is reported, latch
 * the measured impedance back into the register; report failure after all
 * attempts.  RGMII: program fixed pull-up/pull-down values and strobe the
 * update bit.
 */
3075 static int calibrate_xgm(struct adapter *adapter)
3077 if (uses_xaui(adapter)) {
3080 for (i = 0; i < 5; ++i) {
3081 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* Read-back flushes the posted write before polling. */
3082 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3084 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3085 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3086 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3087 V_XAUIIMP(G_CALIMP(v) >> 2));
3091 CH_ERR(adapter, "MAC calibration failed\n");
3094 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3095 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3096 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3097 F_XGM_IMPSETUPDATE);
/*
 * T3B-specific RGMII impedance calibration: program the pull values with
 * CALRESET and CALUPDATE asserted, release reset, pulse IMPSETUPDATE,
 * then pulse CALUPDATE (clear, then set) to latch the result.  XAUI
 * adapters need no action here.
 */
3102 static void calibrate_xgm_t3b(struct adapter *adapter)
3104 if (!uses_xaui(adapter)) {
3105 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3106 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3107 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3108 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3109 F_XGM_IMPSETUPDATE);
3110 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3112 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3113 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/*
 * MC7 memory-controller timing parameters; mc7_init() packs these into
 * the A_MC7_PARM register (see the V_ACTTOPREDLY etc. fields there).
 * RefCyc is indexed by the memory density read from A_MC7_CFG.
 */
3117 struct mc7_timing_params {
3118 unsigned char ActToPreDly;
3119 unsigned char ActToRdWrDly;
3120 unsigned char PreCyc;
3121 unsigned char RefCyc[5];
3122 unsigned char BkCyc;
3123 unsigned char WrToRdDly;
3124 unsigned char RdToWrDly;
3128 * Write a value to a register and check that the write completed. These
3129 * writes normally complete in a cycle or two, so one read should suffice.
3130 * The very first read exists to flush the posted write to the device.
/*
 * Per the contract above: write @val, flush the posted write with a read,
 * then check that BUSY has already cleared on the next read — these writes
 * normally complete within a cycle or two, so no polling loop is needed.
 */
3132 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3134 t3_write_reg(adapter, addr, val);
3135 t3_read_reg(adapter, addr); /* flush */
3136 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3138 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Bring up one MC7 memory controller: read back the strapped width /
 * density / speed, run signal calibration, program the DRAM timing
 * parameters, issue the JEDEC-style init sequence (precharge, mode
 * registers, DLL reset, refreshes), enable periodic refresh and ECC,
 * run a full-range BIST pass, and finally mark the controller ready.
 * The register sequence mirrors the DRAM init protocol — do not reorder.
 */
3142 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3144 static const unsigned int mc7_mode[] = {
3145 0x632, 0x642, 0x652, 0x432, 0x442
/* One timing set per supported memory type, same order as mc7_mode. */
3147 static const struct mc7_timing_params mc7_timings[] = {
3148 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3149 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3150 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3151 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3152 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3156 unsigned int width, density, slow, attempts;
3157 struct adapter *adapter = mc7->adapter;
3158 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* Strapping bits in A_MC7_CFG describe the attached DRAM. */
3163 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3164 slow = val & F_SLOW;
3165 width = G_WIDTH(val);
3166 density = G_DEN(val);
3168 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3169 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot calibration; fail if still busy or faulted afterwards. */
3173 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3174 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3176 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3177 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3178 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3184 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3185 V_ACTTOPREDLY(p->ActToPreDly) |
3186 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3187 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3188 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3190 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3191 val | F_CLKEN | F_TERM150);
3192 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3195 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DRAM init: precharge and extended mode registers. */
3200 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3201 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3202 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3203 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3207 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3208 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
/* Precharge, two refreshes, mode register, DLL enable/disable. */
3212 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3213 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3214 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3215 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3216 mc7_mode[mem_type]) ||
3217 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3218 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3221 /* clock value is in KHz */
3222 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3223 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3225 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3226 F_PERREFEN | V_PREREFDIV(mc7_clock));
3227 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3229 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
/* BIST over the whole address range, then poll until it finishes. */
3230 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3231 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3232 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3233 (mc7->size << width) - 1);
3234 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3235 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3240 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3241 } while ((val & F_BUSY) && --attempts);
3243 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3247 /* Enable normal memory accesses. */
3248 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune PCIe ACK latency and replay-timer limits.  Both lookup tables are
 * indexed [log2(link width)][max payload size code], with the payload code
 * taken from the Device Control register and LOsEnable from Link Control.
 * Rev 0 silicon uses a different ACKLAT field layout (T3A_*) and derives
 * fst_trn_rx from the Tx value instead of A_PCIE_MODE.
 */
3255 static void config_pcie(struct adapter *adap)
3257 static const u16 ack_lat[4][6] = {
3258 {237, 416, 559, 1071, 2095, 4143},
3259 {128, 217, 289, 545, 1057, 2081},
3260 {73, 118, 154, 282, 538, 1050},
3261 {67, 107, 86, 150, 278, 534}
3263 static const u16 rpl_tmr[4][6] = {
3264 {711, 1248, 1677, 3213, 6285, 12429},
3265 {384, 651, 867, 1635, 3171, 6243},
3266 {219, 354, 462, 846, 1614, 3150},
3267 {201, 321, 258, 450, 834, 1602}
3271 unsigned int log2_width, pldsize;
3272 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3274 pci_read_config_word(adap->pdev,
3275 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3277 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3278 pci_read_config_word(adap->pdev,
3279 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3282 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3283 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3284 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3285 log2_width = fls(adap->params.pci.width) - 1;
3286 acklat = ack_lat[log2_width][pldsize];
3287 if (val & 1) /* check LOsEnable */
3288 acklat += fst_trn_tx * 4;
3289 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3291 if (adap->params.rev == 0)
3292 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3293 V_T3A_ACKLAT(M_T3A_ACKLAT),
3294 V_T3A_ACKLAT(acklat));
3296 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3299 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3300 V_REPLAYLMT(rpllmt));
/* Clear any latched PCIe errors, then enable link-down reset handling. */
3302 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3303 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3304 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3305 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3309 * Initialize and configure T3 HW modules. This performs the
3310 * initialization steps that need to be done once after a card is reset.
3311 * MAC and PHY initialization is handled separately whenever a port is enabled.
3313 * fw_params are passed to FW and their value is platform dependent. Only the
3314 * top 8 bits are available for use, the rest must be 0.
/*
 * One-time HW initialization after reset: XGM calibration, memory
 * controller and MC5 bring-up, SGE context clearing, TP configuration,
 * bus-specific setup, and finally kicking the uP firmware boot.
 * Returns 0 on success, a negative error code otherwise (err starts
 * at -EIO and the success path is in lines not visible in this chunk).
 *
 * NOTE(review): many error-goto lines and the final return are missing
 * from this extraction — verify control flow against the full source.
 */
3316 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3318 int err = -EIO, attempts, i;
3319 const struct vpd_params *vpd = &adapter->params.vpd;
/* XGMAC calibration differs between rev 0 (T3A) and later silicon */
3321 if (adapter->params.rev > 0)
3322 calibrate_xgm_t3b(adapter);
3323 else if (calibrate_xgm(adapter))
/* carve up the external memories among the HW modules */
3327 partition_mem(adapter, &adapter->params.tp);
/* bring up the three MC7 memories and the MC5 TCAM; any failure aborts */
3329 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3330 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3331 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3332 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3333 adapter->params.mc5.nfilters,
3334 adapter->params.mc5.nroutes))
/* clear the first 32 SGE completion-queue contexts */
3337 for (i = 0; i < 32; i++)
3338 if (clear_sge_ctxt(adapter, i, F_CQ))
3342 if (tp_init(adapter, &adapter->params.tp))
/* cap Rx coalescing and max Rx size by the configured max packet size */
3345 t3_tp_set_coalescing_size(adapter,
3346 min(adapter->params.sge.max_pkt_size,
3347 MAX_RX_COALESCING_LEN), 1);
3348 t3_tp_set_max_rxsize(adapter,
3349 min(adapter->params.sge.max_pkt_size, 16384U));
3350 ulp_config(adapter, &adapter->params.tp);
/* bus-specific setup: PCIe gets config_pcie(), PCI-X gets CFG bits */
3352 if (is_pcie(adapter))
3353 config_pcie(adapter);
3355 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3356 F_DMASTOPEN | F_CLIDECEN);
3358 if (adapter->params.rev == T3_REV_C)
3359 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3360 F_CFG_CQE_SOP_MASK);
3362 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3363 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3364 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3365 init_hw_for_avail_ports(adapter, adapter->params.nports);
3366 t3_sge_init(adapter, &adapter->params.sge);
3368 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
/* hand fw_params to the uP and point it at the firmware in flash */
3370 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3371 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3372 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3373 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
/* uP clears A_CIM_HOST_ACC_DATA when it has initialized; poll for that */
3376 do { /* wait for uP to initialize */
3378 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3380 CH_ERR(adapter, "uP initialization timed out\n");
3390 * get_pci_mode - determine a card's PCI mode
3391 * @adapter: the adapter
3392 * @p: where to store the PCI settings
3394 * Determines a card's PCI mode and associated parameters, such as speed
/*
 * Fills in @p with the card's bus variant, speed, and width.  If a PCIe
 * capability is present the variant is PCIE and the width comes from the
 * Link Status register; otherwise A_PCIX_MODE is decoded to distinguish
 * plain PCI from the PCI-X mode-1/mode-2 variants.
 *
 * NOTE(review): the `u16 val;` declaration, the PCIe speed assignment,
 * and several braces/else lines are missing from this extraction.
 */
3397 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
/* bus clock in MHz, indexed by the PCLKRANGE field of A_PCIX_MODE */
3399 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3400 u32 pci_mode, pcie_cap;
3402 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
/* PCIe: record the capability offset and negotiated link width */
3406 p->variant = PCI_VARIANT_PCIE;
3407 p->pcie_cap_addr = pcie_cap;
3408 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3410 p->width = (val >> 4) & 0x3f;
/* PCI/PCI-X: decode mode register for speed, width, and init pattern */
3414 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3415 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3416 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3417 pci_mode = G_PCIXINITPAT(pci_mode);
/* init pattern 0 = conventional PCI, 1-3 = mode 1 w/ parity,
 * 4-7 = mode 1 w/ ECC, above that = 266 MHz mode 2 */
3419 p->variant = PCI_VARIANT_PCI;
3420 else if (pci_mode < 4)
3421 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3422 else if (pci_mode < 8)
3423 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3425 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3429 * init_link_config - initialize a link's SW state
3430 * @lc: structure holding the link state
3431 * @ai: information about the current card
3433 * Initializes the SW state maintained for each link, including the link's
3434 * capabilities and default speed/duplex/flow-control/autonegotiation
/*
 * Initializes @lc from the capability mask @caps: speed and duplex start
 * out invalid, flow control defaults to Rx+Tx pause, and autonegotiation
 * is enabled (advertising everything supported) only when the Autoneg
 * capability bit is set; otherwise it is disabled with no advertising.
 *
 * NOTE(review): the `} else {` between the two branches is lost in this
 * extraction; lines 3448-3449 are the non-autoneg branch.
 */
3437 static void init_link_config(struct link_config *lc, unsigned int caps)
3439 lc->supported = caps;
3440 lc->requested_speed = lc->speed = SPEED_INVALID;
3441 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3442 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3443 if (lc->supported & SUPPORTED_Autoneg) {
/* autoneg-capable: advertise all supported modes, let pause autoneg */
3444 lc->advertising = lc->supported;
3445 lc->autoneg = AUTONEG_ENABLE;
3446 lc->requested_fc |= PAUSE_AUTONEG;
/* no autoneg capability: nothing advertised, autoneg off */
3448 lc->advertising = 0;
3449 lc->autoneg = AUTONEG_DISABLE;
3454 * mc7_calc_size - calculate MC7 memory size
3455 * @cfg: the MC7 configuration
3457 * Calculates the size of an MC7 memory in bytes from the value of its
3458 * configuration register.
/*
 * Decodes the MC7 configuration register fields: WIDTH, the bank count
 * (F_BKS: 1 or 2), the organization (F_ORG: 1 or 2), and the density
 * code, and combines them into the memory size in megabytes.
 *
 * NOTE(review): the function's return statement is not visible in this
 * extraction — presumably it converts MBs to bytes; confirm against the
 * full source (the docstring above says the result is in bytes).
 */
3460 static unsigned int mc7_calc_size(u32 cfg)
3462 unsigned int width = G_WIDTH(cfg);
3463 unsigned int banks = !!(cfg & F_BKS) + 1;
3464 unsigned int org = !!(cfg & F_ORG) + 1;
3465 unsigned int density = G_DEN(cfg);
/* base density 256 MB scaled up by the density code, by the number of
 * banks, and down by organization and data-path width */
3466 unsigned int MBs = ((256 << density) * banks) / (org << width);
3471 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3472 unsigned int base_addr, const char *name)
3476 mc7->adapter = adapter;
3478 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3479 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3480 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3481 mc7->width = G_WIDTH(cfg);
/*
 * Initializes the SW state of MAC @index: adapter back-pointer and the
 * per-instance register offset (instance 1's registers are at a fixed
 * stride above instance 0's).  On rev 0 silicon with XAUI the SERDES
 * control register is also programmed, with a different magic value for
 * 10G vs. 1G operation.
 *
 * NOTE(review): the final arguments of the t3_set_reg_field() call and
 * the closing braces are missing from this extraction.
 */
3484 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3486 mac->adapter = adapter;
3487 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
/* rev 0 + XAUI needs explicit SERDES setup; value differs for 10G */
3490 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3491 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3492 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3493 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
/*
 * Early HW setup done before full initialization: MDIO (MI1) setup, I2C
 * clock divider, GPIO direction/output bits, MC5 server index reset,
 * SG_OCO base, and enabling the XGMAC port clocks so their registers
 * become accessible.  Reads of A_XGM_PORT_CFG after each write act as
 * flushes.
 *
 * NOTE(review): the statement under the rev-0/!XAUI test and a few other
 * interior lines are missing from this extraction.
 */
3498 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
/* port speed field: 3 = 10G, 2 = 1G */
3500 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3502 mi1_init(adapter, ai);
3503 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3504 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3505 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3506 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3507 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3508 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3510 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3513 /* Enable MAC clocks so we can access the registers */
3514 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3515 t3_read_reg(adapter, A_XGM_PORT_CFG);
/* release the clock-divider reset on both XGMAC instances */
3517 val |= F_CLKDIVRESET_;
3518 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3519 t3_read_reg(adapter, A_XGM_PORT_CFG);
3520 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3521 t3_read_reg(adapter, A_XGM_PORT_CFG);
3525 * Reset the adapter.
3526 * Older PCIe cards lose their config space during reset, PCI-X
/*
 * Resets the chip via A_PL_RST.  Pre-B2 PCIe cards lose their PCI config
 * space across the reset, so it is saved beforehand and restored after.
 * Completion is detected by polling the PCI device ID (0x1425 = Chelsio)
 * up to 10 times.
 *
 * NOTE(review): the `u16 devid;` declaration, the per-iteration delay,
 * and the return statements are missing from this extraction.
 */
3529 int t3_reset_adapter(struct adapter *adapter)
3531 int i, save_and_restore_pcie =
3532 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3535 if (save_and_restore_pcie)
3536 pci_save_state(adapter->pdev);
3537 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3540 * Delay. Give Some time to device to reset fully.
3541 * XXX The delay time should be modified.
/* poll for the device ID to reappear as the chip comes out of reset */
3543 for (i = 0; i < 10; i++) {
3545 pci_read_config_word(adapter->pdev, 0x00, &devid);
3546 if (devid == 0x1425)
/* chip never came back: the visible code tests for that here */
3550 if (devid != 0x1425)
3553 if (save_and_restore_pcie)
3554 pci_restore_state(adapter->pdev);
/*
 * Initializes memories with known contents so later parity checking is
 * meaningful: clears SGE egress contexts (the first 16 plus the top
 * 0xfff0-0xffff range) and response-queue contexts, then writes zeros
 * through the CIM IBQ debug interface to every address of each of the
 * four inbound queues.  Returns non-zero on the first failure.
 *
 * NOTE(review): the error check after the IBQ loop and the function's
 * tail (including its return) are missing from this extraction.
 */
3558 static int init_parity(struct adapter *adap)
/* refuse to touch the context command interface while it is busy */
3562 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3565 for (err = i = 0; !err && i < 16; i++)
3566 err = clear_sge_ctxt(adap, i, F_EGRESS);
3567 for (i = 0xfff0; !err && i <= 0xffff; i++)
3568 err = clear_sge_ctxt(adap, i, F_EGRESS);
3569 for (i = 0; !err && i < SGE_QSETS; i++)
3570 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
/* zero every address of each of the 4 CIM inbound debug queues */
3574 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3575 for (i = 0; i < 4; i++)
3576 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3577 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3578 F_IBQDBGWR | V_IBQDBGQID(i) |
3579 V_IBQDBGADDR(addr));
3580 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3581 F_IBQDBGBUSY, 0, 2, 1);
3589 * Initialize adapter SW state for the various HW modules, set initial values
3590 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
/*
 * Prepares adapter SW state: detects the PCI mode, reads chip revision
 * and VPD, optionally resets the chip, sizes the external memories and
 * derived TP parameters, decides offload capability from the memory
 * sizes, sets MC5/MTU/congestion defaults, runs early HW init and
 * parity initialization, and finally prepares each port's PHY, MAC,
 * Ethernet address, and link config.
 *
 * NOTE(review): many interior lines (error gotos, the `reset` parameter
 * line, memcpy length arguments, returns) are missing from this
 * extraction — verify error handling against the full source.
 */
3593 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
/* j starts at -1 so the pre-increment scan below begins at index 0 */
3597 unsigned int i, j = -1;
3599 get_pci_mode(adapter, &adapter->params.pci);
3601 adapter->params.info = ai;
3602 adapter->params.nports = ai->nports;
3603 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3604 adapter->params.linkpoll_period = 0;
/* 10G links accumulate MAC stats faster, so update more often */
3605 adapter->params.stats_update_period = is_10G(adapter) ?
3606 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3607 adapter->params.pci.vpd_cap_addr =
3608 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3609 ret = get_vpd_params(adapter, &adapter->params.vpd);
3613 if (reset && t3_reset_adapter(adapter))
3616 t3_sge_prep(adapter, &adapter->params.sge);
/* a non-zero memory clock in the VPD means external memory is present */
3618 if (adapter->params.vpd.mclk) {
3619 struct tp_params *p = &adapter->params.tp;
3621 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3622 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3623 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
/* derive TP memory-partitioning parameters from the MC7 sizes */
3625 p->nchan = ai->nports;
3626 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3627 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3628 p->cm_size = t3_mc7_size(&adapter->cm);
3629 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3630 p->chan_tx_size = p->pmtx_size / p->nchan;
3631 p->rx_pg_size = 64 * 1024;
3632 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3633 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3634 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
/* more timer queues when CM is >= 128MB or silicon is post-rev-0 */
3635 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3636 adapter->params.rev > 0 ? 12 : 6;
/* offload requires all three external memories to be present */
3639 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3640 t3_mc7_size(&adapter->pmtx) &&
3641 t3_mc7_size(&adapter->cm);
3643 if (is_offload(adapter)) {
3644 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
/* rev 0 silicon gets no filters */
3645 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3646 DEFAULT_NFILTERS : 0;
3647 adapter->params.mc5.nroutes = 0;
3648 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3650 init_mtus(adapter->params.mtus);
3651 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3654 early_hw_init(adapter, ai);
3655 ret = init_parity(adapter);
3659 for_each_port(adapter, i) {
3661 const struct port_type_info *pti;
3662 struct port_info *p = adap2pinfo(adapter, i);
/* skip VPD slots with no port type until the next populated one */
3664 while (!adapter->params.vpd.port_type[++j])
3667 pti = &port_types[adapter->params.vpd.port_type[j]];
3668 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3672 mac_prep(&p->mac, adapter, j);
3675 * The VPD EEPROM stores the base Ethernet address for the
3676 * card. A port's address is derived from the base by adding
3677 * the port's index to the base's low octet.
3679 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3680 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3682 memcpy(adapter->port[i]->dev_addr, hw_addr,
3684 memcpy(adapter->port[i]->perm_addr, hw_addr,
3686 init_link_config(&p->link_config, p->phy.caps);
/* power PHYs down until the port is actually brought up */
3687 p->phy.ops->power_down(&p->phy, 1);
/* PHYs without interrupt support fall back to 10-unit link polling */
3688 if (!(p->phy.caps & SUPPORTED_IRQ))
3689 adapter->params.linkpoll_period = 10;
/*
 * Signals readiness by clearing the GPIO0 output-value bit in
 * A_T3DBG_GPIO_EN (GPIO0 drives the ready LED per early_hw_init).
 *
 * NOTE(review): the final argument of t3_set_reg_field() and the
 * closing brace are missing from this extraction.
 */
3695 void t3_led_ready(struct adapter *adapter)
3697 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3701 int t3_replay_prep_adapter(struct adapter *adapter)
3703 const struct adapter_info *ai = adapter->params.info;
3704 unsigned int i, j = -1;
3707 early_hw_init(adapter, ai);
3708 ret = init_parity(adapter);
3712 for_each_port(adapter, i) {
3713 const struct port_type_info *pti;
3714 struct port_info *p = adap2pinfo(adapter, i);
3716 while (!adapter->params.vpd.port_type[++j])
3719 pti = &port_types[adapter->params.vpd.port_type[j]];
3720 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
3723 p->phy.ops->power_down(&p->phy, 1);