/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
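
/*
 * A minimal sketch (not part of the original file): callers that do not
 * need the completion-time register value go through the t3_wait_op_done()
 * helper used throughout this file.  Its definition lives in common.h and
 * is assumed to be a thin wrapper like the one below.
 */
#if 0	/* illustrative sketch only */
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	/* Discard the completion-time register value via a NULL @valp. */
	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
#endif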

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
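
/*
 * Usage sketch: the MI1 code below uses this helper for a read-modify-write
 * of the ST field in A_MI1_CFG, e.g.
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 *
 * clears the whole field via the V_ST(M_ST) mask and then ORs in the new
 * field value (here selecting the clause 22 MDIO frame start, vs. 0 for
 * clause 45 in mi1_wr_addr() below).
 */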

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
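
/*
 * Usage sketch (hypothetical values): dump the first four 64-bit words of
 * the CM memory controller through the backdoor interface.
 *
 *	u64 buf[4];
 *	int err = t3_mc7_bd_read(&adapter->cm, 0, 4, buf);
 *	if (err)
 *		return err;
 */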

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};
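
/*
 * The positional initializer above relies on the member order of struct
 * mdio_ops (read, then write, as consumed by mdio_read()/mdio_write()).
 * A designated-initializer equivalent, assuming those member names, would
 * be:
 *
 *	static const struct mdio_ops mi1_mdio_ops = {
 *		.read  = t3_mi1_read,
 *		.write = t3_mi1_write,
 *	};
 */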

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
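
/*
 * Usage sketch: forcing 100 Mb/s full duplex (which also clears
 * BMCR_ANENABLE) would look like
 *
 *	t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 *
 * Passing -1 for @speed or @duplex leaves that setting untouched.
 */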

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

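/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) below expands to the three
 * members
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 *
 * i.e. a two-byte keyword tag, a one-byte length, and the payload bytes,
 * matching the layout of a VPD-R keyword entry.
 */
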
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
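
/*
 * Usage sketch: the firmware version word at the end of flash is fetched
 * exactly this way (see t3_get_fw_version() below):
 *
 *	u32 vers;
 *	t3_read_flash(adapter, FW_VERS_ADDR, 1, &vers, 0);
 *
 * The swab32() above is inferred to arrange the 24-bit flash address so
 * that a controller shifting the 32-bit word out low byte first sends the
 * SF_RD_DATA_FAST opcode first, followed by the address MSB first.
 */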

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}
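
/*
 * Worked example of the page constraint above (hypothetical addresses): a
 * 200-byte write at addr 0x10080 has offset 0x80, so offset + n = 328 > 256
 * and is rejected, since a single SF_PROG_PAGE command must not cross a
 * 256-byte page boundary.  The same 200 bytes at addr 0x10000 (offset 0)
 * are accepted.
 */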

/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the tp sram version
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new microcode image is required
 *
 * Reads the protocol SRAM version and checks it against the version the
 * driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if the provided protocol SRAM is compatible with
 * this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
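
/*
 * The 1's-complement checksum convention used here (and by t3_load_fw()
 * below): images are built so that their 32-bit big-endian words, including
 * the trailing checksum word, sum to 0xffffffff modulo 2^32.  E.g. if the
 * payload words sum to S, the final word is 0xffffffff - S, which makes the
 * total come out to 0xffffffff.
 */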

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new FW image is required
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version (%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;			/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1, 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error",
		 -1, 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1698
1699#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1700 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1701/*
1702 * XGMAC interrupt handler.
1703 */
1704static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1705{
1706 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1707 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1708
1709 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1710 mac->stats.tx_fifo_parity_err++;
1711 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1712 }
1713 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1714 mac->stats.rx_fifo_parity_err++;
1715 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1716 }
1717 if (cause & F_TXFIFO_UNDERRUN)
1718 mac->stats.tx_fifo_urun++;
1719 if (cause & F_RXFIFO_OVERFLOW)
1720 mac->stats.rx_fifo_ovfl++;
1721 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1722 mac->stats.serdes_signal_loss++;
1723 if (cause & F_XAUIPCSCTCERR)
1724 mac->stats.xaui_pcs_ctc_err++;
1725 if (cause & F_XAUIPCSALIGNCHANGE)
1726 mac->stats.xaui_pcs_align_change++;
1727
1728 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1729 if (cause & XGM_INTR_FATAL)
1730 t3_fatal_err(adap);
1731 return cause != 0;
1732}
1733
1734/*
1735 * Interrupt handler for PHY events.
1736 */
1737int t3_phy_intr_handler(struct adapter *adapter)
1738{
1739 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1740
1741 for_each_port(adapter, i) {
1742 struct port_info *p = adap2pinfo(adapter, i);
1743
1744 if (!(p->phy.caps & SUPPORTED_IRQ))
1745 continue;
1746
1747 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1748 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1749
1750 if (phy_cause & cphy_cause_link_change)
1751 t3_link_changed(adapter, i);
1752 if (phy_cause & cphy_cause_fifo_error)
1753 p->phy.fifo_errors++;
1754 if (phy_cause & cphy_cause_module_change)
1755 t3_os_phymod_changed(adapter, i);
1756 }
1757 }
1758
1759 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1760 return 0;
1761}
1762
1763/*
1764 * T3 slow path (non-data) interrupt handler.
1765 */
1766int t3_slow_intr_handler(struct adapter *adapter)
1767{
1768 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1769
1770 cause &= adapter->slow_intr_mask;
1771 if (!cause)
1772 return 0;
1773 if (cause & F_PCIM0) {
1774 if (is_pcie(adapter))
1775 pcie_intr_handler(adapter);
1776 else
1777 pci_intr_handler(adapter);
1778 }
1779 if (cause & F_SGE3)
1780 t3_sge_err_intr_handler(adapter);
1781 if (cause & F_MC7_PMRX)
1782 mc7_intr_handler(&adapter->pmrx);
1783 if (cause & F_MC7_PMTX)
1784 mc7_intr_handler(&adapter->pmtx);
1785 if (cause & F_MC7_CM)
1786 mc7_intr_handler(&adapter->cm);
1787 if (cause & F_CIM)
1788 cim_intr_handler(adapter);
1789 if (cause & F_TP1)
1790 tp_intr_handler(adapter);
1791 if (cause & F_ULP2_RX)
1792 ulprx_intr_handler(adapter);
1793 if (cause & F_ULP2_TX)
1794 ulptx_intr_handler(adapter);
1795 if (cause & F_PM1_RX)
1796 pmrx_intr_handler(adapter);
1797 if (cause & F_PM1_TX)
1798 pmtx_intr_handler(adapter);
1799 if (cause & F_CPL_SWITCH)
1800 cplsw_intr_handler(adapter);
1801 if (cause & F_MPS0)
1802 mps_intr_handler(adapter);
1803 if (cause & F_MC5A)
1804 t3_mc5_intr_handler(&adapter->mc5);
1805 if (cause & F_XGMAC0_0)
1806 mac_intr_handler(adapter, 0);
1807 if (cause & F_XGMAC0_1)
1808 mac_intr_handler(adapter, 1);
1809 if (cause & F_T3DBG)
1810 t3_os_ext_intr_handler(adapter);
1811
1812 /* Clear the interrupts just processed. */
1813 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1814 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1815 return 1;
1816}
1817
1818static unsigned int calc_gpio_intr(struct adapter *adap)
1819{
1820 unsigned int i, gpi_intr = 0;
1821
1822 for_each_port(adap, i)
1823 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1824 adapter_info(adap)->gpio_intr[i])
1825 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1826 return gpi_intr;
1827}
1828
1829/**
1830 * t3_intr_enable - enable interrupts
1831 * @adapter: the adapter whose interrupts should be enabled
1832 *
1833 * Enable interrupts by setting the interrupt enable registers of the
1834 * various HW modules and then enabling the top-level interrupt
1835 * concentrator.
1836 */
1837void t3_intr_enable(struct adapter *adapter)
1838{
1839 static const struct addr_val_pair intr_en_avp[] = {
1840 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1841 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1842 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1843 MC7_INTR_MASK},
1844 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1845 MC7_INTR_MASK},
1846 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1847 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1848 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1849 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1850 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1851 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1852 };
1853
1854 adapter->slow_intr_mask = PL_INTR_MASK;
1855
1856 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1857 t3_write_reg(adapter, A_TP_INT_ENABLE,
1858 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1859
1860 if (adapter->params.rev > 0) {
1861 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1862 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1863 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1864 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1865 F_PBL_BOUND_ERR_CH1);
1866 } else {
1867 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1868 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1869 }
1870
1871 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1872
1873 if (is_pcie(adapter))
1874 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1875 else
1876 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1877 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1878 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1879}
1880
1881/**
1882 * t3_intr_disable - disable a card's interrupts
1883 * @adapter: the adapter whose interrupts should be disabled
1884 *
1885 * Disable interrupts. We only disable the top-level interrupt
1886 * concentrator and the SGE data interrupts.
1887 */
1888void t3_intr_disable(struct adapter *adapter)
1889{
1890 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1891 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1892 adapter->slow_intr_mask = 0;
1893}
1894
1895/**
1896 * t3_intr_clear - clear all interrupts
1897 * @adapter: the adapter whose interrupts should be cleared
1898 *
1899 * Clears all interrupts.
1900 */
1901void t3_intr_clear(struct adapter *adapter)
1902{
1903 static const unsigned int cause_reg_addr[] = {
1904 A_SG_INT_CAUSE,
1905 A_SG_RSPQ_FL_STATUS,
1906 A_PCIX_INT_CAUSE,
1907 A_MC7_INT_CAUSE,
1908 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1909 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1910 A_CIM_HOST_INT_CAUSE,
1911 A_TP_INT_CAUSE,
1912 A_MC5_DB_INT_CAUSE,
1913 A_ULPRX_INT_CAUSE,
1914 A_ULPTX_INT_CAUSE,
1915 A_CPL_INTR_CAUSE,
1916 A_PM1_TX_INT_CAUSE,
1917 A_PM1_RX_INT_CAUSE,
1918 A_MPS_INT_CAUSE,
1919 A_T3DBG_INT_CAUSE,
1920 };
1921 unsigned int i;
1922
1923 /* Clear PHY and MAC interrupts for each port. */
1924 for_each_port(adapter, i)
1925 t3_port_intr_clear(adapter, i);
1926
1927 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1928 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1929
1930 if (is_pcie(adapter))
1931 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1932 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1933 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1934}
1935
1936/**
1937 * t3_port_intr_enable - enable port-specific interrupts
1938 * @adapter: associated adapter
1939 * @idx: index of port whose interrupts should be enabled
1940 *
1941 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1942 * adapter port.
1943 */
1944void t3_port_intr_enable(struct adapter *adapter, int idx)
1945{
1946 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1947
1948 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1949 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1950 phy->ops->intr_enable(phy);
1951}
1952
1953/**
1954 * t3_port_intr_disable - disable port-specific interrupts
1955 * @adapter: associated adapter
1956 * @idx: index of port whose interrupts should be disabled
1957 *
1958 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1959 * adapter port.
1960 */
1961void t3_port_intr_disable(struct adapter *adapter, int idx)
1962{
1963 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1964
1965 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1966 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1967 phy->ops->intr_disable(phy);
1968}
1969
1970/**
1971 * t3_port_intr_clear - clear port-specific interrupts
1972 * @adapter: associated adapter
1973 * @idx: index of port whose interrupts to clear
1974 *
1975 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1976 * adapter port.
1977 */
1978void t3_port_intr_clear(struct adapter *adapter, int idx)
1979{
1980 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1981
1982 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1983 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1984 phy->ops->intr_clear(phy);
1985}
1986
1987#define SG_CONTEXT_CMD_ATTEMPTS 100
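
/* Every SGE context command below is polled up to 100 times, 1 us apart. */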
1988
1989/**
1990 * t3_sge_write_context - write an SGE context
1991 * @adapter: the adapter
1992 * @id: the context id
1993 * @type: the context type
1994 *
1995 * Program an SGE context with the values already loaded in the
1996 * CONTEXT_DATA? registers.
1997 */
1998static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1999 unsigned int type)
2000{
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2003 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2004 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2005 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2006 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2007 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2008 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2009}
2010
2011static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2012 unsigned int type)
2013{
2014 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2015 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2016 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2017 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2018 return t3_sge_write_context(adap, id, type);
2019}
2020
2021/**
2022 * t3_sge_init_ecntxt - initialize an SGE egress context
2023 * @adapter: the adapter to configure
2024 * @id: the context id
2025 * @gts_enable: whether to enable GTS for the context
2026 * @type: the egress context type
2027 * @respq: associated response queue
2028 * @base_addr: base address of queue
2029 * @size: number of queue entries
2030 * @token: uP token
2031 * @gen: initial generation value for the context
2032 * @cidx: consumer pointer
2033 *
2034 * Initialize an SGE egress context and make it ready for use. If the
2035 * platform allows concurrent context operations, the caller is
2036 * responsible for appropriate locking.
2037 */
2038int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2039 enum sge_context_type type, int respq, u64 base_addr,
2040 unsigned int size, unsigned int token, int gen,
2041 unsigned int cidx)
2042{
2043 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2044
2045 if (base_addr & 0xfff) /* must be 4K aligned */
2046 return -EINVAL;
2047 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2048 return -EBUSY;
2049
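 /* The 4K-aligned base is programmed as a page number split across the
  * DATA registers: page bits 0-15 in DATA1, 16-47 in DATA2 and 48-51
  * in DATA3. */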
2050 base_addr >>= 12;
2051 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2052 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2053 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2054 V_EC_BASE_LO(base_addr & 0xffff));
2055 base_addr >>= 16;
2056 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2057 base_addr >>= 32;
2058 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2059 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2060 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2061 F_EC_VALID);
2062 return t3_sge_write_context(adapter, id, F_EGRESS);
2063}
2064
2065/**
2066 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2067 * @adapter: the adapter to configure
2068 * @id: the context id
2069 * @gts_enable: whether to enable GTS for the context
2070 * @base_addr: base address of queue
2071 * @size: number of queue entries
2072 * @bsize: size of each buffer for this queue
2073 * @cong_thres: threshold to signal congestion to upstream producers
2074 * @gen: initial generation value for the context
2075 * @cidx: consumer pointer
2076 *
2077 * Initialize an SGE free list context and make it ready for use. The
2078 * caller is responsible for ensuring only one context operation occurs
2079 * at a time.
2080 */
2081int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2082 int gts_enable, u64 base_addr, unsigned int size,
2083 unsigned int bsize, unsigned int cong_thres, int gen,
2084 unsigned int cidx)
2085{
2086 if (base_addr & 0xfff) /* must be 4K aligned */
2087 return -EINVAL;
2088 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2089 return -EBUSY;
2090
2091 base_addr >>= 12;
2092 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2093 base_addr >>= 32;
2094 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2095 V_FL_BASE_HI((u32) base_addr) |
2096 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2097 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2098 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2099 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2100 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2101 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2102 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2103 return t3_sge_write_context(adapter, id, F_FREELIST);
2104}
2105
2106/**
2107 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2108 * @adapter: the adapter to configure
2109 * @id: the context id
2110 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2111 * @base_addr: base address of queue
2112 * @size: number of queue entries
2113 * @fl_thres: threshold for selecting the normal or jumbo free list
2114 * @gen: initial generation value for the context
2115 * @cidx: consumer pointer
2116 *
2117 * Initialize an SGE response queue context and make it ready for use.
2118 * The caller is responsible for ensuring only one context operation
2119 * occurs at a time.
2120 */
2121int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2122 int irq_vec_idx, u64 base_addr, unsigned int size,
2123 unsigned int fl_thres, int gen, unsigned int cidx)
2124{
2125 unsigned int intr = 0;
2126
2127 if (base_addr & 0xfff) /* must be 4K aligned */
2128 return -EINVAL;
2129 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2130 return -EBUSY;
2131
2132 base_addr >>= 12;
2133 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2134 V_CQ_INDEX(cidx));
2135 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2136 base_addr >>= 32;
2137 if (irq_vec_idx >= 0)
2138 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2139 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2140 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2141 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2142 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2143}
2144
2145/**
2146 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2147 * @adapter: the adapter to configure
2148 * @id: the context id
2149 * @base_addr: base address of queue
2150 * @size: number of queue entries
2151 * @rspq: response queue for async notifications
2152 * @ovfl_mode: CQ overflow mode
2153 * @credits: completion queue credits
2154 * @credit_thres: the credit threshold
2155 *
2156 * Initialize an SGE completion queue context and make it ready for use.
2157 * The caller is responsible for ensuring only one context operation
2158 * occurs at a time.
2159 */
2160int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2161 unsigned int size, int rspq, int ovfl_mode,
2162 unsigned int credits, unsigned int credit_thres)
2163{
2164 if (base_addr & 0xfff) /* must be 4K aligned */
2165 return -EINVAL;
2166 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2167 return -EBUSY;
2168
2169 base_addr >>= 12;
2170 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2171 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2172 base_addr >>= 32;
2173 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2174 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2175 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2176 V_CQ_ERR(ovfl_mode));
2177 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2178 V_CQ_CREDIT_THRES(credit_thres));
2179 return t3_sge_write_context(adapter, id, F_CQ);
2180}
2181
2182/**
2183 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2184 * @adapter: the adapter
2185 * @id: the egress context id
2186 * @enable: enable (1) or disable (0) the context
2187 *
2188 * Enable or disable an SGE egress context. The caller is responsible for
2189 * ensuring only one context operation occurs at a time.
2190 */
2191int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2192{
2193 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2194 return -EBUSY;
2195
2196 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2197 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2198 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2199 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2200 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2201 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2202 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2203 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2204 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2205}
2206
2207/**
2208 * t3_sge_disable_fl - disable an SGE free-buffer list
2209 * @adapter: the adapter
2210 * @id: the free list context id
2211 *
2212 * Disable an SGE free-buffer list. The caller is responsible for
2213 * ensuring only one context operation occurs at a time.
2214 */
2215int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2216{
2217 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2218 return -EBUSY;
2219
2220 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2221 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2222 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2223 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2224 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2225 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2226 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2227 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2228 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2229}
2230
2231/**
2232 * t3_sge_disable_rspcntxt - disable an SGE response queue
2233 * @adapter: the adapter
2234 * @id: the response queue context id
2235 *
2236 * Disable an SGE response queue. The caller is responsible for
2237 * ensuring only one context operation occurs at a time.
2238 */
2239int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2240{
2241 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2242 return -EBUSY;
2243
2244 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2245 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2246 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2247 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2248 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2249 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2250 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2251 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2252 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2253}
2254
2255/**
2256 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2257 * @adapter: the adapter
2258 * @id: the completion queue context id
2259 *
2260 * Disable an SGE completion queue. The caller is responsible for
2261 * ensuring only one context operation occurs at a time.
2262 */
2263int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2264{
2265 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2266 return -EBUSY;
2267
2268 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2269 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2270 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2271 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2272 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2273 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2274 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2275 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2276 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2277}
2278
2279/**
2280 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2281 * @adapter: the adapter
2282 * @id: the context id
2283 * @op: the operation to perform
 * @credits: the number of credits to return
2284 *
2285 * Perform the selected operation on an SGE completion queue context.
2286 * The caller is responsible for ensuring only one context operation
2287 * occurs at a time.
2288 */
2289int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2290 unsigned int credits)
2291{
2292 u32 val;
2293
2294 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2295 return -EBUSY;
2296
2297 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2298 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2299 V_CONTEXT(id) | F_CQ);
2300 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2301 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2302 return -EIO;
2303
2304 if (op >= 2 && op < 7) {
2305 if (adapter->params.rev > 0)
2306 return G_CQ_INDEX(val);
2307
2308 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2309 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2310 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2311 F_CONTEXT_CMD_BUSY, 0,
2312 SG_CONTEXT_CMD_ATTEMPTS, 1))
2313 return -EIO;
2314 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2315 }
2316 return 0;
2317}
2318
2319/**
2320 * t3_sge_read_context - read an SGE context
2321 * @type: the context type
2322 * @adapter: the adapter
2323 * @id: the context id
2324 * @data: holds the retrieved context
2325 *
2326 * Read an SGE context of the given type. The caller is responsible for ensuring
2327 * only one context operation occurs at a time.
2328 */
2329static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2330 unsigned int id, u32 data[4])
2331{
2332 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2333 return -EBUSY;
2334
2335 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2336 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2337 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2338 SG_CONTEXT_CMD_ATTEMPTS, 1))
2339 return -EIO;
2340 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2341 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2342 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2343 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2344 return 0;
2345}
2346
2347/**
2348 * t3_sge_read_ecntxt - read an SGE egress context
2349 * @adapter: the adapter
2350 * @id: the context id
2351 * @data: holds the retrieved context
2352 *
2353 * Read an SGE egress context. The caller is responsible for ensuring
2354 * only one context operation occurs at a time.
2355 */
2356int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2357{
2358 if (id >= 65536)
2359 return -EINVAL;
2360 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2361}
2362
2363/**
2364 * t3_sge_read_cq - read an SGE CQ context
2365 * @adapter: the adapter
2366 * @id: the context id
2367 * @data: holds the retrieved context
2368 *
2369 * Read an SGE CQ context. The caller is responsible for ensuring
2370 * only one context operation occurs at a time.
2371 */
2372int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2373{
2374 if (id >= 65536)
2375 return -EINVAL;
2376 return t3_sge_read_context(F_CQ, adapter, id, data);
2377}
2378
2379/**
2380 * t3_sge_read_fl - read an SGE free-list context
2381 * @adapter: the adapter
2382 * @id: the context id
2383 * @data: holds the retrieved context
2384 *
2385 * Read an SGE free-list context. The caller is responsible for ensuring
2386 * only one context operation occurs at a time.
2387 */
2388int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2389{
2390 if (id >= SGE_QSETS * 2)
2391 return -EINVAL;
2392 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2393}
2394
2395/**
2396 * t3_sge_read_rspq - read an SGE response queue context
2397 * @adapter: the adapter
2398 * @id: the context id
2399 * @data: holds the retrieved context
2400 *
2401 * Read an SGE response queue context. The caller is responsible for
2402 * ensuring only one context operation occurs at a time.
2403 */
2404int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2405{
2406 if (id >= SGE_QSETS)
2407 return -EINVAL;
2408 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2409}
2410
2411/**
2412 * t3_config_rss - configure Rx packet steering
2413 * @adapter: the adapter
2414 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2415 * @cpus: values for the CPU lookup table (0xff terminated)
2416 * @rspq: values for the response queue lookup table (0xffff terminated)
2417 *
2418 * Programs the receive packet steering logic. @cpus and @rspq provide
2419 * the values for the CPU and response queue lookup tables. If they
2420 * provide fewer values than the size of the tables the supplied values
2421 * are used repeatedly until the tables are fully populated.
2422 */
2423void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2424 const u8 * cpus, const u16 *rspq)
2425{
2426 int i, j, cpu_idx = 0, q_idx = 0;
2427
2428 if (cpus)
2429 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2430 u32 val = i << 16;
2431
2432 for (j = 0; j < 2; ++j) {
2433 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2434 if (cpus[cpu_idx] == 0xff)
2435 cpu_idx = 0;
2436 }
2437 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2438 }
2439
2440 if (rspq)
2441 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2442 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2443 (i << 16) | rspq[q_idx++]);
2444 if (rspq[q_idx] == 0xffff)
2445 q_idx = 0;
2446 }
2447
2448 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2449}
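
/*
 * Minimal usage sketch (hypothetical values): spread incoming traffic over
 * the first two response queues, letting both lookup tables repeat:
 *
 *	const u8 cpus[] = { 0, 1, 0xff };
 *	const u16 rspq[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config, cpus, rspq);
 *
 * where rss_config carries the desired TP_RSS_CONFIG enable bits.
 */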
2450
2451/**
2452 * t3_read_rss - read the contents of the RSS tables
2453 * @adapter: the adapter
2454 * @lkup: holds the contents of the RSS lookup table
2455 * @map: holds the contents of the RSS map table
2456 *
2457 * Reads the contents of the receive packet steering tables.
2458 */
2459int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2460{
2461 int i;
2462 u32 val;
2463
2464 if (lkup)
2465 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2466 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2467 0xffff0000 | i);
2468 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2469 if (!(val & 0x80000000))
2470 return -EAGAIN;
2471 *lkup++ = val;
2472 *lkup++ = (val >> 8);
2473 }
2474
2475 if (map)
2476 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2477 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2478 0xffff0000 | i);
2479 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2480 if (!(val & 0x80000000))
2481 return -EAGAIN;
2482 *map++ = val;
2483 }
2484 return 0;
2485}
2486
2487/**
2488 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2489 * @adap: the adapter
2490 * @enable: 1 to select offload mode, 0 for regular NIC
2491 *
2492 * Switches TP to NIC/offload mode.
2493 */
2494void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2495{
2496 if (is_offload(adap) || !enable)
2497 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2498 V_NICMODE(!enable));
2499}
2500
2501/**
2502 * pm_num_pages - calculate the number of pages of the payload memory
2503 * @mem_size: the size of the payload memory
2504 * @pg_size: the size of each payload memory page
2505 *
2506 * Calculate the number of pages, each of the given size, that fit in a
2507 * memory of the specified size, respecting the HW requirement that the
2508 * number of pages must be a multiple of 24.
2509 */
2510static inline unsigned int pm_num_pages(unsigned int mem_size,
2511 unsigned int pg_size)
2512{
2513 unsigned int n = mem_size / pg_size;
2514
2515 return n - n % 24;
2516}
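
/*
 * Worked example: a hypothetical 64 MB payload memory with 16 KB pages gives
 * n = 4096; since 4096 % 24 == 16, pm_num_pages() returns 4080, the largest
 * multiple of 24 that fits.
 */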
2517
2518#define mem_region(adap, start, size, reg) \
2519 t3_write_reg((adap), A_ ## reg, (start)); \
2520 start += size
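
/*
 * Note that mem_region() expands to two statements and advances @start past
 * the region it assigns, so successive invocations lay out consecutive
 * regions; it cannot be used as the lone statement of an if/else.
 */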
2521
2522/**
2523 * partition_mem - partition memory and configure TP memory settings
2524 * @adap: the adapter
2525 * @p: the TP parameters
2526 *
2527 * Partitions context and payload memory and configures TP's memory
2528 * registers.
2529 */
2530static void partition_mem(struct adapter *adap, const struct tp_params *p)
2531{
2532 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2533 unsigned int timers = 0, timers_shift = 22;
2534
2535 if (adap->params.rev > 0) {
2536 if (tids <= 16 * 1024) {
2537 timers = 1;
2538 timers_shift = 16;
2539 } else if (tids <= 64 * 1024) {
2540 timers = 2;
2541 timers_shift = 18;
2542 } else if (tids <= 256 * 1024) {
2543 timers = 3;
2544 timers_shift = 20;
2545 }
2546 }
2547
2548 t3_write_reg(adap, A_TP_PMM_SIZE,
2549 p->chan_rx_size | (p->chan_tx_size >> 16));
2550
2551 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2552 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2553 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2554 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2555 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2556
2557 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2558 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2559 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2560
2561 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2562 /* Add a bit of headroom and make multiple of 24 */
2563 pstructs += 48;
2564 pstructs -= pstructs % 24;
2565 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2566
2567 m = tids * TCB_SIZE;
2568 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2569 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2570 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2571 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2572 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2573 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2574 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2575 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2576
2577 m = (m + 4095) & ~0xfff;
2578 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2579 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2580
2581 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2582 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2583 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2584 if (tids < m)
2585 adap->params.mc5.nservers += m - tids;
2586}
2587
2588static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2589 u32 val)
2590{
2591 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2592 t3_write_reg(adap, A_TP_PIO_DATA, val);
2593}
2594
2595static void tp_config(struct adapter *adap, const struct tp_params *p)
2596{
2597 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2598 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2599 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2600 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2601 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2602 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2603 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2604 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2605 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2606 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2607 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2608 F_IPV6ENABLE | F_NICMODE);
2609 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2610 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2611 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2612 adap->params.rev > 0 ? F_ENABLEESND :
2613 F_T3A_ENABLEESND);
2614
2615 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2616 F_ENABLEEPCMDAFULL,
2617 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2618 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2619 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2620 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2621 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2622 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2623 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2624
2625 if (adap->params.rev > 0) {
2626 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2627 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2628 F_TXPACEAUTO);
2629 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2630 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2631 } else
2632 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2633
2634 if (adap->params.rev == T3_REV_C)
2635 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2636 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2637 V_TABLELATENCYDELTA(4));
2638
2639 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2640 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2641 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2642 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2643}
2644
2645/* Desired TP timer resolution in usec */
2646#define TP_TMR_RES 50
2647
2648/* TCP timer values in ms */
2649#define TP_DACK_TIMER 50
2650#define TP_RTO_MIN 250
2651
2652/**
2653 * tp_set_timers - set TP timing parameters
2654 * @adap: the adapter to set
2655 * @core_clk: the core clock frequency in Hz
2656 *
2657 * Set TP's timing parameters, such as the various timer resolutions and
2658 * the TCP timer values.
2659 */
2660static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2661{
2662 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2663 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2664 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2665 unsigned int tps = core_clk >> tre;
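
	/*
	 * Example, assuming a hypothetical 200 MHz core clock: tre =
	 * fls(200000000 / 20000) - 1 = 13, the largest power-of-2 tick
	 * (2^13 cycles, ~41 us) not exceeding the 50 us target, and
	 * tps = 200000000 >> 13, roughly 24414 ticks per second.
	 */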
2666
2667 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2668 V_DELAYEDACKRESOLUTION(dack_re) |
2669 V_TIMESTAMPRESOLUTION(tstamp_re));
2670 t3_write_reg(adap, A_TP_DACK_TIMER,
2671 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2672 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2673 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2674 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2675 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2676 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2677 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2678 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2679 V_KEEPALIVEMAX(9));
2680
2681#define SECONDS * tps
2682
2683 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2684 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2685 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2686 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2687 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2688 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2689 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2690 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2691 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2692
2693#undef SECONDS
2694}
2695
2696/**
2697 * t3_tp_set_coalescing_size - set receive coalescing size
2698 * @adap: the adapter
2699 * @size: the receive coalescing size
2700 * @psh: whether a set PSH bit should deliver coalesced data
2701 *
2702 * Set the receive coalescing size and PSH bit handling.
2703 */
2704int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2705{
2706 u32 val;
2707
2708 if (size > MAX_RX_COALESCING_LEN)
2709 return -EINVAL;
2710
2711 val = t3_read_reg(adap, A_TP_PARA_REG3);
2712 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2713
2714 if (size) {
2715 val |= F_RXCOALESCEENABLE;
2716 if (psh)
2717 val |= F_RXCOALESCEPSHEN;
2718 size = min(MAX_RX_COALESCING_LEN, size);
2719 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2720 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2721 }
2722 t3_write_reg(adap, A_TP_PARA_REG3, val);
2723 return 0;
2724}
2725
2726/**
2727 * t3_tp_set_max_rxsize - set the max receive size
2728 * @adap: the adapter
2729 * @size: the max receive size
2730 *
2731 * Set TP's max receive size. This is the limit that applies when
2732 * receive coalescing is disabled.
2733 */
2734void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2735{
2736 t3_write_reg(adap, A_TP_PARA_REG7,
2737 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2738}
2739
2740 static void init_mtus(unsigned short mtus[])
2741{
2742 /*
2743 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2744 * it can accommodate max size TCP/IP headers when SACK and timestamps
2745 * are enabled and still have at least 8 bytes of payload.
2746 */
2747 mtus[0] = 88;
2748 mtus[1] = 88;
2749 mtus[2] = 256;
2750 mtus[3] = 512;
2751 mtus[4] = 576;
2752 mtus[5] = 1024;
2753 mtus[6] = 1280;
2754 mtus[7] = 1492;
2755 mtus[8] = 1500;
2756 mtus[9] = 2002;
2757 mtus[10] = 2048;
2758 mtus[11] = 4096;
2759 mtus[12] = 4352;
2760 mtus[13] = 8192;
2761 mtus[14] = 9000;
2762 mtus[15] = 9600;
2763}
2764
2765/*
2766 * Initial congestion control parameters.
2767 */
2768 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2769{
2770 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2771 a[9] = 2;
2772 a[10] = 3;
2773 a[11] = 4;
2774 a[12] = 5;
2775 a[13] = 6;
2776 a[14] = 7;
2777 a[15] = 8;
2778 a[16] = 9;
2779 a[17] = 10;
2780 a[18] = 14;
2781 a[19] = 17;
2782 a[20] = 21;
2783 a[21] = 25;
2784 a[22] = 30;
2785 a[23] = 35;
2786 a[24] = 45;
2787 a[25] = 60;
2788 a[26] = 80;
2789 a[27] = 100;
2790 a[28] = 200;
2791 a[29] = 300;
2792 a[30] = 400;
2793 a[31] = 500;
2794
2795 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2796 b[9] = b[10] = 1;
2797 b[11] = b[12] = 2;
2798 b[13] = b[14] = b[15] = b[16] = 3;
2799 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2800 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2801 b[28] = b[29] = 6;
2802 b[30] = b[31] = 7;
2803}
2804
2805/* The minimum additive increment value for the congestion control table */
2806#define CC_MIN_INCR 2U
2807
2808/**
2809 * t3_load_mtus - write the MTU and congestion control HW tables
2810 * @adap: the adapter
2811 * @mtus: the unrestricted values for the MTU table
2812 * @alpha: the values for the congestion control alpha parameter
2813 * @beta: the values for the congestion control beta parameter
2814 * @mtu_cap: the maximum permitted effective MTU
2815 *
2816 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2817 * Update the high-speed congestion control table with the supplied alpha,
2818 * beta, and MTUs.
2819 */
2820void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2821 unsigned short alpha[NCCTRL_WIN],
2822 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2823{
2824 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2825 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2826 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2827 28672, 40960, 57344, 81920, 114688, 163840, 229376
2828 };
2829
2830 unsigned int i, w;
2831
2832 for (i = 0; i < NMTUS; ++i) {
2833 unsigned int mtu = min(mtus[i], mtu_cap);
2834 unsigned int log2 = fls(mtu);
2835
2836 if (!(mtu & ((1 << log2) >> 2))) /* round */
2837 log2--;
2838 t3_write_reg(adap, A_TP_MTU_TABLE,
2839 (i << 24) | (log2 << 16) | mtu);
2840
2841 for (w = 0; w < NCCTRL_WIN; ++w) {
2842 unsigned int inc;
2843
2844 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2845 CC_MIN_INCR);
2846
2847 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2848 (w << 16) | (beta[w] << 13) | inc);
2849 }
2850 }
2851}
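
/*
 * Worked example for the additive increment above: an MTU of 1500 in
 * window 9 (avg_pkts 112), with the alpha of 2 that init_cong_ctrl()
 * assigns there, gives inc = max((1500 - 40) * 2 / 112, 2) = 26.
 */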
2852
2853/**
2854 * t3_read_hw_mtus - returns the values in the HW MTU table
2855 * @adap: the adapter
2856 * @mtus: where to store the HW MTU values
2857 *
2858 * Reads the HW MTU table.
2859 */
2860void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2861{
2862 int i;
2863
2864 for (i = 0; i < NMTUS; ++i) {
2865 unsigned int val;
2866
2867 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2868 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2869 mtus[i] = val & 0x3fff;
2870 }
2871}
2872
2873/**
2874 * t3_get_cong_cntl_tab - reads the congestion control table
2875 * @adap: the adapter
2876 * @incr: where to store the additive increment values
2877 *
2878 * Reads the additive increments programmed into the HW congestion
2879 * control table.
2880 */
2881void t3_get_cong_cntl_tab(struct adapter *adap,
2882 unsigned short incr[NMTUS][NCCTRL_WIN])
2883{
2884 unsigned int mtu, w;
2885
2886 for (mtu = 0; mtu < NMTUS; ++mtu)
2887 for (w = 0; w < NCCTRL_WIN; ++w) {
2888 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2889 0xffff0000 | (mtu << 5) | w);
2890 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2891 0x1fff;
2892 }
2893}
2894
2895/**
2896 * t3_tp_get_mib_stats - read TP's MIB counters
2897 * @adap: the adapter
2898 * @tps: holds the returned counter values
2899 *
2900 * Returns the values of TP's MIB counters.
2901 */
2902void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2903{
2904 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2905 sizeof(*tps) / sizeof(u32), 0);
2906}
2907
2908#define ulp_region(adap, name, start, len) \
2909 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2910 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2911 (start) + (len) - 1); \
2912 start += len
2913
2914#define ulptx_region(adap, name, start, len) \
2915 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2916 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2917 (start) + (len) - 1)
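
/*
 * ulp_region() advances @start past the region it programs while
 * ulptx_region() does not, so in ulp_config() below each ULP TX region
 * shares its address range with the ULP RX region programmed right after it.
 */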
2918
2919static void ulp_config(struct adapter *adap, const struct tp_params *p)
2920{
2921 unsigned int m = p->chan_rx_size;
2922
2923 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2924 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2925 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2926 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2927 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2928 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2929 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2930 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2931}
2932
2933/**
2934 * t3_set_proto_sram - set the contents of the protocol sram
2935 * @adapter: the adapter
2936 * @data: the protocol image
2937 *
2938 * Write the contents of the protocol SRAM.
2939 */
2940 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2941{
2942 int i;
2943 const __be32 *buf = (const __be32 *)data;
2944
2945 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2946 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2947 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2948 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2949 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2950 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2951
2952 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2953 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2954 return -EIO;
2955 }
2956 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2957
2958 return 0;
2959}
2960
2961void t3_config_trace_filter(struct adapter *adapter,
2962 const struct trace_params *tp, int filter_index,
2963 int invert, int enable)
2964{
2965 u32 addr, key[4], mask[4];
2966
2967 key[0] = tp->sport | (tp->sip << 16);
2968 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2969 key[2] = tp->dip;
2970 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2971
2972 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2973 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2974 mask[2] = tp->dip_mask;
2975 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2976
2977 if (invert)
2978 key[3] |= (1 << 29);
2979 if (enable)
2980 key[3] |= (1 << 28);
2981
2982 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2983 tp_wr_indirect(adapter, addr++, key[0]);
2984 tp_wr_indirect(adapter, addr++, mask[0]);
2985 tp_wr_indirect(adapter, addr++, key[1]);
2986 tp_wr_indirect(adapter, addr++, mask[1]);
2987 tp_wr_indirect(adapter, addr++, key[2]);
2988 tp_wr_indirect(adapter, addr++, mask[2]);
2989 tp_wr_indirect(adapter, addr++, key[3]);
2990 tp_wr_indirect(adapter, addr, mask[3]);
2991 t3_read_reg(adapter, A_TP_PIO_DATA);
2992}
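
/*
 * Minimal sketch (hypothetical values, mask polarity assumed to be
 * 1 = compare bit) arming the TX trace filter, index 0, for TCP packets
 * to 10.0.0.1:
 *
 *	struct trace_params tp = { 0 };
 *
 *	tp.dip = 0x0a000001;
 *	tp.dip_mask = 0xffffffff;
 *	tp.proto = 6;		(IPPROTO_TCP)
 *	tp.proto_mask = 0xff;
 *	t3_config_trace_filter(adapter, &tp, 0, 0, 1);
 */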
2993
2994/**
2995 * t3_config_sched - configure a HW traffic scheduler
2996 * @adap: the adapter
2997 * @kbps: target rate in Kbps
2998 * @sched: the scheduler index
2999 *
3000 * Configure a HW scheduler for the target rate.
3001 */
3002int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3003{
3004 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3005 unsigned int clk = adap->params.vpd.cclk * 1000;
3006 unsigned int selected_cpt = 0, selected_bpt = 0;
3007
3008 if (kbps > 0) {
3009 kbps *= 125; /* -> bytes */
3010 for (cpt = 1; cpt <= 255; cpt++) {
3011 tps = clk / cpt;
3012 bpt = (kbps + tps / 2) / tps;
3013 if (bpt > 0 && bpt <= 255) {
3014 v = bpt * tps;
3015 delta = v >= kbps ? v - kbps : kbps - v;
3016 if (delta <= mindelta) {
3017 mindelta = delta;
3018 selected_cpt = cpt;
3019 selected_bpt = bpt;
3020 }
3021 } else if (selected_cpt)
3022 break;
3023 }
3024 if (!selected_cpt)
3025 return -EINVAL;
3026 }
3027 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3028 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3029 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3030 if (sched & 1)
3031 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3032 else
3033 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3034 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3035 return 0;
3036}
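
/*
 * Example, assuming a hypothetical 200 MHz core clock: for kbps = 10000 the
 * target is 10000 * 125 = 1250000 bytes/sec, and the search settles on
 * cpt = 160, bpt = 1, i.e. one byte every 160 clocks, which is exact
 * (delta 0).
 */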
3037
3038static int tp_init(struct adapter *adap, const struct tp_params *p)
3039{
3040 int busy = 0;
3041
3042 tp_config(adap, p);
3043 t3_set_vlan_accel(adap, 3, 0);
3044
3045 if (is_offload(adap)) {
3046 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3047 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3048 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3049 0, 1000, 5);
3050 if (busy)
3051 CH_ERR(adap, "TP initialization timed out\n");
3052 }
3053
3054 if (!busy)
3055 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3056 return busy;
3057}
3058
3059int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3060{
3061 if (port_mask & ~((1 << adap->params.nports) - 1))
3062 return -EINVAL;
3063 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3064 port_mask << S_PORT0ACTIVE);
3065 return 0;
3066}
3067
3068/*
3069 * Perform the bits of HW initialization that are dependent on the number
3070 * of available ports.
3071 */
3072static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3073{
3074 int i;
3075
3076 if (nports == 1) {
3077 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3078 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3079 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3080 F_PORT0ACTIVE | F_ENFORCEPKT);
3081 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
3082 } else {
3083 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3084 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3085 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3086 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3087 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3088 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3089 F_ENFORCEPKT);
3090 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3091 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3092 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3093 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3094 for (i = 0; i < 16; i++)
3095 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3096 (i << 16) | 0x1010);
3097 }
3098}
3099
3100static int calibrate_xgm(struct adapter *adapter)
3101{
3102 if (uses_xaui(adapter)) {
3103 unsigned int v, i;
3104
3105 for (i = 0; i < 5; ++i) {
3106 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3107 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3108 msleep(1);
3109 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3110 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3111 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3112 V_XAUIIMP(G_CALIMP(v) >> 2));
3113 return 0;
3114 }
3115 }
3116 CH_ERR(adapter, "MAC calibration failed\n");
3117 return -1;
3118 } else {
3119 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3120 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3121 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3122 F_XGM_IMPSETUPDATE);
3123 }
3124 return 0;
3125}
3126
3127static void calibrate_xgm_t3b(struct adapter *adapter)
3128{
3129 if (!uses_xaui(adapter)) {
3130 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3131 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3132 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3133 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3134 F_XGM_IMPSETUPDATE);
3135 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3136 0);
3137 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3138 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3139 }
3140}
3141
3142struct mc7_timing_params {
3143 unsigned char ActToPreDly;
3144 unsigned char ActToRdWrDly;
3145 unsigned char PreCyc;
3146 unsigned char RefCyc[5];
3147 unsigned char BkCyc;
3148 unsigned char WrToRdDly;
3149 unsigned char RdToWrDly;
3150};
3151
3152/*
3153 * Write a value to a register and check that the write completed. These
3154 * writes normally complete in a cycle or two, so one read should suffice.
3155 * The very first read exists to flush the posted write to the device.
3156 */
3157static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3158{
3159 t3_write_reg(adapter, addr, val);
3160 t3_read_reg(adapter, addr); /* flush */
3161 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3162 return 0;
3163 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3164 return -EIO;
3165}
3166
3167static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3168{
3169 static const unsigned int mc7_mode[] = {
3170 0x632, 0x642, 0x652, 0x432, 0x442
3171 };
3172 static const struct mc7_timing_params mc7_timings[] = {
3173 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3174 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3175 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3176 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3177 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3178 };
3179
3180 u32 val;
3181 unsigned int width, density, slow, attempts;
3182 struct adapter *adapter = mc7->adapter;
3183 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3184
3185 if (!mc7->size)
3186 return 0;
3187
3188 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3189 slow = val & F_SLOW;
3190 width = G_WIDTH(val);
3191 density = G_DEN(val);
3192
3193 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3194 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3195 msleep(1);
3196
3197 if (!slow) {
3198 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3199 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3200 msleep(1);
3201 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3202 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3203 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3204 mc7->name);
3205 goto out_fail;
3206 }
3207 }
3208
3209 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3210 V_ACTTOPREDLY(p->ActToPreDly) |
3211 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3212 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3213 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3214
3215 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3216 val | F_CLKEN | F_TERM150);
3217 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3218
3219 if (!slow)
3220 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3221 F_DLLENB);
3222 udelay(1);
3223
3224 val = slow ? 3 : 6;
3225 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3226 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3227 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3228 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3229 goto out_fail;
3230
3231 if (!slow) {
3232 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3233 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3234 udelay(5);
3235 }
3236
3237 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3238 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3239 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3240 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3241 mc7_mode[mem_type]) ||
3242 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3243 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3244 goto out_fail;
3245
3246 /* clock value is in KHz */
3247 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3248 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
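
	/*
	 * The two lines above convert the memory clock in KHz into the
	 * number of clock cycles in a 7812.5 ns refresh period, which
	 * matches the standard 64 ms / 8192-row DRAM refresh interval.
	 */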
3249
3250 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3251 F_PERREFEN | V_PREREFDIV(mc7_clock));
3252 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3253
3254 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3255 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3256 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3257 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3258 (mc7->size << width) - 1);
3259 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3260 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3261
3262 attempts = 50;
3263 do {
3264 msleep(250);
3265 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3266 } while ((val & F_BUSY) && --attempts);
3267 if (val & F_BUSY) {
3268 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3269 goto out_fail;
3270 }
3271
3272 /* Enable normal memory accesses. */
3273 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3274 return 0;
3275
3276out_fail:
3277 return -1;
3278}
3279
3280static void config_pcie(struct adapter *adap)
3281{
3282 static const u16 ack_lat[4][6] = {
3283 {237, 416, 559, 1071, 2095, 4143},
3284 {128, 217, 289, 545, 1057, 2081},
3285 {73, 118, 154, 282, 538, 1050},
3286 {67, 107, 86, 150, 278, 534}
3287 };
3288 static const u16 rpl_tmr[4][6] = {
3289 {711, 1248, 1677, 3213, 6285, 12429},
3290 {384, 651, 867, 1635, 3171, 6243},
3291 {219, 354, 462, 846, 1614, 3150},
3292 {201, 321, 258, 450, 834, 1602}
3293 };
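
	/*
	 * Both tables above are indexed [log2(link width)][payload size
	 * code]: rows correspond to x1/x2/x4/x8 links, columns to the
	 * 128..4096 byte maximum payload sizes encoded by
	 * PCI_EXP_DEVCTL_PAYLOAD.
	 */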
3294
3295 u16 val;
3296 unsigned int log2_width, pldsize;
3297 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3298
3299 pci_read_config_word(adap->pdev,
3300 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3301 &val);
3302 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3303 pci_read_config_word(adap->pdev,
3304 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3305 &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check L0sEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

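	/* Clear any latched PCIe error status before enabling the checks
	 * below (the all-ones write suggests write-1-to-clear semantics). */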
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}

/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);

	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}

3414/**
3415 * get_pci_mode - determine a card's PCI mode
3416 * @adapter: the adapter
3417 * @p: where to store the PCI settings
3418 *
3419 * Determines a card's PCI mode and associated parameters, such as speed
3420 * and width.
3421 */
7b9b0943 3422static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
4d22de3e
DLR
3423{
3424 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3425 u32 pci_mode, pcie_cap;
3426
3427 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3428 if (pcie_cap) {
3429 u16 val;
3430
3431 p->variant = PCI_VARIANT_PCIE;
3432 p->pcie_cap_addr = pcie_cap;
3433 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3434 &val);
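		/* The negotiated link width lives in bits 9:4 of the PCIe
		 * Link Status register, hence the shift and 6-bit mask. */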
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capability bit map
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/duplex/flow-control/autonegotiation
 * settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * mc7_calc_size - calculate MC7 memory size
 * @cfg: the MC7 configuration
 *
 * Calculates the size of an MC7 memory in bytes from the value of its
 * configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);
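
	/*
	 * Worked example (illustrative only): density 1, two banks, org 1,
	 * width 0 gives ((256 << 1) * 2) / (1 << 0) = 1024 MB, returned
	 * below in bytes.
	 */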

	return MBs << 20;
}

static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}

void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
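	/* Per-MAC register blocks are evenly strided, so the port index
	 * times the XGMAC0_1/XGMAC0_0 base-address delta locates this MAC. */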
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}

void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}

/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
20d3fc11 3554int t3_reset_adapter(struct adapter *adapter)
4d22de3e 3555{
2eab17ab 3556 int i, save_and_restore_pcie =
e4d08359 3557 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4d22de3e
DLR
3558 uint16_t devid = 0;
3559
e4d08359 3560 if (save_and_restore_pcie)
4d22de3e
DLR
3561 pci_save_state(adapter->pdev);
3562 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3563
	/*
	 * Delay.  Give the device some time to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

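	/* 0x1425 is Chelsio's PCI vendor ID; once config-space reads return
	 * it, the device has come out of reset. */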
	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}

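/*
 * Seed parity state with known-good data: clear the SGE egress and
 * response-queue contexts, then write zero to every word of the four CIM
 * ingress-bounce queues through the IBQ debug interface.  (A reading of
 * the code; the source does not spell out the intent.)
 */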
7b9b0943 3583static int init_parity(struct adapter *adap)
b881955b
DLR
3584{
3585 int i, err, addr;
3586
3587 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3588 return -EBUSY;
3589
3590 for (err = i = 0; !err && i < 16; i++)
3591 err = clear_sge_ctxt(adap, i, F_EGRESS);
3592 for (i = 0xfff0; !err && i <= 0xffff; i++)
3593 err = clear_sge_ctxt(adap, i, F_EGRESS);
3594 for (i = 0; !err && i < SGE_QSETS; i++)
3595 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3596 if (err)
3597 return err;
3598
3599 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3600 for (i = 0; i < 4; i++)
3601 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3602 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3603 F_IBQDBGWR | V_IBQDBGQID(i) |
3604 V_IBQDBGADDR(addr));
3605 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3606 F_IBQDBGBUSY, 0, 2, 1);
3607 if (err)
3608 return err;
3609 }
3610 return 0;
3611}
3612
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);
		if (!(p->phy.caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}

void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}

int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}