/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
18 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
43 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
/* VLAN tag insertion is compiled in only when the 802.1Q layer is
 * available.  As written, both the 1 and 0 definitions were unconditional,
 * which is a macro redefinition error: the #else/#endif arms were missing.
 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

/* TSO support likewise depends on the kernel providing NETIF_F_TSO. */
#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif
/* Driver identity strings used in the version banner and log prefix. */
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.51"
#define DRV_MODULE_RELDATE	"Feb 21, 2006"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif message bitmask; the continuation lines were lost in this
 * copy, leaving the macro empty.  Restored to the conventional set.
 */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the DMA rings; the SIZE continuation lines were missing. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)

/* Free TX descriptors = pending - (prod - cons) mod ring size. */
#define TX_BUFFS_AVAIL(TP)						\
	((TP)->tx_pending -						\
	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* Receive buffer sizes include the RX offset plus slack for alignment. */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6
/* One-shot version banner printed at module load. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Debug message bitmask (netif_msg_*); -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
155 static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names reported by ethtool -S; order must match struct tg3_ethtool_stats
 * field order (TG3_NUM_STATS entries).
 * NOTE(review): this copy appears truncated — the leading "static struct {"
 * header, the closing "};", and several stat-name entries (e.g. rx_octets,
 * rx_fcs_errors, tx_octets, tx_collisions, nic_irqs) look to be missing.
 * Compare against the upstream tg3.c before building; entry count must
 * equal TG3_NUM_STATS or ethtool output will be misaligned.
 */
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
347 const char string[ETH_GSTRING_LEN];
348 } ethtool_test_keys[TG3_NUM_TEST] = {
349 { "nvram test (online) " },
350 { "link test (online) " },
351 { "register test (offline)" },
352 { "memory test (offline)" },
353 { "loopback test (offline)" },
354 { "interrupt test (offline)" },
357 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
359 writel(val, tp->regs + off);
362 static u32 tg3_read32(struct tg3 *tp, u32 off)
364 return (readl(tp->regs + off));
367 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
371 spin_lock_irqsave(&tp->indirect_lock, flags);
372 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
373 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
374 spin_unlock_irqrestore(&tp->indirect_lock, flags);
377 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
379 writel(val, tp->regs + off);
380 readl(tp->regs + off);
383 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
388 spin_lock_irqsave(&tp->indirect_lock, flags);
389 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
390 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
391 spin_unlock_irqrestore(&tp->indirect_lock, flags);
395 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
399 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
400 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
401 TG3_64BIT_REG_LOW, val);
404 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
405 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
406 TG3_64BIT_REG_LOW, val);
410 spin_lock_irqsave(&tp->indirect_lock, flags);
411 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
412 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
413 spin_unlock_irqrestore(&tp->indirect_lock, flags);
415 /* In indirect mode when disabling interrupts, we also need
416 * to clear the interrupt bit in the GRC local ctrl register.
418 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
420 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
421 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
425 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
430 spin_lock_irqsave(&tp->indirect_lock, flags);
431 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
432 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
433 spin_unlock_irqrestore(&tp->indirect_lock, flags);
437 /* usec_wait specifies the wait time in usec when writing to certain registers
438 * where it is unsafe to read back the register without some delay.
439 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
440 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
442 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
444 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
445 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
446 /* Non-posted methods */
447 tp->write32(tp, off, val);
450 tg3_write32(tp, off, val);
455 /* Wait again after the read for the posted method to guarantee that
456 * the wait time is met.
462 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
464 tp->write32_mbox(tp, off, val);
465 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
466 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
467 tp->read32_mbox(tp, off);
470 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
472 void __iomem *mbox = tp->regs + off;
474 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
476 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Mailbox accessors: indirect through tp->write32_* so the per-chip
 * workaround variant (direct, indirect, TX double-write) chosen at probe
 * time is used everywhere.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

/* Plain register accessors; tw32_f flushes with a read-back, tw32_wait_f
 * additionally delays for registers that are unsafe to read back at once.
 */
#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
491 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
495 spin_lock_irqsave(&tp->indirect_lock, flags);
496 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
497 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
499 /* Always leave this as zero. */
500 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
501 spin_unlock_irqrestore(&tp->indirect_lock, flags);
504 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
506 /* If no workaround is needed, write to mem space directly */
507 if (tp->write32 != tg3_write_indirect_reg32)
508 tw32(NIC_SRAM_WIN_BASE + off, val);
510 tg3_write_mem(tp, off, val);
513 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
517 spin_lock_irqsave(&tp->indirect_lock, flags);
518 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
519 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
521 /* Always leave this as zero. */
522 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
523 spin_unlock_irqrestore(&tp->indirect_lock, flags);
526 static void tg3_disable_ints(struct tg3 *tp)
528 tw32(TG3PCI_MISC_HOST_CTRL,
529 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
533 static inline void tg3_cond_int(struct tg3 *tp)
535 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
536 (tp->hw_status->status & SD_STATUS_UPDATED))
537 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
540 static void tg3_enable_ints(struct tg3 *tp)
545 tw32(TG3PCI_MISC_HOST_CTRL,
546 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
547 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
548 (tp->last_tag << 24));
552 static inline unsigned int tg3_has_work(struct tg3 *tp)
554 struct tg3_hw_status *sblk = tp->hw_status;
555 unsigned int work_exists = 0;
557 /* check for phy events */
558 if (!(tp->tg3_flags &
559 (TG3_FLAG_USE_LINKCHG_REG |
560 TG3_FLAG_POLL_SERDES))) {
561 if (sblk->status & SD_STATUS_LINK_CHG)
564 /* check for RX/TX work to do */
565 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
566 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
573 * similar to tg3_enable_ints, but it accurately determines whether there
574 * is new work pending and can return without flushing the PIO write
575 * which reenables interrupts
577 static void tg3_restart_ints(struct tg3 *tp)
579 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
583 /* When doing tagged status, this work check is unnecessary.
584 * The last_tag we write above tells the chip which piece of
585 * work we've completed.
587 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
589 tw32(HOSTCC_MODE, tp->coalesce_mode |
590 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
593 static inline void tg3_netif_stop(struct tg3 *tp)
595 tp->dev->trans_start = jiffies; /* prevent tx timeout */
596 netif_poll_disable(tp->dev);
597 netif_tx_disable(tp->dev);
600 static inline void tg3_netif_start(struct tg3 *tp)
602 netif_wake_queue(tp->dev);
603 /* NOTE: unconditional netif_wake_queue is only appropriate
604 * so long as all callers are assured to have free tx slots
605 * (such as after tg3_init_hw)
607 netif_poll_enable(tp->dev);
608 tp->hw_status->status |= SD_STATUS_UPDATED;
612 static void tg3_switch_clocks(struct tg3 *tp)
614 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
617 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
620 orig_clock_ctrl = clock_ctrl;
621 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
622 CLOCK_CTRL_CLKRUN_OENABLE |
624 tp->pci_clock_ctrl = clock_ctrl;
626 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
627 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
628 tw32_wait_f(TG3PCI_CLOCK_CTRL,
629 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
631 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
632 tw32_wait_f(TG3PCI_CLOCK_CTRL,
634 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
636 tw32_wait_f(TG3PCI_CLOCK_CTRL,
637 clock_ctrl | (CLOCK_CTRL_ALTCLK),
640 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
643 #define PHY_BUSY_LOOPS 5000
645 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
651 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
653 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
659 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
660 MI_COM_PHY_ADDR_MASK);
661 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
662 MI_COM_REG_ADDR_MASK);
663 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
665 tw32_f(MAC_MI_COM, frame_val);
667 loops = PHY_BUSY_LOOPS;
670 frame_val = tr32(MAC_MI_COM);
672 if ((frame_val & MI_COM_BUSY) == 0) {
674 frame_val = tr32(MAC_MI_COM);
682 *val = frame_val & MI_COM_DATA_MASK;
686 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
687 tw32_f(MAC_MI_MODE, tp->mi_mode);
694 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
700 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
702 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
706 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
707 MI_COM_PHY_ADDR_MASK);
708 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
709 MI_COM_REG_ADDR_MASK);
710 frame_val |= (val & MI_COM_DATA_MASK);
711 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
713 tw32_f(MAC_MI_COM, frame_val);
715 loops = PHY_BUSY_LOOPS;
718 frame_val = tr32(MAC_MI_COM);
719 if ((frame_val & MI_COM_BUSY) == 0) {
721 frame_val = tr32(MAC_MI_COM);
731 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
732 tw32_f(MAC_MI_MODE, tp->mi_mode);
739 static void tg3_phy_set_wirespeed(struct tg3 *tp)
743 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
746 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
747 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
748 tg3_writephy(tp, MII_TG3_AUX_CTRL,
749 (val | (1 << 15) | (1 << 4)));
752 static int tg3_bmcr_reset(struct tg3 *tp)
757 /* OK, reset it, and poll the BMCR_RESET bit until it
758 * clears or we time out.
760 phy_control = BMCR_RESET;
761 err = tg3_writephy(tp, MII_BMCR, phy_control);
767 err = tg3_readphy(tp, MII_BMCR, &phy_control);
771 if ((phy_control & BMCR_RESET) == 0) {
783 static int tg3_wait_macro_done(struct tg3 *tp)
790 if (!tg3_readphy(tp, 0x16, &tmp32)) {
791 if ((tmp32 & 0x1000) == 0)
801 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
803 static const u32 test_pat[4][6] = {
804 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
805 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
806 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
807 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
811 for (chan = 0; chan < 4; chan++) {
814 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
815 (chan * 0x2000) | 0x0200);
816 tg3_writephy(tp, 0x16, 0x0002);
818 for (i = 0; i < 6; i++)
819 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
822 tg3_writephy(tp, 0x16, 0x0202);
823 if (tg3_wait_macro_done(tp)) {
828 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829 (chan * 0x2000) | 0x0200);
830 tg3_writephy(tp, 0x16, 0x0082);
831 if (tg3_wait_macro_done(tp)) {
836 tg3_writephy(tp, 0x16, 0x0802);
837 if (tg3_wait_macro_done(tp)) {
842 for (i = 0; i < 6; i += 2) {
845 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
846 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
847 tg3_wait_macro_done(tp)) {
853 if (low != test_pat[chan][i] ||
854 high != test_pat[chan][i+1]) {
855 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
856 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
857 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
867 static int tg3_phy_reset_chanpat(struct tg3 *tp)
871 for (chan = 0; chan < 4; chan++) {
874 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
875 (chan * 0x2000) | 0x0200);
876 tg3_writephy(tp, 0x16, 0x0002);
877 for (i = 0; i < 6; i++)
878 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
879 tg3_writephy(tp, 0x16, 0x0202);
880 if (tg3_wait_macro_done(tp))
887 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
889 u32 reg32, phy9_orig;
890 int retries, do_phy_reset, err;
896 err = tg3_bmcr_reset(tp);
902 /* Disable transmitter and interrupt. */
903 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
907 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
909 /* Set full-duplex, 1000 mbps. */
910 tg3_writephy(tp, MII_BMCR,
911 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
913 /* Set to master mode. */
914 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
917 tg3_writephy(tp, MII_TG3_CTRL,
918 (MII_TG3_CTRL_AS_MASTER |
919 MII_TG3_CTRL_ENABLE_AS_MASTER));
921 /* Enable SM_DSP_CLOCK and 6dB. */
922 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
924 /* Block the PHY control access. */
925 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
928 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
933 err = tg3_phy_reset_chanpat(tp);
937 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
938 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
940 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
941 tg3_writephy(tp, 0x16, 0x0000);
943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
945 /* Set Extended packet length bit for jumbo frames */
946 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
949 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
952 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
954 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
956 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
963 /* This will reset the tigon3 PHY if there is no valid
964 * link unless the FORCE argument is non-zero.
966 static int tg3_phy_reset(struct tg3 *tp)
971 err = tg3_readphy(tp, MII_BMSR, &phy_status);
972 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
979 err = tg3_phy_reset_5703_4_5(tp);
985 err = tg3_bmcr_reset(tp);
990 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
991 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
992 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
993 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
994 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
995 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
996 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
998 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
999 tg3_writephy(tp, 0x1c, 0x8d68);
1000 tg3_writephy(tp, 0x1c, 0x8d68);
1002 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1003 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1004 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1005 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1006 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1007 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1008 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1009 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1010 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1012 /* Set Extended packet length bit (bit 14) on all chips that */
1013 /* support jumbo frames */
1014 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1015 /* Cannot do read-modify-write on 5401 */
1016 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1017 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1020 /* Set bit 14 with read-modify-write to preserve other bits */
1021 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1022 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1023 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1026 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1027 * jumbo frames transmission.
1029 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1032 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1033 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1034 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1037 tg3_phy_set_wirespeed(tp);
/* Manage auxiliary (Vaux) power GPIOs.  On dual-port devices (5704/5714)
 * the GPIO settings must reflect the WOL/ASF needs of BOTH ports, so the
 * peer device's flags are consulted via pdev_peer.
 * NOTE(review): this copy appears to be missing multiple structural lines
 * (function braces, early returns, else arms, and some GPIO-sequence
 * statements) — compare against the upstream tg3.c before building.  The
 * comments below annotate only the statements that are visible here.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
	struct tg3 *tp_peer = tp;
	/* EEPROM-write-protected (LOM) boards: GPIOs are not ours to drive. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;
		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
			tp_peer = netdev_priv(dev_peer);
	/* Aux power is needed if either port wants WOL or runs ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
			u32 grc_local_ctrl = 0;
			/* If the peer is already initialized it owns the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			/* Staged GPIO writes with 100 usec settle time each. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
	/* Neither port needs aux power: drive GPIO1 to shut Vaux off. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		if (tp_peer != tp &&
		    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    GRC_LCLCTRL_GPIO_OE1, 100);
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Forward declaration: the full PHY setup routine is defined later. */
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kinds" handed to the firmware signature helpers below. */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

/* Forward declarations for helpers defined further down the file. */
static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);
/*
 * tg3_set_power_state - move the chip to the requested PCI power state.
 *
 * On the way into a low-power state this routine (as visible here):
 * saves the current link config and forces the copper PHY to 10/half,
 * waits for ASF firmware acknowledgement, arms Wake-on-LAN magic-packet
 * reception when enabled, gates the RX/TX/core clocks per chip revision,
 * optionally powers the PHY down, and finally writes PCI_PM_CTRL.
 *
 * NOTE(review): this excerpt is elided -- braces, switch arms and delay
 * statements are missing between the numbered lines; comments below only
 * describe what the visible lines establish.
 */
1148 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1151 u16 power_control, power_caps;
1152 int pm = tp->pm_cap;
1154 /* Make sure register accesses (indirect or otherwise)
1155 * will function correctly.
1157 pci_write_config_dword(tp->pdev,
1158 TG3PCI_MISC_HOST_CTRL,
1159 tp->misc_host_ctrl);
1161 pci_read_config_word(tp->pdev,
/* Clear any pending PME status and the power-state field before
 * programming the new state. */
1164 power_control |= PCI_PM_CTRL_PME_STATUS;
1165 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1169 pci_write_config_word(tp->pdev,
1172 udelay(100);	/* Delay after power state change */
1174 /* Switch out of Vaux if it is not a LOM */
1175 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1176 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
/* presumably the default arm of a switch on 'state' -- TODO confirm
 * against the full source */
1193 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1195 tp->dev->name, state);
1199 power_control |= PCI_PM_CTRL_PME_ENABLE;
/* Mask PCI interrupts while the chip is being powered down. */
1201 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1202 tw32(TG3PCI_MISC_HOST_CTRL,
1203 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Remember the current link parameters so they can be restored when
 * the device powers back up. */
1205 if (tp->link_config.phy_is_low_power == 0) {
1206 tp->link_config.phy_is_low_power = 1;
1207 tp->link_config.orig_speed = tp->link_config.speed;
1208 tp->link_config.orig_duplex = tp->link_config.duplex;
1209 tp->link_config.orig_autoneg = tp->link_config.autoneg;
/* Copper PHYs are dropped to 10/half to save power; serdes parts skip
 * this renegotiation. */
1212 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1213 tp->link_config.speed = SPEED_10;
1214 tp->link_config.duplex = DUPLEX_HALF;
1215 tp->link_config.autoneg = AUTONEG_ENABLE;
1216 tg3_setup_phy(tp, 0);
1219 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Poll the ASF status mailbox for the firmware magic value. */
1223 for (i = 0; i < 200; i++) {
1224 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1225 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Tell the firmware we are shutting down with WOL armed. */
1230 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1231 WOL_DRV_STATE_SHUTDOWN |
1232 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1234 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1236 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1239 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1240 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1243 mac_mode = MAC_MODE_PORT_MODE_MII;
1245 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1246 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1247 mac_mode |= MAC_MODE_LINK_POLARITY;
1249 mac_mode = MAC_MODE_PORT_MODE_TBI;
1252 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1253 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* Magic-packet wake only if the PM capability advertises PME from
 * D3cold and the user enabled WOL. */
1255 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1256 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1257 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1259 tw32_f(MAC_MODE, mac_mode);
1262 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: the exact bits depend on chip family / ASF state. */
1266 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1267 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1271 base_val = tp->pci_clock_ctrl;
1272 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1273 CLOCK_CTRL_TXCLK_DISABLE);
1275 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1276 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1277 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1279 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1280 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1281 u32 newbits1, newbits2;
1283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1285 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1286 CLOCK_CTRL_TXCLK_DISABLE |
1288 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1289 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1290 newbits1 = CLOCK_CTRL_625_CORE;
1291 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1293 newbits1 = CLOCK_CTRL_ALTCLK;
1294 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
/* Two-step write: newbits1 first, then newbits2 on top of it. */
1297 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1300 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1303 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1308 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1309 CLOCK_CTRL_TXCLK_DISABLE |
1310 CLOCK_CTRL_44MHZ_CORE);
1312 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1315 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1316 tp->pci_clock_ctrl | newbits3, 40);
/* Neither WOL nor ASF needs the PHY: power it fully down. */
1320 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1321 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1322 /* Turn off the PHY */
1323 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1324 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1325 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1326 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1327 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1328 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1332 tg3_frob_aux_power(tp);
1334 /* Workaround for unstable PLL clock */
1335 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1336 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1337 u32 val = tr32(0x7d00);
1339 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
/* Halting the RX CPU requires the NVRAM arbitration lock. */
1341 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1344 err = tg3_nvram_lock(tp);
1345 tg3_halt_cpu(tp, RX_CPU_BASE);
1347 tg3_nvram_unlock(tp);
1351 /* Finally, set the new power state. */
1352 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1353 udelay(100);	/* Delay after power state change */
1355 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/*
 * tg3_link_report - log the current link state to the kernel log.
 *
 * Prints "Link is down." when the carrier is off; otherwise prints the
 * negotiated speed/duplex followed by the TX/RX flow-control settings
 * (taken from TG3_FLAG_TX_PAUSE / TG3_FLAG_RX_PAUSE).
 * NOTE(review): excerpt is elided -- some format arguments between the
 * numbered lines are not shown.
 */
1360 static void tg3_link_report(struct tg3 *tp)
1362 if (!netif_carrier_ok(tp->dev)) {
1363 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1365 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1367 (tp->link_config.active_speed == SPEED_1000 ?
1369 (tp->link_config.active_speed == SPEED_100 ?
1371 (tp->link_config.active_duplex == DUPLEX_FULL ?
1374 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1377 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1378 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/*
 * tg3_setup_flow_control - resolve 802.3 pause (flow control) from the
 * local and link-partner advertisement registers and program the MAC.
 *
 * @local_adv:  our MII_ADVERTISE pause bits (1000BaseX bits are first
 *              translated to their 1000BaseT equivalents for MII serdes).
 * @remote_adv: link partner's MII_LPA pause bits.
 *
 * Updates TG3_FLAG_RX_PAUSE/TG3_FLAG_TX_PAUSE in tp->tg3_flags, then
 * writes MAC_RX_MODE / MAC_TX_MODE only when the resolved mode actually
 * changed (avoids redundant register writes).
 */
1382 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1384 u32 new_tg3_flags = 0;
1385 u32 old_rx_mode = tp->rx_mode;
1386 u32 old_tx_mode = tp->tx_mode;
1388 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1390 /* Convert 1000BaseX flow control bits to 1000BaseT
1391 * bits before resolving flow control.
1393 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1394 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1395 ADVERTISE_PAUSE_ASYM);
1396 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1398 if (local_adv & ADVERTISE_1000XPAUSE)
1399 local_adv |= ADVERTISE_PAUSE_CAP;
1400 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1401 local_adv |= ADVERTISE_PAUSE_ASYM;
1402 if (remote_adv & LPA_1000XPAUSE)
1403 remote_adv |= LPA_PAUSE_CAP;
1404 if (remote_adv & LPA_1000XPAUSE_ASYM)
1405 remote_adv |= LPA_PAUSE_ASYM;
/* Standard pause resolution matrix (local cap/asym vs. remote
 * cap/asym) -- see IEEE 802.3 Annex 28B. */
1408 if (local_adv & ADVERTISE_PAUSE_CAP) {
1409 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410 if (remote_adv & LPA_PAUSE_CAP)
1412 (TG3_FLAG_RX_PAUSE |
1414 else if (remote_adv & LPA_PAUSE_ASYM)
1416 (TG3_FLAG_RX_PAUSE);
1418 if (remote_adv & LPA_PAUSE_CAP)
1420 (TG3_FLAG_RX_PAUSE |
1423 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1424 if ((remote_adv & LPA_PAUSE_CAP) &&
1425 (remote_adv & LPA_PAUSE_ASYM))
1426 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1429 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1430 tp->tg3_flags |= new_tg3_flags;
/* Autoneg of pause disabled: keep whatever flags are already set. */
1432 new_tg3_flags = tp->tg3_flags;
1435 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1436 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1438 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1440 if (old_rx_mode != tp->rx_mode) {
1441 tw32_f(MAC_RX_MODE, tp->rx_mode);
1444 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1445 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1447 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1449 if (old_tx_mode != tp->tx_mode) {
1450 tw32_f(MAC_TX_MODE, tp->tx_mode);
/*
 * tg3_aux_stat_to_speed_duplex - decode the PHY's MII_TG3_AUX_STAT
 * speed/duplex field into (*speed, *duplex).
 *
 * Unrecognized values fall through to SPEED_INVALID / DUPLEX_INVALID.
 * NOTE(review): the *speed assignments for the 10/100 cases are on
 * elided lines not shown in this excerpt.
 */
1454 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1456 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1457 case MII_TG3_AUX_STAT_10HALF:
1459 *duplex = DUPLEX_HALF;
1462 case MII_TG3_AUX_STAT_10FULL:
1464 *duplex = DUPLEX_FULL;
1467 case MII_TG3_AUX_STAT_100HALF:
1469 *duplex = DUPLEX_HALF;
1472 case MII_TG3_AUX_STAT_100FULL:
1474 *duplex = DUPLEX_FULL;
1477 case MII_TG3_AUX_STAT_1000HALF:
1478 *speed = SPEED_1000;
1479 *duplex = DUPLEX_HALF;
1482 case MII_TG3_AUX_STAT_1000FULL:
1483 *speed = SPEED_1000;
1484 *duplex = DUPLEX_FULL;
/* default: unknown aux-stat value */
1488 *speed = SPEED_INVALID;
1489 *duplex = DUPLEX_INVALID;
/*
 * tg3_phy_copper_begin - program the copper PHY's advertisement (and,
 * for forced modes, BMCR) according to tp->link_config.
 *
 * Three visible cases:
 *  - low-power mode: advertise only 10 (and optionally 100) Mbps;
 *  - SPEED_INVALID:  advertise everything the chip supports (autoneg);
 *  - otherwise:      advertise/force the single requested speed/duplex.
 * Finishes by either forcing BMCR (autoneg disabled) or restarting
 * autonegotiation.
 * NOTE(review): excerpt is elided -- some closing braces, waits and
 * switch arms between the numbered lines are not shown.
 */
1494 static void tg3_phy_copper_begin(struct tg3 *tp)
1499 if (tp->link_config.phy_is_low_power) {
1500 /* Entering low power mode. Disable gigabit and
1501 * 100baseT advertisements.
1503 tg3_writephy(tp, MII_TG3_CTRL, 0);
1505 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1506 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1507 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1508 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1510 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1511 } else if (tp->link_config.speed == SPEED_INVALID) {
1512 tp->link_config.advertising =
1513 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1514 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1515 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1516 ADVERTISED_Autoneg | ADVERTISED_MII);
1518 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1519 tp->link_config.advertising &=
1520 ~(ADVERTISED_1000baseT_Half |
1521 ADVERTISED_1000baseT_Full);
/* Translate ethtool ADVERTISED_* bits into MII register bits. */
1523 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1524 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1525 new_adv |= ADVERTISE_10HALF;
1526 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1527 new_adv |= ADVERTISE_10FULL;
1528 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1529 new_adv |= ADVERTISE_100HALF;
1530 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1531 new_adv |= ADVERTISE_100FULL;
1532 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1534 if (tp->link_config.advertising &
1535 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1537 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1538 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1539 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1540 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 chips want master mode forced at gigabit. */
1541 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1542 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1544 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1545 MII_TG3_CTRL_ENABLE_AS_MASTER);
1546 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1548 tg3_writephy(tp, MII_TG3_CTRL, 0);
1551 /* Asking for a specific link mode. */
1552 if (tp->link_config.speed == SPEED_1000) {
1553 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1554 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1556 if (tp->link_config.duplex == DUPLEX_FULL)
1557 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1559 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1560 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1561 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1562 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1563 MII_TG3_CTRL_ENABLE_AS_MASTER);
1564 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1566 tg3_writephy(tp, MII_TG3_CTRL, 0);
1568 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1569 if (tp->link_config.speed == SPEED_100) {
1570 if (tp->link_config.duplex == DUPLEX_FULL)
1571 new_adv |= ADVERTISE_100FULL;
1573 new_adv |= ADVERTISE_100HALF;
1575 if (tp->link_config.duplex == DUPLEX_FULL)
1576 new_adv |= ADVERTISE_10FULL;
1578 new_adv |= ADVERTISE_10HALF;
1580 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Forced mode: build a BMCR value for the requested speed/duplex and
 * write it only if it differs from what the PHY already has. */
1584 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1585 tp->link_config.speed != SPEED_INVALID) {
1586 u32 bmcr, orig_bmcr;
1588 tp->link_config.active_speed = tp->link_config.speed;
1589 tp->link_config.active_duplex = tp->link_config.duplex;
1592 switch (tp->link_config.speed) {
1598 bmcr |= BMCR_SPEED100;
1602 bmcr |= TG3_BMCR_SPEED1000;
1606 if (tp->link_config.duplex == DUPLEX_FULL)
1607 bmcr |= BMCR_FULLDPLX;
1609 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1610 (bmcr != orig_bmcr)) {
/* Loopback first, then wait for link to drop before applying
 * the new forced mode. */
1611 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1612 for (i = 0; i < 1500; i++) {
/* BMSR is read twice because link status is latched-low. */
1616 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1617 tg3_readphy(tp, MII_BMSR, &tmp))
1619 if (!(tmp & BMSR_LSTATUS)) {
1624 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg path: (re)start negotiation. */
1628 tg3_writephy(tp, MII_BMCR,
1629 BMCR_ANENABLE | BMCR_ANRESTART);
/*
 * tg3_init_5401phy_dsp - load DSP workaround values into a BCM5401 PHY.
 *
 * Disables tap power management and sets the extended packet length bit
 * via the AUX_CTRL register, then writes a fixed table of DSP address/
 * data pairs.  Errors from the individual writes are OR-ed together and
 * returned (0 on full success).
 */
1633 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1637 /* Turn off tap power management. */
1638 /* Set Extended packet length bit */
1639 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1641 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1642 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1644 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1645 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1647 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1648 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1650 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1651 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1653 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1654 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/*
 * tg3_copper_is_advertising_all - check whether the PHY currently
 * advertises every supported speed/duplex combination.
 *
 * Reads MII_ADVERTISE (10/100 bits) and, unless the chip is 10/100-only,
 * MII_TG3_CTRL (gigabit bits).  Used by tg3_setup_copper_phy() to force
 * an autoneg restart when leaving low-power mode left a reduced
 * advertisement behind.  Returns nonzero iff all bits are present
 * (the return statements themselves are on elided lines).
 */
1661 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1663 u32 adv_reg, all_mask;
1665 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1668 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1669 ADVERTISE_100HALF | ADVERTISE_100FULL);
1670 if ((adv_reg & all_mask) != all_mask)
1672 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1675 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1678 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1679 MII_TG3_CTRL_ADV_1000_FULL);
1680 if ((tg3_ctrl & all_mask) != all_mask)
/*
 * tg3_setup_copper_phy - bring up / re-check the link on a copper PHY.
 *
 * Clears stale MAC status bits, applies per-chip PHY workarounds
 * (5401 DSP init, 5701 A0/B0 CRC fix), polls BMSR/AUX_STAT to determine
 * the negotiated speed and duplex, resolves flow control, programs
 * MAC_MODE accordingly, and finally updates the carrier state and logs
 * a link report if it changed.
 * NOTE(review): excerpt is elided -- local declarations, some closing
 * braces, delays and resets between the numbered lines are not shown.
 */
1686 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1688 int current_link_up;
/* Clear latched link-state/config-change bits before probing. */
1697 (MAC_STATUS_SYNC_CHANGED |
1698 MAC_STATUS_CFG_CHANGED |
1699 MAC_STATUS_MI_COMPLETION |
1700 MAC_STATUS_LNKSTATE_CHANGED));
1703 tp->mi_mode = MAC_MI_MODE_BASE;
1704 tw32_f(MAC_MI_MODE, tp->mi_mode);
1707 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1709 /* Some third-party PHYs need to be reset on link going
1712 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1714 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1715 netif_carrier_ok(tp->dev)) {
/* Double BMSR read: link-status bit is latched-low. */
1716 tg3_readphy(tp, MII_BMSR, &bmsr);
1717 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1718 !(bmsr & BMSR_LSTATUS))
/* BCM5401: reload DSP values and, on B0 parts at gigabit, reset
 * the PHY when link is down. */
1724 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1725 tg3_readphy(tp, MII_BMSR, &bmsr);
1726 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1727 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1730 if (!(bmsr & BMSR_LSTATUS)) {
1731 err = tg3_init_5401phy_dsp(tp);
1735 tg3_readphy(tp, MII_BMSR, &bmsr);
1736 for (i = 0; i < 1000; i++) {
1738 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1739 (bmsr & BMSR_LSTATUS)) {
1745 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1746 !(bmsr & BMSR_LSTATUS) &&
1747 tp->link_config.active_speed == SPEED_1000) {
1748 err = tg3_phy_reset(tp);
1750 err = tg3_init_5401phy_dsp(tp);
1755 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1756 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1757 /* 5701 {A0,B0} CRC bug workaround */
1758 tg3_writephy(tp, 0x15, 0x0a75);
1759 tg3_writephy(tp, 0x1c, 0x8c68);
1760 tg3_writephy(tp, 0x1c, 0x8d68);
1761 tg3_writephy(tp, 0x1c, 0x8c68);
1764 /* Clear pending interrupts... */
1765 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1766 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1768 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1769 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
1771 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1774 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1775 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1776 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1777 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1779 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1782 current_link_up = 0;
1783 current_speed = SPEED_INVALID;
1784 current_duplex = DUPLEX_INVALID;
1786 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1789 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1790 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1791 if (!(val & (1 << 10))) {
1793 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll for link (latched-low BMSR, so read twice per iteration). */
1799 for (i = 0; i < 100; i++) {
1800 tg3_readphy(tp, MII_BMSR, &bmsr);
1801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1802 (bmsr & BMSR_LSTATUS))
1807 if (bmsr & BMSR_LSTATUS) {
/* Wait for AUX_STAT to report a decodable speed/duplex. */
1810 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1811 for (i = 0; i < 2000; i++) {
1813 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1818 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to read back a sane (non-0, non-0x7fff) value. */
1823 for (i = 0; i < 200; i++) {
1824 tg3_readphy(tp, MII_BMCR, &bmcr);
1825 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1827 if (bmcr && bmcr != 0x7fff)
1832 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1833 if (bmcr & BMCR_ANENABLE) {
1834 current_link_up = 1;
1836 /* Force autoneg restart if we are exiting
1839 if (!tg3_copper_is_advertising_all(tp))
1840 current_link_up = 0;
1842 current_link_up = 0;
/* Forced mode is only "up" if the PHY matches the request. */
1845 if (!(bmcr & BMCR_ANENABLE) &&
1846 tp->link_config.speed == current_speed &&
1847 tp->link_config.duplex == current_duplex) {
1848 current_link_up = 1;
1850 current_link_up = 0;
1854 tp->link_config.active_speed = current_speed;
1855 tp->link_config.active_duplex = current_duplex;
/* Resolve pause only for autonegotiated full-duplex links. */
1858 if (current_link_up == 1 &&
1859 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1860 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1861 u32 local_adv, remote_adv;
1863 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1865 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1867 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1870 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1872 /* If we are not advertising full pause capability,
1873 * something is wrong. Bring the link down and reconfigure.
1875 if (local_adv != ADVERTISE_PAUSE_CAP) {
1876 current_link_up = 0;
1878 tg3_setup_flow_control(tp, local_adv, remote_adv);
/* Link down (or low-power exit): reprogram the PHY and re-check. */
1882 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1885 tg3_phy_copper_begin(tp);
1887 tg3_readphy(tp, MII_BMSR, &tmp);
1888 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1889 (tmp & BMSR_LSTATUS))
1890 current_link_up = 1;
/* Program the MAC port mode / duplex / polarity for the result. */
1893 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1894 if (current_link_up == 1) {
1895 if (tp->link_config.active_speed == SPEED_100 ||
1896 tp->link_config.active_speed == SPEED_10)
1897 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1899 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1901 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1903 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1904 if (tp->link_config.active_duplex == DUPLEX_HALF)
1905 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1907 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1909 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1910 (current_link_up == 1 &&
1911 tp->link_config.active_speed == SPEED_10))
1912 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1914 if (current_link_up == 1)
1915 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1918 /* ??? Without this setting Netgear GA302T PHY does not
1919 * ??? send/receive packets...
1921 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1922 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1923 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1924 tw32_f(MAC_MI_MODE, tp->mi_mode);
1928 tw32_f(MAC_MODE, tp->mac_mode);
1931 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1932 /* Polled via timer. */
1933 tw32_f(MAC_EVENT, 0);
1935 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via mailbox. */
1939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1940 current_link_up == 1 &&
1941 tp->link_config.active_speed == SPEED_1000 &&
1942 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1943 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1946 (MAC_STATUS_SYNC_CHANGED |
1947 MAC_STATUS_CFG_CHANGED));
1950 NIC_SRAM_FIRMWARE_MBOX,
1951 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1954 if (current_link_up != netif_carrier_ok(tp->dev)) {
1955 if (current_link_up)
1956 netif_carrier_on(tp->dev);
1958 netif_carrier_off(tp->dev);
1959 tg3_link_report(tp);
/*
 * State block for the software (by-hand) fiber autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine().  The ANEG_STATE_*
 * values enumerate the machine's states; the MR_* bits mirror the
 * "MR" management variables of IEEE 802.3 Clause 37; the ANEG_CFG_*
 * bits decode the received /C/ ordered-set configuration word.
 */
1965 struct tg3_fiber_aneginfo {
1967 #define ANEG_STATE_UNKNOWN 0
1968 #define ANEG_STATE_AN_ENABLE 1
1969 #define ANEG_STATE_RESTART_INIT 2
1970 #define ANEG_STATE_RESTART 3
1971 #define ANEG_STATE_DISABLE_LINK_OK 4
1972 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1973 #define ANEG_STATE_ABILITY_DETECT 6
1974 #define ANEG_STATE_ACK_DETECT_INIT 7
1975 #define ANEG_STATE_ACK_DETECT 8
1976 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1977 #define ANEG_STATE_COMPLETE_ACK 10
1978 #define ANEG_STATE_IDLE_DETECT_INIT 11
1979 #define ANEG_STATE_IDLE_DETECT 12
1980 #define ANEG_STATE_LINK_OK 13
1981 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1982 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Flag bits kept in ->flags (MR_* management variables). */
1985 #define MR_AN_ENABLE 0x00000001
1986 #define MR_RESTART_AN 0x00000002
1987 #define MR_AN_COMPLETE 0x00000004
1988 #define MR_PAGE_RX 0x00000008
1989 #define MR_NP_LOADED 0x00000010
1990 #define MR_TOGGLE_TX 0x00000020
1991 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1992 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1993 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1994 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1995 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1996 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1997 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1998 #define MR_TOGGLE_RX 0x00002000
1999 #define MR_NP_RX 0x00004000
2001 #define MR_LINK_OK 0x80000000
/* Timestamps (in smachine ticks) used for settle-time measurement. */
2003 unsigned long link_time, cur_time;
/* Last stable RX config word, and how many times it repeated. */
2005 u32 ability_match_cfg;
2006 int ability_match_count;
2008 char ability_match, idle_match, ack_match;
2010 u32 txconfig, rxconfig;
/* Bit layout of the transmitted/received config word. */
2011 #define ANEG_CFG_NP 0x00000080
2012 #define ANEG_CFG_ACK 0x00000040
2013 #define ANEG_CFG_RF2 0x00000020
2014 #define ANEG_CFG_RF1 0x00000010
2015 #define ANEG_CFG_PS2 0x00000001
2016 #define ANEG_CFG_PS1 0x00008000
2017 #define ANEG_CFG_HD 0x00004000
2018 #define ANEG_CFG_FD 0x00002000
2019 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes of tg3_fiber_aneg_smachine() (ANEG_DONE/ANEG_OK are on
 * elided lines). */
2024 #define ANEG_TIMER_ENAB 2
2025 #define ANEG_FAILED -1
2027 #define ANEG_STATE_SETTLE_TIME 10000
/*
 * tg3_fiber_aneg_smachine - one step of the software 1000BaseX
 * autonegotiation state machine (roughly IEEE 802.3 Clause 37).
 *
 * Called repeatedly from fiber_autoneg(); each call samples the received
 * config word from MAC_RX_AUTO_NEG, updates the ability/ack match
 * tracking, then advances ap->state.  Returns ANEG_TIMER_ENAB while a
 * settle timer is running, ANEG_FAILED on error, ANEG_DONE on
 * completion (the latter on elided lines).
 * NOTE(review): excerpt is elided -- some braces, 'break's and
 * assignments between the numbered lines are not shown.
 */
2029 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2030 struct tg3_fiber_aneginfo *ap)
2032 unsigned long delta;
/* First entry: reset the match tracking before running the machine. */
2036 if (ap->state == ANEG_STATE_UNKNOWN) {
2040 ap->ability_match_cfg = 0;
2041 ap->ability_match_count = 0;
2042 ap->ability_match = 0;
/* Sample the incoming config word; "ability match" means the same
 * non-zero word was seen on consecutive samples. */
2048 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2049 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2051 if (rx_cfg_reg != ap->ability_match_cfg) {
2052 ap->ability_match_cfg = rx_cfg_reg;
2053 ap->ability_match = 0;
2054 ap->ability_match_count = 0;
2056 if (++ap->ability_match_count > 1) {
2057 ap->ability_match = 1;
2058 ap->ability_match_cfg = rx_cfg_reg;
2061 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config word received: clear all match state. */
2069 ap->ability_match_cfg = 0;
2070 ap->ability_match_count = 0;
2071 ap->ability_match = 0;
2077 ap->rxconfig = rx_cfg_reg;
2081 case ANEG_STATE_UNKNOWN:
2082 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2083 ap->state = ANEG_STATE_AN_ENABLE;
2086 case ANEG_STATE_AN_ENABLE:
2087 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2088 if (ap->flags & MR_AN_ENABLE) {
2091 ap->ability_match_cfg = 0;
2092 ap->ability_match_count = 0;
2093 ap->ability_match = 0;
2097 ap->state = ANEG_STATE_RESTART_INIT;
2099 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2103 case ANEG_STATE_RESTART_INIT:
2104 ap->link_time = ap->cur_time;
2105 ap->flags &= ~(MR_NP_LOADED);
/* Send an all-zero config word to restart negotiation. */
2107 tw32(MAC_TX_AUTO_NEG, 0);
2108 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2109 tw32_f(MAC_MODE, tp->mac_mode);
2112 ret = ANEG_TIMER_ENAB;
2113 ap->state = ANEG_STATE_RESTART;
2116 case ANEG_STATE_RESTART:
2117 delta = ap->cur_time - ap->link_time;
2118 if (delta > ANEG_STATE_SETTLE_TIME) {
2119 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2121 ret = ANEG_TIMER_ENAB;
2125 case ANEG_STATE_DISABLE_LINK_OK:
2129 case ANEG_STATE_ABILITY_DETECT_INIT:
2130 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full duplex + symmetric pause. */
2131 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2132 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2133 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2134 tw32_f(MAC_MODE, tp->mac_mode);
2137 ap->state = ANEG_STATE_ABILITY_DETECT;
2140 case ANEG_STATE_ABILITY_DETECT:
2141 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2142 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2146 case ANEG_STATE_ACK_DETECT_INIT:
/* Echo the partner's word back with the ACK bit set. */
2147 ap->txconfig |= ANEG_CFG_ACK;
2148 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2149 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150 tw32_f(MAC_MODE, tp->mac_mode);
2153 ap->state = ANEG_STATE_ACK_DETECT;
2156 case ANEG_STATE_ACK_DETECT:
2157 if (ap->ack_match != 0) {
2158 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2159 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2160 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2162 ap->state = ANEG_STATE_AN_ENABLE;
2164 } else if (ap->ability_match != 0 &&
2165 ap->rxconfig == 0) {
2166 ap->state = ANEG_STATE_AN_ENABLE;
2170 case ANEG_STATE_COMPLETE_ACK_INIT:
/* Reserved bits set in the received word -> protocol error. */
2171 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the link partner's advertised abilities into MR_* flags. */
2175 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2176 MR_LP_ADV_HALF_DUPLEX |
2177 MR_LP_ADV_SYM_PAUSE |
2178 MR_LP_ADV_ASYM_PAUSE |
2179 MR_LP_ADV_REMOTE_FAULT1 |
2180 MR_LP_ADV_REMOTE_FAULT2 |
2181 MR_LP_ADV_NEXT_PAGE |
2184 if (ap->rxconfig & ANEG_CFG_FD)
2185 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2186 if (ap->rxconfig & ANEG_CFG_HD)
2187 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2188 if (ap->rxconfig & ANEG_CFG_PS1)
2189 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2190 if (ap->rxconfig & ANEG_CFG_PS2)
2191 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2192 if (ap->rxconfig & ANEG_CFG_RF1)
2193 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2194 if (ap->rxconfig & ANEG_CFG_RF2)
2195 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2196 if (ap->rxconfig & ANEG_CFG_NP)
2197 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2199 ap->link_time = ap->cur_time;
2201 ap->flags ^= (MR_TOGGLE_TX);
2202 if (ap->rxconfig & 0x0008)
2203 ap->flags |= MR_TOGGLE_RX;
2204 if (ap->rxconfig & ANEG_CFG_NP)
2205 ap->flags |= MR_NP_RX;
2206 ap->flags |= MR_PAGE_RX;
2208 ap->state = ANEG_STATE_COMPLETE_ACK;
2209 ret = ANEG_TIMER_ENAB;
2212 case ANEG_STATE_COMPLETE_ACK:
/* Partner restarted (stable all-zero word): go back to AN_ENABLE. */
2213 if (ap->ability_match != 0 &&
2214 ap->rxconfig == 0) {
2215 ap->state = ANEG_STATE_AN_ENABLE;
2218 delta = ap->cur_time - ap->link_time;
2219 if (delta > ANEG_STATE_SETTLE_TIME) {
2220 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2221 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2223 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2224 !(ap->flags & MR_NP_RX)) {
2225 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2233 case ANEG_STATE_IDLE_DETECT_INIT:
2234 ap->link_time = ap->cur_time;
/* Stop sending config words; wait for idle on the wire. */
2235 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2236 tw32_f(MAC_MODE, tp->mac_mode);
2239 ap->state = ANEG_STATE_IDLE_DETECT;
2240 ret = ANEG_TIMER_ENAB;
2243 case ANEG_STATE_IDLE_DETECT:
2244 if (ap->ability_match != 0 &&
2245 ap->rxconfig == 0) {
2246 ap->state = ANEG_STATE_AN_ENABLE;
2249 delta = ap->cur_time - ap->link_time;
2250 if (delta > ANEG_STATE_SETTLE_TIME) {
2251 /* XXX another gem from the Broadcom driver :( */
2252 ap->state = ANEG_STATE_LINK_OK;
2256 case ANEG_STATE_LINK_OK:
2257 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2261 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2262 /* ??? unimplemented */
2265 case ANEG_STATE_NEXT_PAGE_WAIT:
2266 /* ??? unimplemented */
/*
 * fiber_autoneg - run the software fiber autonegotiation to completion.
 *
 * Drives tg3_fiber_aneg_smachine() for up to ~195000 iterations, then
 * stops sending config words.  *flags receives the resulting MR_* bits;
 * the return value (on an elided line) reflects whether negotiation
 * finished with link + full duplex.
 */
2277 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2280 struct tg3_fiber_aneginfo aninfo;
2281 int status = ANEG_FAILED;
/* Clear the TX config word and force GMII port mode for the run. */
2285 tw32_f(MAC_TX_AUTO_NEG, 0);
2287 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2288 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2291 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2294 memset(&aninfo, 0, sizeof(aninfo));
2295 aninfo.flags |= MR_AN_ENABLE;
2296 aninfo.state = ANEG_STATE_UNKNOWN;
2297 aninfo.cur_time = 0;
2299 while (++tick < 195000) {
2300 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2301 if (status == ANEG_DONE || status == ANEG_FAILED)
2307 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2308 tw32_f(MAC_MODE, tp->mac_mode);
2311 *flags = aninfo.flags;
2313 if (status == ANEG_DONE &&
2314 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2315 MR_LP_ADV_FULL_DUPLEX)))
/*
 * tg3_init_bcm8002 - initialization sequence for the BCM8002 fiber PHY.
 *
 * Skips the reset when the driver is already initialized and PCS sync is
 * present; otherwise resets the PHY and programs it through a fixed
 * sequence of vendor register writes (PLL lock range, auto-lock/comdet,
 * POR pulse), finishing by deselecting the channel register so the PHY
 * ID can be read.  Delay/udelay lines are elided from this excerpt.
 */
2321 static void tg3_init_bcm8002(struct tg3 *tp)
2323 u32 mac_status = tr32(MAC_STATUS);
2326 /* Reset when initting first time or we have a link. */
2327 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2328 !(mac_status & MAC_STATUS_PCS_SYNCED))
2331 /* Set PLL lock range. */
2332 tg3_writephy(tp, 0x16, 0x8007);
/* SW reset */
2335 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2337 /* Wait for reset to complete. */
2338 /* XXX schedule_timeout() ... */
2339 for (i = 0; i < 500; i++)
2342 /* Config mode; select PMA/Ch 1 regs. */
2343 tg3_writephy(tp, 0x10, 0x8411);
2345 /* Enable auto-lock and comdet, select txclk for tx. */
2346 tg3_writephy(tp, 0x11, 0x0a10);
2348 tg3_writephy(tp, 0x18, 0x00a0);
2349 tg3_writephy(tp, 0x16, 0x41ff);
2351 /* Assert and deassert POR. */
2352 tg3_writephy(tp, 0x13, 0x0400);
2354 tg3_writephy(tp, 0x13, 0x0000);
2356 tg3_writephy(tp, 0x11, 0x0a50);
2358 tg3_writephy(tp, 0x11, 0x0a10);
2360 /* Wait for signal to stabilize */
2361 /* XXX schedule_timeout() ... */
2362 for (i = 0; i < 15000; i++)
2365 /* Deselect the channel register so we can read the PHYID
2368 tg3_writephy(tp, 0x10, 0x8011);
/*
 * tg3_setup_fiber_hw_autoneg - fiber link setup using the chip's
 * hardware SERDES autonegotiation block (SG_DIG_CTRL/SG_DIG_STATUS).
 *
 * For forced mode it disables HW autoneg and declares link from PCS
 * sync.  For autoneg it programs the expected SG_DIG_CTRL word (pause
 * bits 11/12 set), waits ~200ms for completion, resolves flow control
 * from the partner's pause bits, and falls back to parallel detection
 * (PCS sync without received config words).  Returns 1 if link is up.
 * NOTE(review): excerpt is elided -- some serdes_cfg bit manipulation
 * and delay lines between the numbered lines are not shown.
 */
2371 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2373 u32 sg_dig_ctrl, sg_dig_status;
2374 u32 serdes_cfg, expected_sg_dig_ctrl;
2375 int workaround, port_a;
2376 int current_link_up;
2379 expected_sg_dig_ctrl = 0;
2382 current_link_up = 0;
/* 5704 A0/A1 need a MAC_SERDES_CFG workaround; port is identified
 * via DUAL_MAC_CTRL. */
2384 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2385 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2387 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2390 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2391 /* preserve bits 20-23 for voltage regulator */
2392 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2395 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2397 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
/* Forced mode but HW autoneg still enabled: turn it off. */
2398 if (sg_dig_ctrl & (1 << 31)) {
2400 u32 val = serdes_cfg;
2406 tw32_f(MAC_SERDES_CFG, val);
2408 tw32_f(SG_DIG_CTRL, 0x01388400);
2410 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2411 tg3_setup_flow_control(tp, 0, 0);
2412 current_link_up = 1;
2417 /* Want auto-negotiation. */
2418 expected_sg_dig_ctrl = 0x81388400;
2420 /* Pause capability */
2421 expected_sg_dig_ctrl |= (1 << 11);
2423 /* Asymettric pause */
2424 expected_sg_dig_ctrl |= (1 << 12);
2426 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2428 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2429 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2431 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2433 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2434 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2435 MAC_STATUS_SIGNAL_DET)) {
2438 /* Giver time to negotiate (~200ms) */
2439 for (i = 0; i < 40000; i++) {
2440 sg_dig_status = tr32(SG_DIG_STATUS);
2441 if (sg_dig_status & (0x3))
2445 mac_status = tr32(MAC_STATUS);
/* Bit 1 = autoneg complete; bits 19/20 = partner pause bits. */
2447 if ((sg_dig_status & (1 << 1)) &&
2448 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2449 u32 local_adv, remote_adv;
2451 local_adv = ADVERTISE_PAUSE_CAP;
2453 if (sg_dig_status & (1 << 19))
2454 remote_adv |= LPA_PAUSE_CAP;
2455 if (sg_dig_status & (1 << 20))
2456 remote_adv |= LPA_PAUSE_ASYM;
2458 tg3_setup_flow_control(tp, local_adv, remote_adv);
2459 current_link_up = 1;
2460 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2461 } else if (!(sg_dig_status & (1 << 1))) {
2462 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2463 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2466 u32 val = serdes_cfg;
2473 tw32_f(MAC_SERDES_CFG, val);
2476 tw32_f(SG_DIG_CTRL, 0x01388400);
2479 /* Link parallel detection - link is up */
2480 /* only if we have PCS_SYNC and not */
2481 /* receiving config code words */
2482 mac_status = tr32(MAC_STATUS);
2483 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2484 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2485 tg3_setup_flow_control(tp, 0, 0);
2486 current_link_up = 1;
2493 return current_link_up;
/*
 * tg3_setup_fiber_by_hand - fiber link setup using the software
 * autonegotiation state machine (fiber_autoneg()) instead of the
 * chip's HW SERDES block.
 *
 * With autoneg enabled: runs fiber_autoneg(), resolves flow control
 * from the resulting MR_LP_ADV_* flags, then waits for the MAC status
 * change bits to settle; as a fallback, link is also declared on PCS
 * sync without received config words (parallel detection).  With
 * autoneg disabled the 1000FD link is simply forced up.
 * Returns 1 if link is up.
 */
2496 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2498 int current_link_up = 0;
2500 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2501 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2505 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2509 if (fiber_autoneg(tp, &flags)) {
2510 u32 local_adv, remote_adv;
2512 local_adv = ADVERTISE_PAUSE_CAP;
2514 if (flags & MR_LP_ADV_SYM_PAUSE)
2515 remote_adv |= LPA_PAUSE_CAP;
2516 if (flags & MR_LP_ADV_ASYM_PAUSE)
2517 remote_adv |= LPA_PAUSE_ASYM;
2519 tg3_setup_flow_control(tp, local_adv, remote_adv);
2521 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2522 current_link_up = 1;
/* Let SYNC/CFG change bits settle before trusting MAC_STATUS. */
2524 for (i = 0; i < 30; i++) {
2527 (MAC_STATUS_SYNC_CHANGED |
2528 MAC_STATUS_CFG_CHANGED));
2530 if ((tr32(MAC_STATUS) &
2531 (MAC_STATUS_SYNC_CHANGED |
2532 MAC_STATUS_CFG_CHANGED)) == 0)
2536 mac_status = tr32(MAC_STATUS);
2537 if (current_link_up == 0 &&
2538 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2539 !(mac_status & MAC_STATUS_RCVD_CFG))
2540 current_link_up = 1;
2542 /* Forcing 1000FD link up. */
2543 current_link_up = 1;
2544 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2546 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2551 return current_link_up;
/*
 * tg3_setup_fiber_phy - top-level link setup for TBI (fiber) ports.
 *
 * Snapshots the current pause/speed/duplex so a change can be reported,
 * short-circuits when nothing changed, puts the MAC in TBI mode,
 * initializes a BCM8002 PHY if present, then delegates to either
 * tg3_setup_fiber_hw_autoneg() or tg3_setup_fiber_by_hand().  Finishes
 * by fixing up MAC_MODE, the LED override, the carrier state, and
 * logging a link report when anything changed.
 * NOTE(review): excerpt is elided -- local declarations and udelay()s
 * between the numbered lines are not shown.
 */
2554 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2557 u16 orig_active_speed;
2558 u8 orig_active_duplex;
2560 int current_link_up;
2564 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2565 TG3_FLAG_TX_PAUSE));
2566 orig_active_speed = tp->link_config.active_speed;
2567 orig_active_duplex = tp->link_config.active_duplex;
/* Without HW autoneg, a link that is up and fully synced needs no
 * renegotiation -- just ack the latched status bits. */
2569 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2570 netif_carrier_ok(tp->dev) &&
2571 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2572 mac_status = tr32(MAC_STATUS);
2573 mac_status &= (MAC_STATUS_PCS_SYNCED |
2574 MAC_STATUS_SIGNAL_DET |
2575 MAC_STATUS_CFG_CHANGED |
2576 MAC_STATUS_RCVD_CFG);
2577 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2578 MAC_STATUS_SIGNAL_DET)) {
2579 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2580 MAC_STATUS_CFG_CHANGED));
2585 tw32_f(MAC_TX_AUTO_NEG, 0);
2587 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2588 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2589 tw32_f(MAC_MODE, tp->mac_mode);
2592 if (tp->phy_id == PHY_ID_BCM8002)
2593 tg3_init_bcm8002(tp);
2595 /* Enable link change event even when serdes polling. */
2596 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2599 current_link_up = 0;
2600 mac_status = tr32(MAC_STATUS);
2602 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2603 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2605 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2607 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2608 tw32_f(MAC_MODE, tp->mac_mode);
/* Keep SD_STATUS_UPDATED, drop any stale link-change indication. */
2611 tp->hw_status->status =
2612 (SD_STATUS_UPDATED |
2613 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2615 for (i = 0; i < 100; i++) {
2616 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2617 MAC_STATUS_CFG_CHANGED));
2619 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2620 MAC_STATUS_CFG_CHANGED)) == 0)
2624 mac_status = tr32(MAC_STATUS);
/* Lost PCS sync after setup: treat as link down and, for autoneg,
 * resume sending config words. */
2625 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2626 current_link_up = 0;
2627 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2628 tw32_f(MAC_MODE, (tp->mac_mode |
2629 MAC_MODE_SEND_CONFIGS));
2631 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber links are always 1000/full when up. */
2635 if (current_link_up == 1) {
2636 tp->link_config.active_speed = SPEED_1000;
2637 tp->link_config.active_duplex = DUPLEX_FULL;
2638 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2639 LED_CTRL_LNKLED_OVERRIDE |
2640 LED_CTRL_1000MBPS_ON));
2642 tp->link_config.active_speed = SPEED_INVALID;
2643 tp->link_config.active_duplex = DUPLEX_INVALID;
2644 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2645 LED_CTRL_LNKLED_OVERRIDE |
2646 LED_CTRL_TRAFFIC_OVERRIDE));
2649 if (current_link_up != netif_carrier_ok(tp->dev)) {
2650 if (current_link_up)
2651 netif_carrier_on(tp->dev);
2653 netif_carrier_off(tp->dev);
2654 tg3_link_report(tp);
/* Also report when only pause/speed/duplex changed. */
2657 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2659 if (orig_pause_cfg != now_pause_cfg ||
2660 orig_active_speed != tp->link_config.active_speed ||
2661 orig_active_duplex != tp->link_config.active_duplex)
2662 tg3_link_report(tp);
/*
 * tg3_setup_fiber_mii_phy() - link setup for fiber parts driven through
 * an MII-register PHY interface (e.g. 5714 class).
 *
 * Reads BMSR/BMCR, programs the 1000BASE-X advertisement (always
 * symmetric pause, like copper) or a forced-duplex BMCR, resolves duplex
 * and flow control from the negotiated ability bits, reprograms MAC_MODE
 * for the result, and syncs netif carrier state.  Returns the OR of the
 * tg3_readphy() error codes accumulated in err.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, declarations, else-arms) are omitted from this extract.
 */
2668 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2670 int current_link_up, err = 0;
2675 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2676 tw32_f(MAC_MODE, tp->mac_mode);
/* Ack all pending MAC status change bits before probing the PHY. */
2682 (MAC_STATUS_SYNC_CHANGED |
2683 MAC_STATUS_CFG_CHANGED |
2684 MAC_STATUS_MI_COMPLETION |
2685 MAC_STATUS_LNKSTATE_CHANGED));
2691 current_link_up = 0;
2692 current_speed = SPEED_INVALID;
2693 current_duplex = DUPLEX_INVALID;
/* BMSR is read twice: link-down is latched until the second read. */
2695 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2696 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* 5714: derive the link bit from MAC_TX_STATUS instead of BMSR. */
2697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2698 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2699 bmsr |= BMSR_LSTATUS;
2701 bmsr &= ~BMSR_LSTATUS;
2704 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2706 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2707 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2708 /* do nothing, just check for link up at the end */
2709 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Build the 1000BASE-X advertisement word from link_config. */
2712 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2713 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2714 ADVERTISE_1000XPAUSE |
2715 ADVERTISE_1000XPSE_ASYM |
2718 /* Always advertise symmetric PAUSE just like copper */
2719 new_adv |= ADVERTISE_1000XPAUSE;
2721 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2722 new_adv |= ADVERTISE_1000XHALF;
2723 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2724 new_adv |= ADVERTISE_1000XFULL;
/* Restart autoneg only if the advertisement changed or AN was off. */
2726 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2727 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2728 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2729 tg3_writephy(tp, MII_BMCR, bmcr);
2731 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2732 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2733 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* Forced mode: 1000 Mb/s with duplex taken from link_config. */
2740 bmcr &= ~BMCR_SPEED1000;
2741 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2743 if (tp->link_config.duplex == DUPLEX_FULL)
2744 new_bmcr |= BMCR_FULLDPLX;
2746 if (new_bmcr != bmcr) {
2747 /* BMCR_SPEED1000 is a reserved bit that needs
2748 * to be set on write.
2750 new_bmcr |= BMCR_SPEED1000;
2752 /* Force a linkdown */
2753 if (netif_carrier_ok(tp->dev)) {
2756 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2757 adv &= ~(ADVERTISE_1000XFULL |
2758 ADVERTISE_1000XHALF |
2760 tg3_writephy(tp, MII_ADVERTISE, adv);
2761 tg3_writephy(tp, MII_BMCR, bmcr |
2765 netif_carrier_off(tp->dev);
2767 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-read link status after the forced-mode write (double read again). */
2769 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2770 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2771 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2773 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2774 bmsr |= BMSR_LSTATUS;
2776 bmsr &= ~BMSR_LSTATUS;
2778 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2782 if (bmsr & BMSR_LSTATUS) {
2783 current_speed = SPEED_1000;
2784 current_link_up = 1;
2785 if (bmcr & BMCR_FULLDPLX)
2786 current_duplex = DUPLEX_FULL;
2788 current_duplex = DUPLEX_HALF;
/* Autoneg result: intersect local and partner ability words. */
2790 if (bmcr & BMCR_ANENABLE) {
2791 u32 local_adv, remote_adv, common;
2793 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2794 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2795 common = local_adv & remote_adv;
2796 if (common & (ADVERTISE_1000XHALF |
2797 ADVERTISE_1000XFULL)) {
2798 if (common & ADVERTISE_1000XFULL)
2799 current_duplex = DUPLEX_FULL;
2801 current_duplex = DUPLEX_HALF;
2803 tg3_setup_flow_control(tp, local_adv,
2807 current_link_up = 0;
/* Program resolved duplex into the MAC and fire a link-state event. */
2811 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2812 if (tp->link_config.active_duplex == DUPLEX_HALF)
2813 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2815 tw32_f(MAC_MODE, tp->mac_mode);
2818 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2820 tp->link_config.active_speed = current_speed;
2821 tp->link_config.active_duplex = current_duplex;
2823 if (current_link_up != netif_carrier_ok(tp->dev)) {
2824 if (current_link_up)
2825 netif_carrier_on(tp->dev);
2827 netif_carrier_off(tp->dev);
2828 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2830 tg3_link_report(tp);
/*
 * tg3_serdes_parallel_detect() - periodic helper for SERDES links whose
 * partner does not autonegotiate.
 *
 * If autoneg is enabled but no carrier: probe signal detect (shadow reg
 * 0x1f via reg 0x1c) and the expansion interrupt-status register (0x15
 * via 0x17); when signal is present and no config code words are being
 * received, force 1000/FULL and set TG3_FLG2_PARALLEL_DETECT.  If we are
 * currently in parallel-detect mode and config words reappear, re-enable
 * autoneg.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * are omitted from this extract.
 */
2835 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2837 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2838 /* Give autoneg time to complete. */
2839 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2842 if (!netif_carrier_ok(tp->dev) &&
2843 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2846 tg3_readphy(tp, MII_BMCR, &bmcr);
2847 if (bmcr & BMCR_ANENABLE) {
2850 /* Select shadow register 0x1f */
2851 tg3_writephy(tp, 0x1c, 0x7c00);
2852 tg3_readphy(tp, 0x1c, &phy1);
2854 /* Select expansion interrupt status register */
2855 tg3_writephy(tp, 0x17, 0x0f01);
/* Read twice: first read clears latched interrupt status bits. */
2856 tg3_readphy(tp, 0x15, &phy2);
2857 tg3_readphy(tp, 0x15, &phy2);
2859 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2860 /* We have signal detect and not receiving
2861 * config code words, link is up by parallel
2865 bmcr &= ~BMCR_ANENABLE;
2866 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2867 tg3_writephy(tp, MII_BMCR, bmcr);
2868 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2872 else if (netif_carrier_ok(tp->dev) &&
2873 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2874 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2877 /* Select expansion interrupt status register */
2878 tg3_writephy(tp, 0x17, 0x0f01);
2879 tg3_readphy(tp, 0x15, &phy2);
2883 /* Config code words received, turn on autoneg. */
2884 tg3_readphy(tp, MII_BMCR, &bmcr);
2885 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2887 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/*
 * tg3_setup_phy() - top-level link bring-up dispatcher.
 *
 * Routes to the fiber, fiber-MII, or copper setup routine based on the
 * PHY flags, then programs MAC_TX_LENGTHS (larger slot time for 1000HD)
 * and, on pre-5705 parts, the statistics coalescing tick rate based on
 * carrier state.  Returns the error code of the selected setup routine.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * are omitted from this extract.
 */
2893 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2897 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2898 err = tg3_setup_fiber_phy(tp, force_reset);
2899 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2900 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2902 err = tg3_setup_copper_phy(tp, force_reset);
/* 1000HD needs a larger slot time (0xff) than all other modes (32). */
2905 if (tp->link_config.active_speed == SPEED_1000 &&
2906 tp->link_config.active_duplex == DUPLEX_HALF)
2907 tw32(MAC_TX_LENGTHS,
2908 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2909 (6 << TX_LENGTHS_IPG_SHIFT) |
2910 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2912 tw32(MAC_TX_LENGTHS,
2913 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2914 (6 << TX_LENGTHS_IPG_SHIFT) |
2915 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
/* Pre-5705: stop stats coalescing while the link is down. */
2917 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2918 if (netif_carrier_ok(tp->dev)) {
2919 tw32(HOSTCC_STAT_COAL_TICKS,
2920 tp->coal.stats_block_coalesce_usecs);
2922 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2929 /* Tigon3 never reports partial packet sends. So we do not
2930 * need special logic to handle SKBs that have not had all
2931 * of their frags sent yet, like SunGEM does.
/*
 * tg3_tx() - TX completion: walk the software consumer index up to the
 * hardware consumer index from the status block, unmapping the head
 * buffer and each fragment and freeing the skb, then wake the queue
 * (under tx_lock) if it was stopped and space is available again.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, the unmap length argument, kfree call) are omitted here.
 */
2933 static void tg3_tx(struct tg3 *tp)
2935 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2936 u32 sw_idx = tp->tx_cons;
2938 while (sw_idx != hw_idx) {
2939 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2940 struct sk_buff *skb = ri->skb;
2943 if (unlikely(skb == NULL))
2946 pci_unmap_single(tp->pdev,
2947 pci_unmap_addr(ri, mapping),
2953 sw_idx = NEXT_TX(sw_idx);
/* One descriptor per fragment follows the head descriptor. */
2955 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2956 if (unlikely(sw_idx == hw_idx))
2959 ri = &tp->tx_buffers[sw_idx];
2960 if (unlikely(ri->skb != NULL))
2963 pci_unmap_page(tp->pdev,
2964 pci_unmap_addr(ri, mapping),
2965 skb_shinfo(skb)->frags[i].size,
2968 sw_idx = NEXT_TX(sw_idx);
2974 tp->tx_cons = sw_idx;
/* Re-check queue state under tx_lock to avoid racing tg3_start_xmit. */
2976 if (unlikely(netif_queue_stopped(tp->dev))) {
2977 spin_lock(&tp->tx_lock);
2978 if (netif_queue_stopped(tp->dev) &&
2979 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2980 netif_wake_queue(tp->dev);
2981 spin_unlock(&tp->tx_lock);
2985 /* Returns size of skb allocated or < 0 on error.
2987 * We only need to fill in the address because the other members
2988 * of the RX descriptor are invariant, see tg3_init_rings.
2990 * Note the purposeful assymetry of cpu vs. chip accesses. For
2991 * posting buffers we only dirty the first cache line of the RX
2992 * descriptor (containing the address). Whereas for the RX status
2993 * buffers the cpu only reads the last cacheline of the RX descriptor
2994 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/*
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, the src_idx < 0 branches, default: case, alloc-failure return)
 * are omitted from this extract.
 */
2996 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2997 int src_idx, u32 dest_idx_unmasked)
2999 struct tg3_rx_buffer_desc *desc;
3000 struct ring_info *map, *src_map;
3001 struct sk_buff *skb;
3003 int skb_size, dest_idx;
/* Pick descriptor/buffer arrays and buffer size by ring (std vs jumbo). */
3006 switch (opaque_key) {
3007 case RXD_OPAQUE_RING_STD:
3008 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3009 desc = &tp->rx_std[dest_idx];
3010 map = &tp->rx_std_buffers[dest_idx];
3012 src_map = &tp->rx_std_buffers[src_idx];
3013 skb_size = tp->rx_pkt_buf_sz;
3016 case RXD_OPAQUE_RING_JUMBO:
3017 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3018 desc = &tp->rx_jumbo[dest_idx];
3019 map = &tp->rx_jumbo_buffers[dest_idx];
3021 src_map = &tp->rx_jumbo_buffers[src_idx];
3022 skb_size = RX_JUMBO_PKT_BUF_SZ;
3029 /* Do not overwrite any of the map or rp information
3030 * until we are sure we can commit to a new buffer.
3032 * Callers depend upon this behavior and assume that
3033 * we leave everything unchanged if we fail.
3035 skb = dev_alloc_skb(skb_size);
3040 skb_reserve(skb, tp->rx_offset);
3042 mapping = pci_map_single(tp->pdev, skb->data,
3043 skb_size - tp->rx_offset,
3044 PCI_DMA_FROMDEVICE);
3047 pci_unmap_addr_set(map, mapping, mapping);
3049 if (src_map != NULL)
3050 src_map->skb = NULL;
/* Only the DMA address is written; other descriptor fields are invariant. */
3052 desc->addr_hi = ((u64)mapping >> 32);
3053 desc->addr_lo = ((u64)mapping & 0xffffffff);
3058 /* We only need to move over in the address because the other
3059 * members of the RX descriptor are invariant. See notes above
3060 * tg3_alloc_rx_skb for full details.
/*
 * tg3_recycle_rx() - re-post an existing RX buffer (skb + DMA mapping)
 * from src_idx to the next free slot of the same ring, instead of
 * allocating a fresh one.  Used on RX error / copy-break paths.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, default: case) are omitted from this extract.
 */
3062 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3063 int src_idx, u32 dest_idx_unmasked)
3065 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3066 struct ring_info *src_map, *dest_map;
3069 switch (opaque_key) {
3070 case RXD_OPAQUE_RING_STD:
3071 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3072 dest_desc = &tp->rx_std[dest_idx];
3073 dest_map = &tp->rx_std_buffers[dest_idx];
3074 src_desc = &tp->rx_std[src_idx];
3075 src_map = &tp->rx_std_buffers[src_idx];
3078 case RXD_OPAQUE_RING_JUMBO:
3079 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3080 dest_desc = &tp->rx_jumbo[dest_idx];
3081 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3082 src_desc = &tp->rx_jumbo[src_idx];
3083 src_map = &tp->rx_jumbo_buffers[src_idx];
/* Move skb ownership, mapping, and DMA address to the destination slot. */
3090 dest_map->skb = src_map->skb;
3091 pci_unmap_addr_set(dest_map, mapping,
3092 pci_unmap_addr(src_map, mapping));
3093 dest_desc->addr_hi = src_desc->addr_hi;
3094 dest_desc->addr_lo = src_desc->addr_lo;
3096 src_map->skb = NULL;
3099 #if TG3_VLAN_TAG_USED
3100 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3102 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3106 /* The RX ring scheme is composed of multiple rings which post fresh
3107 * buffers to the chip, and one special ring the chip uses to report
3108 * status back to the host.
3110 * The special ring reports the status of received packets to the
3111 * host. The chip does not write into the original descriptor the
3112 * RX buffer was obtained from. The chip simply takes the original
3113 * descriptor as provided by the host, updates the status and length
3114 * field, then writes this into the next status ring entry.
3116 * Each ring the host uses to post buffers to the chip is described
3117 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3118 * it is first placed into the on-chip ram. When the packet's length
3119 * is known, it walks down the TG3_BDINFO entries to select the ring.
3120 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3121 * which is within the range of the new packet's length is chosen.
3123 * The "separate ring for rx status" scheme may sound queer, but it makes
3124 * sense from a cache coherency perspective. If only the host writes
3125 * to the buffer post rings, and only the chip writes to the rx status
3126 * rings, then cache lines never move beyond shared-modified state.
3127 * If both the host and chip were to write into the same ring, cache line
3128 * eviction could occur since both entities want it in an exclusive state.
/*
 * tg3_rx() - NAPI receive processing, bounded by budget.  Returns the
 * number of packets processed.  Large frames get a fresh replacement
 * buffer; small frames are copied into a new skb and the original
 * buffer is recycled (copy-break at RX_COPY_THRESHOLD).
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, labels like next_pkt/drop_it, the rmb() implied by the
 * ordering comment, mailbox write arguments) are omitted here.
 */
3130 static int tg3_rx(struct tg3 *tp, int budget)
3133 u32 sw_idx = tp->rx_rcb_ptr;
3137 hw_idx = tp->hw_status->idx[0].rx_producer;
3139 * We need to order the read of hw_idx and the read of
3140 * the opaque cookie.
3145 while (sw_idx != hw_idx && budget > 0) {
3146 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3148 struct sk_buff *skb;
3149 dma_addr_t dma_addr;
3150 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie identifies the posting ring and buffer index. */
3152 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3153 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3154 if (opaque_key == RXD_OPAQUE_RING_STD) {
3155 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3157 skb = tp->rx_std_buffers[desc_idx].skb;
3158 post_ptr = &tp->rx_std_ptr;
3159 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3160 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3162 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3163 post_ptr = &tp->rx_jumbo_ptr;
3166 goto next_pkt_nopost;
3169 work_mask |= opaque_key;
/* Errored frames: recycle the buffer and count the drop. */
3171 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3172 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3174 tg3_recycle_rx(tp, opaque_key,
3175 desc_idx, *post_ptr);
3177 /* Other statistics kept track of by card. */
3178 tp->net_stats.rx_dropped++;
3182 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3184 if (len > RX_COPY_THRESHOLD
3185 && tp->rx_offset == 2
3186 /* rx_offset != 2 iff this is a 5701 card running
3187 * in PCI-X mode [see tg3_get_invariants()] */
/* Large frame: hand the chip a replacement buffer, pass the original up. */
3191 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3192 desc_idx, *post_ptr);
3196 pci_unmap_single(tp->pdev, dma_addr,
3197 skb_size - tp->rx_offset,
3198 PCI_DMA_FROMDEVICE);
/* Small frame: copy into a fresh skb and recycle the ring buffer. */
3202 struct sk_buff *copy_skb;
3204 tg3_recycle_rx(tp, opaque_key,
3205 desc_idx, *post_ptr);
3207 copy_skb = dev_alloc_skb(len + 2);
3208 if (copy_skb == NULL)
3209 goto drop_it_no_recycle;
3211 copy_skb->dev = tp->dev;
3212 skb_reserve(copy_skb, 2);
3213 skb_put(copy_skb, len);
3214 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3215 memcpy(copy_skb->data, skb->data, len);
3216 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3218 /* We'll reuse the original ring buffer. */
/* CHECKSUM_UNNECESSARY only if the chip's TCP/UDP csum is all-ones. */
3222 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3223 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3224 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3225 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3226 skb->ip_summed = CHECKSUM_UNNECESSARY;
3228 skb->ip_summed = CHECKSUM_NONE;
3230 skb->protocol = eth_type_trans(skb, tp->dev);
3231 #if TG3_VLAN_TAG_USED
3232 if (tp->vlgrp != NULL &&
3233 desc->type_flags & RXD_FLAG_VLAN) {
3234 tg3_vlan_rx(tp, skb,
3235 desc->err_vlan & RXD_VLAN_MASK);
3238 netif_receive_skb(skb);
3240 tp->dev->last_rx = jiffies;
3248 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3250 /* Refresh hw_idx to see if there is new work */
3251 if (sw_idx == hw_idx) {
3252 hw_idx = tp->hw_status->idx[0].rx_producer;
3257 /* ACK the status ring. */
3258 tp->rx_rcb_ptr = sw_idx;
3259 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3261 /* Refill RX ring(s). */
3262 if (work_mask & RXD_OPAQUE_RING_STD) {
3263 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3264 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3267 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3268 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3269 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/*
 * tg3_poll() - NAPI dev->poll handler.  Handles link-change status,
 * runs TX completion, runs RX within the NAPI budget/quota, then, if no
 * work remains, completes NAPI and re-enables chip interrupts.
 * Returns 0 when done, 1 to be polled again.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, tg3_tx() call, rmb/barriers) are omitted from this extract.
 */
3277 static int tg3_poll(struct net_device *netdev, int *budget)
3279 struct tg3 *tp = netdev_priv(netdev);
3280 struct tg3_hw_status *sblk = tp->hw_status;
3283 /* handle link change and other phy events */
3284 if (!(tp->tg3_flags &
3285 (TG3_FLAG_USE_LINKCHG_REG |
3286 TG3_FLAG_POLL_SERDES))) {
3287 if (sblk->status & SD_STATUS_LINK_CHG) {
3288 sblk->status = SD_STATUS_UPDATED |
3289 (sblk->status & ~SD_STATUS_LINK_CHG);
3290 spin_lock(&tp->lock);
3291 tg3_setup_phy(tp, 0);
3292 spin_unlock(&tp->lock);
3296 /* run TX completion thread */
3297 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3301 /* run RX thread, within the bounds set by NAPI.
3302 * All RX "locking" is done by ensuring outside
3303 * code synchronizes with dev->poll()
3305 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3306 int orig_budget = *budget;
/* Respect the per-device quota as well as the global budget. */
3309 if (orig_budget > netdev->quota)
3310 orig_budget = netdev->quota;
3312 work_done = tg3_rx(tp, orig_budget);
3314 *budget -= work_done;
3315 netdev->quota -= work_done;
/* Tagged-status mode: remember the tag we have consumed up to. */
3318 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3319 tp->last_tag = sblk->status_tag;
3322 sblk->status &= ~SD_STATUS_UPDATED;
3324 /* if no more work, tell net stack and NIC we're done */
3325 done = !tg3_has_work(tp);
3327 netif_rx_complete(netdev);
3328 tg3_restart_ints(tp);
3331 return (done ? 0 : 1);
/*
 * tg3_irq_quiesce() - mark the driver as synchronizing with the IRQ
 * handler and wait for any in-flight handler to finish.
 *
 * NOTE(review): the embedded line numbers jump from 3336 to 3341 - this
 * extract omits the lines between BUG_ON and synchronize_irq (presumably
 * setting tp->irq_sync plus a barrier; confirm against the full source).
 */
3334 static void tg3_irq_quiesce(struct tg3 *tp)
3336 BUG_ON(tp->irq_sync);
3341 synchronize_irq(tp->pdev->irq);
3344 static inline int tg3_irq_sync(struct tg3 *tp)
3346 return tp->irq_sync;
3349 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3350 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3351 * with as well. Most of the time, this is not necessary except when
3352 * shutting down the device.
/*
 * NOTE(review): the embedded line numbers jump from 3354 to 3357 -
 * this extract omits the lines between the signature and the
 * tg3_irq_quiesce() call (presumably the "if (irq_sync)" guard the
 * header comment describes; confirm against the full source).
 */
3354 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3357 tg3_irq_quiesce(tp);
3358 spin_lock_bh(&tp->lock);
3359 spin_lock(&tp->tx_lock);
3362 static inline void tg3_full_unlock(struct tg3 *tp)
3364 spin_unlock(&tp->tx_lock);
3365 spin_unlock_bh(&tp->lock);
3368 /* MSI ISR - No need to check for interrupt sharing and no need to
3369 * flush status block and interrupt mailbox. PCI ordering rules
3370 * guarantee that MSI will arrive after the status block.
/*
 * tg3_msi() - interrupt handler used when the device runs in MSI mode.
 * Masks further chip interrupts via the interrupt mailbox and schedules
 * the NAPI poll.  Always reports the interrupt as handled.
 *
 * NOTE(review): embedded line numbers are discontinuous - comment
 * delimiters and braces are omitted from this extract.
 */
3372 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3374 struct net_device *dev = dev_id;
3375 struct tg3 *tp = netdev_priv(dev);
3377 prefetch(tp->hw_status);
3378 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3380 * Writing any value to intr-mbox-0 clears PCI INTA# and
3381 * chip-internal interrupt pending events.
3382 * Writing non-zero to intr-mbox-0 additional tells the
3383 * NIC to stop sending us irqs, engaging "in-intr-handler"
3386 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
/* Skip scheduling if tg3_irq_quiesce() is synchronizing with us. */
3387 if (likely(!tg3_irq_sync(tp)))
3388 netif_rx_schedule(dev); /* schedule NAPI poll */
3390 return IRQ_RETVAL(1);
/*
 * tg3_interrupt() - legacy (INTx) interrupt handler.  Confirms the
 * interrupt is ours via the status block or TG3PCI_PCISTATE, masks chip
 * interrupts through the mailbox, and schedules NAPI if there is work;
 * otherwise re-enables interrupts (possible shared-IRQ spurious entry).
 *
 * NOTE(review): embedded line numbers are discontinuous - braces, the
 * "handled = 0" shared-interrupt arm, and goto labels are omitted here.
 */
3393 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3395 struct net_device *dev = dev_id;
3396 struct tg3 *tp = netdev_priv(dev);
3397 struct tg3_hw_status *sblk = tp->hw_status;
3398 unsigned int handled = 1;
3400 /* In INTx mode, it is possible for the interrupt to arrive at
3401 * the CPU before the status block posted prior to the interrupt.
3402 * Reading the PCI State register will confirm whether the
3403 * interrupt is ours and will flush the status block.
3405 if ((sblk->status & SD_STATUS_UPDATED) ||
3406 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3408 * Writing any value to intr-mbox-0 clears PCI INTA# and
3409 * chip-internal interrupt pending events.
3410 * Writing non-zero to intr-mbox-0 additional tells the
3411 * NIC to stop sending us irqs, engaging "in-intr-handler"
3414 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3416 if (tg3_irq_sync(tp))
3418 sblk->status &= ~SD_STATUS_UPDATED;
3419 if (likely(tg3_has_work(tp))) {
3420 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3421 netif_rx_schedule(dev); /* schedule NAPI poll */
3423 /* No work, shared interrupt perhaps? re-enable
3424 * interrupts, and flush that PCI write
3426 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3429 } else { /* shared interrupt */
3433 return IRQ_RETVAL(handled);
/*
 * tg3_interrupt_tagged() - INTx handler for tagged-status mode: a new
 * status_tag (vs. tp->last_tag) indicates fresh work.  Masks chip
 * interrupts via the mailbox and schedules NAPI, updating last_tag only
 * when the poll was not already scheduled (shared-IRQ race note below).
 *
 * NOTE(review): embedded line numbers are discontinuous - braces and the
 * shared-interrupt "handled = 0" arm are omitted from this extract.
 */
3436 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3438 struct net_device *dev = dev_id;
3439 struct tg3 *tp = netdev_priv(dev);
3440 struct tg3_hw_status *sblk = tp->hw_status;
3441 unsigned int handled = 1;
3443 /* In INTx mode, it is possible for the interrupt to arrive at
3444 * the CPU before the status block posted prior to the interrupt.
3445 * Reading the PCI State register will confirm whether the
3446 * interrupt is ours and will flush the status block.
3448 if ((sblk->status_tag != tp->last_tag) ||
3449 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3451 * writing any value to intr-mbox-0 clears PCI INTA# and
3452 * chip-internal interrupt pending events.
3453 * writing non-zero to intr-mbox-0 additional tells the
3454 * NIC to stop sending us irqs, engaging "in-intr-handler"
3457 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3459 if (tg3_irq_sync(tp))
3461 if (netif_rx_schedule_prep(dev)) {
3462 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3463 /* Update last_tag to mark that this status has been
3464 * seen. Because interrupt may be shared, we may be
3465 * racing with tg3_poll(), so only update last_tag
3466 * if tg3_poll() is not scheduled.
3468 tp->last_tag = sblk->status_tag;
3469 __netif_rx_schedule(dev);
3471 } else { /* shared interrupt */
3475 return IRQ_RETVAL(handled);
3478 /* ISR for interrupt test */
/*
 * tg3_test_isr() - minimal handler installed during the ethtool
 * interrupt self-test: verifies the interrupt is ours, acks it via the
 * mailbox, and reports handled/unhandled.  No NAPI scheduling.
 *
 * NOTE(review): embedded line numbers are discontinuous - braces and the
 * mailbox write's value argument are omitted from this extract.
 */
3479 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3480 struct pt_regs *regs)
3482 struct net_device *dev = dev_id;
3483 struct tg3 *tp = netdev_priv(dev);
3484 struct tg3_hw_status *sblk = tp->hw_status;
3486 if ((sblk->status & SD_STATUS_UPDATED) ||
3487 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3488 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3490 return IRQ_RETVAL(1);
3492 return IRQ_RETVAL(0);
3495 static int tg3_init_hw(struct tg3 *);
3496 static int tg3_halt(struct tg3 *, int, int);
3498 #ifdef CONFIG_NET_POLL_CONTROLLER
3499 static void tg3_poll_controller(struct net_device *dev)
3501 struct tg3 *tp = netdev_priv(dev);
3503 tg3_interrupt(tp->pdev->irq, dev, NULL);
/*
 * tg3_reset_task() - workqueue handler (scheduled by tg3_tx_timeout and
 * error paths) that fully halts and reinitializes the chip under the
 * full lock, restarting the timer if TG3_FLG2_RESTART_TIMER was set.
 * TG3_FLAG_IN_RESET_TASK brackets the work so other paths can detect it.
 *
 * NOTE(review): embedded line numbers are discontinuous - interior lines
 * (braces, tg3_netif_stop, tg3_init_hw call, return) are omitted here.
 */
3507 static void tg3_reset_task(void *_data)
3509 struct tg3 *tp = _data;
3510 unsigned int restart_timer;
3512 tg3_full_lock(tp, 0);
3513 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3515 if (!netif_running(tp->dev)) {
3516 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3517 tg3_full_unlock(tp);
3521 tg3_full_unlock(tp);
/* Re-take the lock with irq_sync=1: quiesce the IRQ handler too. */
3525 tg3_full_lock(tp, 1);
3527 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3528 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3530 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3533 tg3_netif_start(tp);
3536 mod_timer(&tp->timer, jiffies + 1);
3538 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3540 tg3_full_unlock(tp);
/*
 * tg3_tx_timeout() - dev->tx_timeout hook: log the stall and defer the
 * actual chip reset to tg3_reset_task via the workqueue (we may be in
 * atomic context here).
 *
 * NOTE(review): embedded line numbers are discontinuous - the printk's
 * dev->name argument line and braces are omitted from this extract.
 */
3543 static void tg3_tx_timeout(struct net_device *dev)
3545 struct tg3 *tp = netdev_priv(dev);
3547 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3550 schedule_work(&tp->reset_task);
3553 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3554 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3556 u32 base = (u32) mapping & 0xffffffff;
3558 return ((base > 0xffffdcc0) &&
3559 (base + len + 8 < base));
3562 /* Test for DMA addresses > 40-bit */
/*
 * Only 5780-class parts on 64-bit HIGHMEM configs have the 40-bit DMA
 * limitation; the compiled-out configuration presumably returns 0.
 *
 * NOTE(review): embedded line numbers are discontinuous - the function
 * signature's remaining parameter line, the #else/#endif arm, and braces
 * are omitted from this extract.
 */
3563 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3566 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3567 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3568 return (((u64) mapping + len) > DMA_40BIT_MASK);
3575 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3577 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/*
 * tigon3_dma_hwbug_workaround() - linearize an skb whose fragments hit
 * the 4GB-crossing / 40-bit DMA hardware bugs: copy it into a single
 * new skb, map and queue that as one descriptor, then unmap and clear
 * the original descriptors' ring entries.  Drops the packet (frees
 * new_skb) if the fresh mapping still crosses a 4G boundary.
 *
 * NOTE(review): embedded line numbers are discontinuous - braces, the
 * allocation-failure path, entry initialization, skb frees, and the
 * return value lines are omitted from this extract.
 */
3578 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3579 u32 last_plus_one, u32 *start,
3580 u32 base_flags, u32 mss)
3582 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3583 dma_addr_t new_addr = 0;
3590 /* New SKB is guaranteed to be linear. */
3592 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3594 /* Make sure new skb does not cross any 4G boundaries.
3595 * Drop the packet if it does.
3597 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3599 dev_kfree_skb(new_skb);
3602 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3603 base_flags, 1 | (mss << 1));
3604 *start = NEXT_TX(entry);
3608 /* Now clean up the sw ring entries. */
3610 while (entry != last_plus_one) {
3614 len = skb_headlen(skb);
3616 len = skb_shinfo(skb)->frags[i-1].size;
3617 pci_unmap_single(tp->pdev,
3618 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3619 len, PCI_DMA_TODEVICE);
/* First slot takes ownership of new_skb; the rest are cleared. */
3621 tp->tx_buffers[entry].skb = new_skb;
3622 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3624 tp->tx_buffers[entry].skb = NULL;
3626 entry = NEXT_TX(entry);
/*
 * tg3_set_txd() - fill one TX descriptor: DMA address split into
 * hi/lo 32-bit halves, length+flags word, and the vlan_tag field which
 * doubles as the MSS carrier (mss packed into the same u32 as the VLAN
 * tag, see TXD_MSS_SHIFT).  mss_and_is_end packs is_end in bit 0 and
 * mss in the remaining bits.
 *
 * NOTE(review): embedded line numbers are discontinuous - the final
 * parameter line (mss_and_is_end), braces, and the vlan_tag initializer
 * are omitted from this extract.
 */
3635 static void tg3_set_txd(struct tg3 *tp, int entry,
3636 dma_addr_t mapping, int len, u32 flags,
3639 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3640 int is_end = (mss_and_is_end & 0x1);
3641 u32 mss = (mss_and_is_end >> 1);
3645 flags |= TXD_FLAG_END;
3646 if (flags & TXD_FLAG_VLAN) {
3647 vlan_tag = flags >> 16;
3650 vlan_tag |= (mss << TXD_MSS_SHIFT);
3652 txd->addr_hi = ((u64) mapping >> 32);
3653 txd->addr_lo = ((u64) mapping & 0xffffffff);
3654 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3655 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
/*
 * tg3_start_xmit() - hard_start_xmit: map the skb head and fragments
 * into TX descriptors, handling checksum offload, TSO pseudo-header
 * fixup, VLAN tagging, and the 4GB/40-bit DMA hardware bug workaround.
 * Uses a trylock on tx_lock (returns NETDEV_TX_LOCKED on contention)
 * and stops the queue when the ring is nearly full.
 *
 * NOTE(review): embedded line numbers are discontinuous - braces, several
 * initializers (base_flags, mss), TSO checksum argument lines, and goto
 * labels are omitted from this extract.
 */
3658 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3660 struct tg3 *tp = netdev_priv(dev);
3662 u32 len, entry, base_flags, mss;
3663 int would_hit_hwbug;
3665 len = skb_headlen(skb);
3667 /* No BH disabling for tx_lock here. We are running in BH disabled
3668 * context and TX reclaim runs via tp->poll inside of a software
3669 * interrupt. Furthermore, IRQ processing runs lockless so we have
3670 * no IRQ context deadlocks to worry about either. Rejoice!
3672 if (!spin_trylock(&tp->tx_lock))
3673 return NETDEV_TX_LOCKED;
3675 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3676 if (!netif_queue_stopped(dev)) {
3677 netif_stop_queue(dev);
3679 /* This is a hard error, log it. */
3680 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3681 "queue awake!\n", dev->name);
3683 spin_unlock(&tp->tx_lock);
3684 return NETDEV_TX_BUSY;
3687 entry = tp->tx_prod;
3689 if (skb->ip_summed == CHECKSUM_HW)
3690 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3691 #if TG3_TSO_SUPPORT != 0
/* TSO path: prepare IP/TCP headers for hardware segmentation. */
3693 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3694 (mss = skb_shinfo(skb)->tso_size) != 0) {
3695 int tcp_opt_len, ip_tcp_len;
3697 if (skb_header_cloned(skb) &&
3698 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3703 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3704 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3706 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3707 TXD_FLAG_CPU_POST_DMA);
/* Chip recomputes the IP checksum; tot_len covers one segment. */
3709 skb->nh.iph->check = 0;
3710 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3711 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3712 skb->h.th->check = 0;
3713 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3717 ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* Header-length hint bits go in different fields per ASIC family. */
3722 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3723 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3724 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3727 tsflags = ((skb->nh.iph->ihl - 5) +
3728 (tcp_opt_len >> 2));
3729 mss |= (tsflags << 11);
3732 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3735 tsflags = ((skb->nh.iph->ihl - 5) +
3736 (tcp_opt_len >> 2));
3737 base_flags |= tsflags << 12;
3744 #if TG3_VLAN_TAG_USED
3745 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3746 base_flags |= (TXD_FLAG_VLAN |
3747 (vlan_tx_tag_get(skb) << 16));
3750 /* Queue skb data, a.k.a. the main skb fragment. */
3751 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3753 tp->tx_buffers[entry].skb = skb;
3754 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3756 would_hit_hwbug = 0;
3758 if (tg3_4g_overflow_test(mapping, len))
3759 would_hit_hwbug = 1;
3761 tg3_set_txd(tp, entry, mapping, len, base_flags,
3762 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3764 entry = NEXT_TX(entry);
3766 /* Now loop through additional data fragments, and queue them. */
3767 if (skb_shinfo(skb)->nr_frags > 0) {
3768 unsigned int i, last;
3770 last = skb_shinfo(skb)->nr_frags - 1;
3771 for (i = 0; i <= last; i++) {
3772 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3775 mapping = pci_map_page(tp->pdev,
3778 len, PCI_DMA_TODEVICE);
3780 tp->tx_buffers[entry].skb = NULL;
3781 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3783 if (tg3_4g_overflow_test(mapping, len))
3784 would_hit_hwbug = 1;
3786 if (tg3_40bit_overflow_test(tp, mapping, len))
3787 would_hit_hwbug = 1;
/* HW-TSO chips want mss on every fragment descriptor. */
3789 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3790 tg3_set_txd(tp, entry, mapping, len,
3791 base_flags, (i == last)|(mss << 1));
3793 tg3_set_txd(tp, entry, mapping, len,
3794 base_flags, (i == last));
3796 entry = NEXT_TX(entry);
3800 if (would_hit_hwbug) {
3801 u32 last_plus_one = entry;
3804 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3805 start &= (TG3_TX_RING_SIZE - 1);
3807 /* If the workaround fails due to memory/mapping
3808 * failure, silently drop this packet.
3810 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3811 &start, base_flags, mss))
3817 /* Packets are ready, update Tx producer idx local and on card. */
3818 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3820 tp->tx_prod = entry;
3821 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3822 netif_stop_queue(dev);
3823 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3824 netif_wake_queue(tp->dev);
3829 spin_unlock(&tp->tx_lock);
3831 dev->trans_start = jiffies;
3833 return NETDEV_TX_OK;
/*
 * tg3_set_mtu() - record a new MTU: jumbo MTUs enable the jumbo RX ring
 * and (on 5780-class parts) disable TSO; standard MTUs reverse both.
 *
 * NOTE(review): embedded line numbers are discontinuous - braces, the
 * dev->mtu assignment, and else keywords are omitted from this extract.
 */
3836 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3841 if (new_mtu > ETH_DATA_LEN) {
3842 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3843 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3844 ethtool_op_set_tso(dev, 0);
3847 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3849 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3850 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3851 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/*
 * tg3_change_mtu() - dev->change_mtu hook: validate the range, and if
 * the interface is running, halt the chip, apply the new MTU via
 * tg3_set_mtu(), reinitialize, and restart under the full lock.
 *
 * NOTE(review): embedded line numbers are discontinuous - braces, return
 * statements, tg3_netif_stop, and tg3_init_hw are omitted here.
 */
3855 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3857 struct tg3 *tp = netdev_priv(dev);
3859 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3862 if (!netif_running(dev)) {
3863 /* We'll just catch it later when the
3866 tg3_set_mtu(dev, tp, new_mtu);
/* irq_sync=1: quiesce the interrupt handler before the reset. */
3872 tg3_full_lock(tp, 1);
3874 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3876 tg3_set_mtu(dev, tp, new_mtu);
3880 tg3_netif_start(tp);
3882 tg3_full_unlock(tp);
3887 /* Free up pending packets in all rx/tx rings.
3889 * The chip has been shut down and the driver detached from
3890 * the networking, so no interrupts or new tx packets will
3891 * end up in the driver. tp->{tx,}lock is not held and we are not
3892 * in an interrupt context and thus may sleep.
/*
 * NOTE(review): embedded line numbers are discontinuous - braces,
 * rxp->skb = NULL assignments, the tx loop's skb/NULL checks and index
 * increments are omitted from this extract.
 */
3894 static void tg3_free_rings(struct tg3 *tp)
3896 struct ring_info *rxp;
/* Standard RX ring: unmap and free every posted buffer. */
3899 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3900 rxp = &tp->rx_std_buffers[i];
3902 if (rxp->skb == NULL)
3904 pci_unmap_single(tp->pdev,
3905 pci_unmap_addr(rxp, mapping),
3906 tp->rx_pkt_buf_sz - tp->rx_offset,
3907 PCI_DMA_FROMDEVICE);
3908 dev_kfree_skb_any(rxp->skb);
/* Jumbo RX ring: same treatment with the jumbo buffer size. */
3912 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3913 rxp = &tp->rx_jumbo_buffers[i];
3915 if (rxp->skb == NULL)
3917 pci_unmap_single(tp->pdev,
3918 pci_unmap_addr(rxp, mapping),
3919 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3920 PCI_DMA_FROMDEVICE);
3921 dev_kfree_skb_any(rxp->skb);
/* TX ring: unmap the head slot, then each fragment slot, free the skb. */
3925 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3926 struct tx_ring_info *txp;
3927 struct sk_buff *skb;
3930 txp = &tp->tx_buffers[i];
3938 pci_unmap_single(tp->pdev,
3939 pci_unmap_addr(txp, mapping),
3946 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3947 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3948 pci_unmap_page(tp->pdev,
3949 pci_unmap_addr(txp, mapping),
3950 skb_shinfo(skb)->frags[j].size,
3955 dev_kfree_skb_any(skb);
3959 /* Initialize tx/rx rings for packet processing.
3961 * The chip has been shut down and the driver detached from
3962 * the networking, so no interrupts or new tx packets will
3963 * end up in the driver. tp->{tx,}lock are held and thus
3966 static void tg3_init_rings(struct tg3 *tp)
3970 /* Free up all the SKBs. */
3973 /* Zero out all descriptors. */
3974 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3975 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3976 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3977 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips with a jumbo MTU use the larger buffer size for
 * the standard ring instead of a separate jumbo ring. */
3979 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3980 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3981 (tp->dev->mtu > ETH_DATA_LEN))
3982 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3984 /* Initialize invariants of the rings, we only set this
3985 * stuff once. This works because the card does not
3986 * write into the rx buffer posting rings.
3988 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3989 struct tg3_rx_buffer_desc *rxd;
3991 rxd = &tp->rx_std[i];
/* idx_len encodes usable buffer length; the opaque field tags the
 * descriptor with its ring and index for completion matching. */
3992 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3994 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3995 rxd->opaque = (RXD_OPAQUE_RING_STD |
3996 (i << RXD_OPAQUE_INDEX_SHIFT));
3999 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4000 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4001 struct tg3_rx_buffer_desc *rxd;
4003 rxd = &tp->rx_jumbo[i];
4004 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4006 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4008 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4009 (i << RXD_OPAQUE_INDEX_SHIFT));
4013 /* Now allocate fresh SKBs for each rx ring. */
4014 for (i = 0; i < tp->rx_pending; i++) {
4015 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4020 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4021 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4022 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4030 * Must not be invoked with interrupt sources disabled and
4031 * the hardware shutdown down.
/* Release all DMA-coherent ring/status/stats memory and the single
 * kmalloc'd buffer that backs rx_std/rx_jumbo/tx bookkeeping arrays
 * (see tg3_alloc_consistent: one allocation, three sub-arrays). */
4033 static void tg3_free_consistent(struct tg3 *tp)
4035 kfree(tp->rx_std_buffers);
4036 tp->rx_std_buffers = NULL;
4038 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4039 tp->rx_std, tp->rx_std_mapping);
4043 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4044 tp->rx_jumbo, tp->rx_jumbo_mapping);
4045 tp->rx_jumbo = NULL;
4048 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4049 tp->rx_rcb, tp->rx_rcb_mapping);
4053 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4054 tp->tx_ring, tp->tx_desc_mapping);
/* Pointers are NULLed after freeing so a repeat call is harmless. */
4057 if (tp->hw_status) {
4058 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4059 tp->hw_status, tp->status_mapping);
4060 tp->hw_status = NULL;
4063 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4064 tp->hw_stats, tp->stats_mapping);
4065 tp->hw_stats = NULL;
4070 * Must not be invoked with interrupt sources disabled and
4071 * the hardware shutdown down. Can sleep.
/* Allocate all host memory the chip DMAs to/from: one kmalloc buffer
 * carved into the std-RX / jumbo-RX / TX bookkeeping arrays, plus
 * coherent DMA blocks for each descriptor ring, the status block and
 * the statistics block.  On any failure, unwinds via
 * tg3_free_consistent() (freeing NULL/partial state is safe there). */
4073 static int tg3_alloc_consistent(struct tg3 *tp)
4075 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4077 TG3_RX_JUMBO_RING_SIZE)) +
4078 (sizeof(struct tx_ring_info) *
4081 if (!tp->rx_std_buffers)
4084 memset(tp->rx_std_buffers, 0,
4085 (sizeof(struct ring_info) *
4087 TG3_RX_JUMBO_RING_SIZE)) +
4088 (sizeof(struct tx_ring_info) *
/* Carve the jumbo and TX arrays out of the single allocation. */
4091 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4092 tp->tx_buffers = (struct tx_ring_info *)
4093 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4095 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4096 &tp->rx_std_mapping);
4100 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4101 &tp->rx_jumbo_mapping);
4106 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4107 &tp->rx_rcb_mapping);
4111 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4112 &tp->tx_desc_mapping);
4116 tp->hw_status = pci_alloc_consistent(tp->pdev,
4118 &tp->status_mapping);
4122 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4123 sizeof(struct tg3_hw_stats),
4124 &tp->stats_mapping);
/* Status and stats blocks start zeroed so stale values are never read. */
4128 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4129 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4134 tg3_free_consistent(tp);
/* Poll budget shared by the block-stop and tg3_abort_hw wait loops. */
4138 #define MAX_WAIT_CNT 1000
4140 /* To stop a block, clear the enable bit and poll till it
4141 * clears. tp->lock is held.
/* @silent suppresses the timeout message; the return value reports
 * whether the enable bit cleared within MAX_WAIT_CNT polls. */
4143 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4148 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4155 /* We can't enable/disable these bits of the
4156 * 5705/5750, just say success.
4169 for (i = 0; i < MAX_WAIT_CNT; i++) {
4172 if ((val & enable_bit) == 0)
4176 if (i == MAX_WAIT_CNT && !silent) {
4177 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4178 "ofs=%lx enable_bit=%x\n",
4186 /* tp->lock is held. */
/* Quiesce the whole chip: disable interrupts, then stop the RX path,
 * TX path, DMA engines and support blocks in dependency order,
 * OR-accumulating per-block failures into @err.  @silent is passed
 * through to tg3_stop_block() to mute timeout messages. */
4187 static int tg3_abort_hw(struct tg3 *tp, int silent)
4191 tg3_disable_ints(tp);
4193 tp->rx_mode &= ~RX_MODE_ENABLE;
4194 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop the receive-side blocks first so no new work arrives. */
4197 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4198 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4199 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4200 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4201 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4202 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
/* Then the send-side and DMA blocks. */
4204 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4205 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4206 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4207 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4208 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4209 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4210 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4212 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4213 tw32_f(MAC_MODE, tp->mac_mode);
/* MAC TX disable has no tg3_stop_block() helper; poll it by hand. */
4216 tp->tx_mode &= ~TX_MODE_ENABLE;
4217 tw32_f(MAC_TX_MODE, tp->tx_mode);
4219 for (i = 0; i < MAX_WAIT_CNT; i++) {
4221 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4224 if (i >= MAX_WAIT_CNT) {
4225 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4226 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4227 tp->dev->name, tr32(MAC_TX_MODE));
4231 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4232 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4233 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through-queue reset, then stop the last blocks. */
4235 tw32(FTQ_RESET, 0xffffffff);
4236 tw32(FTQ_RESET, 0x00000000);
4238 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4239 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
/* Clear host-visible status/stats so stale data is not read later. */
4242 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4244 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4249 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration grant from the chip.
 * Recursive: only the outermost call touches SWARB; nesting is
 * tracked in tp->nvram_lock_cnt (released by tg3_nvram_unlock). */
4250 static int tg3_nvram_lock(struct tg3 *tp)
4252 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4255 if (tp->nvram_lock_cnt == 0) {
4256 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
/* Poll up to 8000 times for the arbitration grant. */
4257 for (i = 0; i < 8000; i++) {
4258 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Grant never arrived: withdraw the request and fail. */
4263 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4267 tp->nvram_lock_cnt++;
4272 /* tp->lock is held. */
/* Drop one level of the recursive NVRAM lock; the SWARB request is
 * only cleared when the nesting count reaches zero. */
4273 static void tg3_nvram_unlock(struct tg3 *tp)
4275 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4276 if (tp->nvram_lock_cnt > 0)
4277 tp->nvram_lock_cnt--;
4278 if (tp->nvram_lock_cnt == 0)
4279 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4283 /* tp->lock is held. */
/* Set the NVRAM access-enable bit on 5750+ chips that do not have
 * protected NVRAM; a no-op elsewhere. */
4284 static void tg3_enable_nvram_access(struct tg3 *tp)
4286 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4287 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4288 u32 nvaccess = tr32(NVRAM_ACCESS);
4290 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4294 /* tp->lock is held. */
/* Mirror of tg3_enable_nvram_access(): clear the access-enable bit on
 * 5750+ chips without protected NVRAM. */
4295 static void tg3_disable_nvram_access(struct tg3 *tp)
4297 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4298 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4299 u32 nvaccess = tr32(NVRAM_ACCESS);
4301 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4305 /* tp->lock is held. */
/* Before a reset: post the firmware magic (non-Sun boards) and, when
 * the new ASF handshake is in use, write the driver-state word that
 * matches the reset kind into NIC SRAM. */
4306 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4308 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4309 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4310 NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4312 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4314 case RESET_KIND_INIT:
4315 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4319 case RESET_KIND_SHUTDOWN:
4320 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4324 case RESET_KIND_SUSPEND:
4325 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4335 /* tp->lock is held. */
/* After a reset completes: under the new ASF handshake, report the
 * corresponding *_DONE driver state to firmware via NIC SRAM. */
4336 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4338 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4340 case RESET_KIND_INIT:
4341 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4342 DRV_STATE_START_DONE);
4345 case RESET_KIND_SHUTDOWN:
4346 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4347 DRV_STATE_UNLOAD_DONE);
4356 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: when ASF is enabled,
 * write the driver-state word for this reset kind into NIC SRAM. */
4357 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4359 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4361 case RESET_KIND_INIT:
4362 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4366 case RESET_KIND_SHUTDOWN:
4367 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4371 case RESET_KIND_SUSPEND:
4372 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Forward declaration: tg3_stop_fw() is defined after its first use. */
4382 static void tg3_stop_fw(struct tg3 *);
4384 /* tp->lock is held. */
/* Perform a full GRC core-clock reset of the chip, then restore enough
 * PCI/register state to talk to it again and re-probe the firmware's
 * ASF configuration.  Ordering of the writes below matters; this is
 * heavily workaround-laden code (5701 write flush, PCI-E A0 quirks,
 * MSI re-enable on 5780, etc.). */
4385 static int tg3_chip_reset(struct tg3 *tp)
4388 void (*write_op)(struct tg3 *, u32, u32);
/* Take the NVRAM lock so bootcode is not mid-access during reset. */
4391 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4393 /* No matching tg3_nvram_unlock() after this because
4394 * chip reset below will undo the nvram lock.
4396 tp->nvram_lock_cnt = 0;
4399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4400 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4401 tw32(GRC_FASTBOOT_PC, 0);
4404 * We must avoid the readl() that normally takes place.
4405 * It locks machines, causes machine checks, and other
4406 * fun things. So, temporarily disable the 5701
4407 * hardware workaround, while we do the reset.
4409 write_op = tp->write32;
4410 if (write_op == tg3_write_flush_reg32)
4411 tp->write32 = tg3_write32;
/* Issue the core-clock reset, with extra PCI-E bits when needed. */
4414 val = GRC_MISC_CFG_CORECLK_RESET;
4416 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4417 if (tr32(0x7e2c) == 0x60) {
4420 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4421 tw32(GRC_MISC_CFG, (1 << 29));
4426 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4427 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4428 tw32(GRC_MISC_CFG, val);
4430 /* restore 5701 hardware bug workaround write method */
4431 tp->write32 = write_op;
4433 /* Unfortunately, we have to delay before the PCI read back.
4434 * Some 575X chips even will not respond to a PCI cfg access
4435 * when the reset command is given to the chip.
4437 * How do these hardware designers expect things to work
4438 * properly if the PCI write is posted for a long period
4439 * of time? It is always necessary to have some method by
4440 * which a register read back can occur to push the write
4441 * out which does the reset.
4443 * For most tg3 variants the trick below was working.
4448 /* Flush PCI posted writes. The normal MMIO registers
4449 * are inaccessible at this time so this is the only
4450 * way to make this reliably (actually, this is no longer
4451 * the case, see above). I tried to use indirect
4452 * register read/write but this upset some 5701 variants.
4454 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* PCI-E 5750 A0 quirk: wait for link training, set a config bit at
 * 0xc4, and program max payload size / clear errors at 0xd8. */
4458 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4459 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4463 /* Wait for link training to complete. */
4464 for (i = 0; i < 5000; i++)
4467 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4468 pci_write_config_dword(tp->pdev, 0xc4,
4469 cfg_val | (1 << 15));
4471 /* Set PCIE max payload size and clear error status. */
4472 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4475 /* Re-enable indirect register accesses. */
4476 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4477 tp->misc_host_ctrl);
4479 /* Set MAX PCI retry to zero. */
4480 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4481 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4482 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4483 val |= PCISTATE_RETRY_SAME_DMA;
4484 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4486 pci_restore_state(tp->pdev);
4488 /* Make sure PCI-X relaxed ordering bit is clear. */
4489 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4490 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4491 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4493 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4496 /* Chip reset on 5780 will reset MSI enable bit,
4497 * so need to restore it.
4499 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4502 pci_read_config_word(tp->pdev,
4503 tp->msi_cap + PCI_MSI_FLAGS,
4505 pci_write_config_word(tp->pdev,
4506 tp->msi_cap + PCI_MSI_FLAGS,
4507 ctrl | PCI_MSI_FLAGS_ENABLE);
4508 val = tr32(MSGINT_MODE);
4509 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4512 val = tr32(MEMARB_MODE);
4513 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4516 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4518 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4520 tw32(0x5000, 0x400);
4523 tw32(GRC_MODE, tp->grc_mode);
/* 5705 A0 workaround: set bit 15 in the register at 0xc4. */
4525 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4526 u32 val = tr32(0xc4);
4528 tw32(0xc4, val | (1 << 15));
4531 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4533 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4534 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4535 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4536 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Re-establish the MAC port mode appropriate to the PHY type. */
4539 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4540 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4541 tw32_f(MAC_MODE, tp->mac_mode);
4542 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4543 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4544 tw32_f(MAC_MODE, tp->mac_mode);
4546 tw32_f(MAC_MODE, 0);
4549 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4550 /* Wait for firmware initialization to complete. */
4551 for (i = 0; i < 100000; i++) {
4552 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4553 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4558 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4559 "firmware will not restart magic=%08x\n",
4560 tp->dev->name, val);
/* Non-A0 PCI-E chips: set bit 25 in the register at 0x7c00. */
4565 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4566 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4567 u32 val = tr32(0x7c00);
4569 tw32(0x7c00, val | (1 << 25));
4572 /* Reprobe ASF enable state. */
4573 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4574 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4575 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4576 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4579 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4580 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4581 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4582 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4583 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4590 /* tp->lock is held. */
/* Ask ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the command
 * mailbox, raise the RX-CPU event, then poll up to 100 times for the
 * event bit (1 << 14) to be acknowledged. */
4591 static void tg3_stop_fw(struct tg3 *tp)
4593 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4597 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4598 val = tr32(GRC_RX_CPU_EVENT);
4600 tw32(GRC_RX_CPU_EVENT, val);
4602 /* Wait for RX cpu to ACK the event. */
4603 for (i = 0; i < 100; i++) {
4604 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4611 /* tp->lock is held. */
/* Full shutdown sequence: signal firmware (pre-reset), abort the
 * hardware, reset the chip, then post both the legacy and new-handshake
 * completion signatures for @kind. */
4612 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4618 tg3_write_sig_pre_reset(tp, kind);
4620 tg3_abort_hw(tp, silent);
4621 err = tg3_chip_reset(tp);
4623 tg3_write_sig_legacy(tp, kind);
4624 tg3_write_sig_post_reset(tp, kind);
/* Memory layout of the embedded 5701 A0 RX-CPU fix-up firmware image
 * (text/rodata/data/sbss/bss base addresses and lengths in the CPU's
 * address space).  Note: "RELASE" below is a historical typo preserved
 * from the original source. */
4632 #define TG3_FW_RELEASE_MAJOR 0x0
4633 #define TG3_FW_RELASE_MINOR 0x0
4634 #define TG3_FW_RELEASE_FIX 0x0
4635 #define TG3_FW_START_ADDR 0x08000000
4636 #define TG3_FW_TEXT_ADDR 0x08000000
4637 #define TG3_FW_TEXT_LEN 0x9c0
4638 #define TG3_FW_RODATA_ADDR 0x080009c0
4639 #define TG3_FW_RODATA_LEN 0x60
4640 #define TG3_FW_DATA_ADDR 0x08000a40
4641 #define TG3_FW_DATA_LEN 0x20
4642 #define TG3_FW_SBSS_ADDR 0x08000a60
4643 #define TG3_FW_SBSS_LEN 0xc
4644 #define TG3_FW_BSS_ADDR 0x08000a70
4645 #define TG3_FW_BSS_LEN 0x10
4647 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4648 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4649 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4650 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4651 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4652 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4653 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4654 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4655 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4656 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4657 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4658 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4659 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4660 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4661 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4662 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4663 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4664 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4665 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4666 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4667 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4668 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4669 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4670 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4671 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4672 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4674 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4675 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4676 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4677 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4678 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4679 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4680 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4681 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4682 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4683 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4684 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4685 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4686 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4687 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4688 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4689 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4690 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4691 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4692 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4693 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4694 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4695 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4696 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4697 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4698 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4699 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4700 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4701 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4702 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4703 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4704 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4705 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4706 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4707 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4708 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4709 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4710 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4711 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4712 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4713 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4714 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4715 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4716 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4717 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4718 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4719 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4720 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4721 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4722 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4723 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4724 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4725 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4726 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4727 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4728 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4729 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4730 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4731 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4732 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4733 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4734 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4735 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4736 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4737 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4738 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4741 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4742 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4743 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4744 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4745 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4749 #if 0 /* All zeros, don't eat up space with it. */
4750 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4751 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4752 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratch memory regions used when loading CPU firmware. */
4756 #define RX_CPU_SCRATCH_BASE 0x30000
4757 #define RX_CPU_SCRATCH_SIZE 0x04000
4758 #define TX_CPU_SCRATCH_BASE 0x34000
4759 #define TX_CPU_SCRATCH_SIZE 0x04000
4761 /* tp->lock is held. */
/* Halt the embedded RX or TX CPU at @offset by writing CPU_MODE_HALT
 * and polling (up to 10000 iterations) until the halt bit reads back.
 * 5705+ chips have no separate TX CPU, so a TX_CPU_BASE request there
 * takes the early path at the top. */
4762 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4766 if (offset == TX_CPU_BASE &&
4767 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4770 if (offset == RX_CPU_BASE) {
4771 for (i = 0; i < 10000; i++) {
4772 tw32(offset + CPU_STATE, 0xffffffff);
4773 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4774 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final flushed halt write after the polling loop. */
4778 tw32(offset + CPU_STATE, 0xffffffff);
4779 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4782 for (i = 0; i < 10000; i++) {
4783 tw32(offset + CPU_STATE, 0xffffffff);
4784 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4785 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4791 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4794 (offset == RX_CPU_BASE ? "RX" : "TX"));
4798 /* Clear firmware's nvram arbitration. */
4799 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4800 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4805 unsigned int text_base;
4806 unsigned int text_len;
4808 unsigned int rodata_base;
4809 unsigned int rodata_len;
4811 unsigned int data_base;
4812 unsigned int data_len;
4816 /* tp->lock is held. */
/* Copy a firmware image (described by @info) into @cpu_scratch_base of
 * a halted embedded CPU.  Scratch memory is zeroed first; text, rodata
 * and data sections are then written word-by-word, substituting zero
 * for any section whose data pointer is NULL. */
4817 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4818 int cpu_scratch_size, struct fw_info *info)
4820 int err, lock_err, i;
4821 void (*write_op)(struct tg3 *, u32, u32);
/* 5705+ has no TX CPU; refuse to load TX firmware there. */
4823 if (cpu_base == TX_CPU_BASE &&
4824 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4825 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4826 "TX cpu firmware on %s which is 5705.\n",
/* Choose the write primitive by chip generation. */
4831 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4832 write_op = tg3_write_mem;
4834 write_op = tg3_write_indirect_reg32;
4836 /* It is possible that bootcode is still loading at this point.
4837 * Get the nvram lock first before halting the cpu.
4839 lock_err = tg3_nvram_lock(tp);
4840 err = tg3_halt_cpu(tp, cpu_base);
4842 tg3_nvram_unlock(tp);
/* Clear all of scratch, keep the CPU halted, then copy sections.
 * Target offsets use only the low 16 bits of each section base. */
4846 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4847 write_op(tp, cpu_scratch_base + i, 0);
4848 tw32(cpu_base + CPU_STATE, 0xffffffff);
4849 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4850 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4851 write_op(tp, (cpu_scratch_base +
4852 (info->text_base & 0xffff) +
4855 info->text_data[i] : 0));
4856 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4857 write_op(tp, (cpu_scratch_base +
4858 (info->rodata_base & 0xffff) +
4860 (info->rodata_data ?
4861 info->rodata_data[i] : 0));
4862 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4863 write_op(tp, (cpu_scratch_base +
4864 (info->data_base & 0xffff) +
4867 info->data_data[i] : 0));
4875 /* tp->lock is held. */
/* Load the 5701 A0 fix-up firmware into both RX and TX CPU scratch
 * memory, then start only the RX CPU: point its PC at TG3_FW_TEXT_ADDR
 * and verify (5 attempts) that the PC took, before releasing it from
 * halt.  The data section is all zeros, hence data_data = NULL. */
4876 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4878 struct fw_info info;
4881 info.text_base = TG3_FW_TEXT_ADDR;
4882 info.text_len = TG3_FW_TEXT_LEN;
4883 info.text_data = &tg3FwText[0];
4884 info.rodata_base = TG3_FW_RODATA_ADDR;
4885 info.rodata_len = TG3_FW_RODATA_LEN;
4886 info.rodata_data = &tg3FwRodata[0];
4887 info.data_base = TG3_FW_DATA_ADDR;
4888 info.data_len = TG3_FW_DATA_LEN;
4889 info.data_data = NULL;
4891 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4892 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4897 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4898 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4903 /* Now startup only the RX cpu. */
4904 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4905 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4907 for (i = 0; i < 5; i++) {
4908 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4910 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4911 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4912 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4916 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4917 "to set RX CPU PC, is %08x should be %08x\n",
4918 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* PC verified: clear state and release the RX CPU from halt. */
4922 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4923 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
/* Memory layout of the embedded TSO firmware image (release 1.6.0),
 * compiled in only when TG3_TSO_SUPPORT is enabled.  As above,
 * "RELASE" is a historical typo preserved from the original source. */
4928 #if TG3_TSO_SUPPORT != 0
4930 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4931 #define TG3_TSO_FW_RELASE_MINOR 0x6
4932 #define TG3_TSO_FW_RELEASE_FIX 0x0
4933 #define TG3_TSO_FW_START_ADDR 0x08000000
4934 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4935 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4936 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4937 #define TG3_TSO_FW_RODATA_LEN 0x60
4938 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4939 #define TG3_TSO_FW_DATA_LEN 0x30
4940 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4941 #define TG3_TSO_FW_SBSS_LEN 0x2c
4942 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4943 #define TG3_TSO_FW_BSS_LEN 0x894
4945 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4946 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4947 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4948 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4949 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4950 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4951 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4952 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4953 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4954 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4955 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4956 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4957 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4958 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4959 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4960 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4961 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4962 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4963 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4964 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4965 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4966 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4967 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4968 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4969 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4970 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4971 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4972 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4973 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4974 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4975 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4976 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4977 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4978 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4979 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4980 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4981 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4982 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4983 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4984 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4985 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4986 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4987 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4988 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4989 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4990 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4991 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4992 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4993 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4994 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4995 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4996 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4997 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4998 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4999 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5000 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5001 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5002 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5003 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5004 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5005 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5006 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5007 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5008 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5009 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5010 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5011 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5012 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5013 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5014 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5015 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5016 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5017 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5018 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5019 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5020 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5021 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5022 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5023 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5024 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5025 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5026 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5027 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5028 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5029 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5030 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5031 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5032 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5033 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5034 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5035 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5036 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5037 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5038 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5039 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5040 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5041 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5042 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5043 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5044 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5045 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5046 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5047 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5048 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5049 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5050 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5051 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5052 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5053 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5054 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5055 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5056 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5057 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5058 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5059 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5060 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5061 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5062 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5063 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5064 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5065 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5066 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5067 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5068 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5069 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5070 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5071 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5072 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5073 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5074 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5075 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5076 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5077 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5078 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5079 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5080 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5081 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5082 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5083 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5084 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5085 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5086 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5087 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5088 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5089 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5090 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5091 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5092 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5093 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5094 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5095 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5096 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5097 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5098 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5099 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5100 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5101 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5102 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5103 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5104 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5105 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5106 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5107 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5108 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5109 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5110 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5111 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5112 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5113 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5114 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5115 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5116 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5117 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5118 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5119 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5120 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5121 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5122 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5123 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5124 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5125 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5126 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5127 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5128 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5129 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5130 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5131 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5132 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5133 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5134 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5135 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5136 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5137 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5138 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5139 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5140 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5141 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5142 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5143 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5144 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5145 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5146 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5147 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5148 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5149 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5150 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5151 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5152 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5153 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5154 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5155 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5156 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5157 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5158 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5159 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5160 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5161 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5162 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5163 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5164 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5165 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5166 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5167 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5168 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5169 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5170 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5171 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5172 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5173 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5174 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5175 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5176 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5177 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5178 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5179 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5180 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5181 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5182 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5183 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5184 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5185 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5186 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5187 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5188 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5189 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5190 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5191 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5192 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5193 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5194 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5195 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5196 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5197 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5198 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5199 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5200 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5201 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5202 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5203 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5204 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5205 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5206 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5207 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5208 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5209 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5210 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5211 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5212 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5213 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5214 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5215 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5216 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5217 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5218 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5219 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5220 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5221 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5222 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5223 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5224 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5225 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5226 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5227 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5228 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5229 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5232 static u32 tg3TsoFwRodata[] = {
5233 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5234 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5235 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5236 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5240 static u32 tg3TsoFwData[] = {
5241 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5242 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5246 /* 5705 needs a special version of the TSO firmware. */
5247 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5248 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5249 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5250 #define TG3_TSO5_FW_START_ADDR 0x00010000
5251 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5252 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5253 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5254 #define TG3_TSO5_FW_RODATA_LEN 0x50
5255 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5256 #define TG3_TSO5_FW_DATA_LEN 0x20
5257 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5258 #define TG3_TSO5_FW_SBSS_LEN 0x28
5259 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5260 #define TG3_TSO5_FW_BSS_LEN 0x88
5262 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5263 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5264 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5265 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5266 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5267 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5268 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5269 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5270 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5271 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5272 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5273 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5274 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5275 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5276 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5277 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5278 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5279 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5280 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5281 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5282 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5283 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5284 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5285 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5286 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5287 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5288 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5289 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5290 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5291 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5292 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5293 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5294 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5295 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5296 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5297 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5298 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5299 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5300 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5301 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5302 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5303 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5304 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5305 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5306 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5307 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5308 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5309 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5310 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5311 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5312 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5313 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5314 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5315 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5316 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5317 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5318 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5319 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5320 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5321 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5322 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5323 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5324 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5325 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5326 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5327 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5328 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5329 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5330 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5331 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5332 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5333 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5334 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5335 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5336 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5337 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5338 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5339 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5340 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5341 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5342 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5343 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5344 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5345 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5346 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5347 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5348 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5349 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5350 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5351 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5352 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5353 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5354 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5355 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5356 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5357 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5358 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5359 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5360 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5361 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5362 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5363 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5364 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5365 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5366 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5367 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5368 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5369 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5370 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5371 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5372 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5373 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5374 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5375 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5376 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5377 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5378 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5379 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5380 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5381 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5382 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5383 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5384 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5385 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5386 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5387 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5388 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5389 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5390 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5391 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5392 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5393 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5394 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5395 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5396 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5397 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5398 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5399 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5400 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5401 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5402 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5403 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5404 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5405 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5406 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5407 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5408 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5409 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5410 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5411 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5412 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5413 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5414 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5415 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5416 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5417 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5418 0x00000000, 0x00000000, 0x00000000,
/* Read-only data section of the 5705-class TSO firmware image.  The
 * hex words decode to ASCII tags embedded by the firmware build
 * ("Main", "CpuB"/"CpuA", "stko"/"ffld", "fata"/"lErr").
 * NOTE(review): the closing "};" of this initializer is elided from
 * this excerpt. */
5421 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5422 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5423 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5424 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5425 0x00000000, 0x00000000, 0x00000000,
/* Initialized data section of the 5705-class TSO firmware image.  The
 * hex words decode to the ASCII version string "stkoffld_v1.2.0".
 * NOTE(review): the closing "};" of this initializer is elided from
 * this excerpt. */
5428 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5429 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5430 0x00000000, 0x00000000, 0x00000000,
5433 /* tp->lock is held. */
/* Download the TSO offload firmware into an on-chip CPU and start it
 * running.  On 5705-class chips the firmware runs on the RX CPU, with
 * scratch space carved out of the NIC SRAM MBUF pool; on all other
 * firmware-TSO chips it runs on the TX CPU with its dedicated scratch
 * area.  Chips with hardware TSO (TG3_FLG2_HW_TSO) need no download.
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): several original lines (early return, loop break,
 * error-path returns, closing braces) are elided from this excerpt;
 * comments below describe only what is visible here. */
5434 static int tg3_load_tso_firmware(struct tg3 *tp)
5436 struct fw_info info;
5437 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
/* Hardware-TSO chips segment in silicon -- nothing to download. */
5440 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
/* 5705: point fw_info at the 5705 firmware sections and run it on the
 * RX CPU, using the start of the MBUF pool as scratch memory. */
5443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5444 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5445 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5446 info.text_data = &tg3Tso5FwText[0];
5447 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5448 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5449 info.rodata_data = &tg3Tso5FwRodata[0];
5450 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5451 info.data_len = TG3_TSO5_FW_DATA_LEN;
5452 info.data_data = &tg3Tso5FwData[0];
5453 cpu_base = RX_CPU_BASE;
5454 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
/* Scratch must hold text plus the (elided) middle terms plus sbss/bss. */
5455 cpu_scratch_size = (info.text_len +
5458 TG3_TSO5_FW_SBSS_LEN +
5459 TG3_TSO5_FW_BSS_LEN);
/* All other firmware-TSO chips: standard TSO image on the TX CPU. */
5461 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5462 info.text_len = TG3_TSO_FW_TEXT_LEN;
5463 info.text_data = &tg3TsoFwText[0];
5464 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5465 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5466 info.rodata_data = &tg3TsoFwRodata[0];
5467 info.data_base = TG3_TSO_FW_DATA_ADDR;
5468 info.data_len = TG3_TSO_FW_DATA_LEN;
5469 info.data_data = &tg3TsoFwData[0];
5470 cpu_base = TX_CPU_BASE;
5471 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5472 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
/* Push the three sections into the chosen CPU's scratch memory. */
5475 err = tg3_load_firmware_cpu(tp, cpu_base,
5476 cpu_scratch_base, cpu_scratch_size,
5481 /* Now startup the cpu. */
5482 tw32(cpu_base + CPU_STATE, 0xffffffff);
5483 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Verify the CPU latched the new program counter; retry the
 * halt + re-set-PC sequence up to 5 times before reporting failure. */
5485 for (i = 0; i < 5; i++) {
5486 if (tr32(cpu_base + CPU_PC) == info.text_base)
5488 tw32(cpu_base + CPU_STATE, 0xffffffff);
5489 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5490 tw32_f(cpu_base + CPU_PC, info.text_base);
5494 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5495 "to set CPU PC, is %08x should be %08x\n",
5496 tp->dev->name, tr32(cpu_base + CPU_PC),
/* Clear pending state and release the CPU from halt to start it. */
5500 tw32(cpu_base + CPU_STATE, 0xffffffff);
5501 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5505 #endif /* TG3_TSO_SUPPORT != 0 */
5507 /* tp->lock is held. */
/* Program the current dev_addr into the MAC.  The 6-byte address is
 * split into a 2-byte high half and 4-byte low half and written to all
 * four MAC_ADDR_{0..3} register pairs; on 5703/5704 it is additionally
 * written to the twelve extended perfect-match slots.  Finally the sum
 * of the address bytes (masked) seeds the TX backoff generator.
 * NOTE(review): function braces and the loop counter declaration are
 * elided from this excerpt. */
5508 static void __tg3_set_mac_addr(struct tg3 *tp)
5510 u32 addr_high, addr_low;
/* Bytes 0-1 -> high register, bytes 2-5 -> low register, big-endian
 * within each register. */
5513 addr_high = ((tp->dev->dev_addr[0] << 8) |
5514 tp->dev->dev_addr[1]);
5515 addr_low = ((tp->dev->dev_addr[2] << 24) |
5516 (tp->dev->dev_addr[3] << 16) |
5517 (tp->dev->dev_addr[4] << 8) |
5518 (tp->dev->dev_addr[5] << 0));
/* Duplicate into all four primary MAC address slots. */
5519 for (i = 0; i < 4; i++) {
5520 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5521 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 also carry 12 extended address slots. */
5524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5526 for (i = 0; i < 12; i++) {
5527 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5528 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Byte-sum of the address, masked, becomes the backoff seed so that
 * hosts with different MACs pick different collision backoff slots. */
5532 addr_high = (tp->dev->dev_addr[0] +
5533 tp->dev->dev_addr[1] +
5534 tp->dev->dev_addr[2] +
5535 tp->dev->dev_addr[3] +
5536 tp->dev->dev_addr[4] +
5537 tp->dev->dev_addr[5]) &
5538 TX_BACKOFF_SEED_MASK;
5539 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* dev->set_mac_address handler.  Validates the proposed address,
 * copies it into the net_device, and -- only if the interface is
 * running -- reprograms the hardware under tp->lock.
 * NOTE(review): the error return for an invalid address, the return
 * statements, and the closing brace are elided from this excerpt. */
5542 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5544 struct tg3 *tp = netdev_priv(dev);
5545 struct sockaddr *addr = p;
/* Reject multicast/zero addresses (elided branch presumably returns
 * an error -- TODO confirm against full source). */
5547 if (!is_valid_ether_addr(addr->sa_data))
5550 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* If the device is down, the address is latched into hardware later. */
5552 if (!netif_running(dev))
5555 spin_lock_bh(&tp->lock);
5556 __tg3_set_mac_addr(tp);
5557 spin_unlock_bh(&tp->lock);
5562 /* tp->lock is held. */
/* Fill in one TG3_BDINFO ring-control block in NIC SRAM: the 64-bit
 * host DMA address of the ring (high then low 32 bits), the
 * maxlen/flags word, and -- on pre-5705 chips only -- the NIC-local
 * descriptor address.
 * NOTE(review): the tg3_write_mem(...) call lines themselves are
 * elided from this excerpt; only their argument lines are visible. */
5563 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5564 dma_addr_t mapping, u32 maxlen_flags,
5568 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5569 ((u64) mapping >> 32));
5571 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5572 ((u64) mapping & 0xffffffff));
5574 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705+ chips have no NIC-local ring copy, so skip the NIC address. */
5577 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5579 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5583 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from ethtool_coalesce settings:
 * base rx/tx microsecond ticks and max-frame thresholds for all chips,
 * plus the during-interrupt variants and the statistics-block interval
 * on pre-5705 chips.  When the carrier is down the stats interval is
 * overridden (replacement value elided here -- TODO confirm).
 * NOTE(review): function braces and part of the carrier-down branch
 * are elided from this excerpt. */
5584 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5586 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5587 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5588 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5589 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
/* Only pre-5705 silicon exposes the irq-time tick registers... */
5590 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5591 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5592 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5594 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5595 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
/* ...and the DMA'd statistics block interval. */
5596 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5597 u32 val = ec->stats_block_coalesce_usecs;
5599 if (!netif_carrier_ok(tp->dev))
5602 tw32(HOSTCC_STAT_COAL_TICKS, val);
5606 /* tp->lock is held. */
/* Full hardware (re)initialization: resets the chip, then programs,
 * in order, DMA control, GRC mode, buffer manager pools/watermarks,
 * the RX/TX/return descriptor rings, MAC address, MTU, coalescing,
 * the read/write DMA engines, every internal state machine, firmware
 * (5701-A0 fix and TSO), receive rules, and finally link setup via
 * tg3_setup_phy().  Returns 0 on success, negative errno on failure.
 * NOTE(review): this excerpt elides many original lines (returns,
 * udelays, braces, else-branches); the visible lines are reproduced
 * byte-identically and only comments are added. */
5607 static int tg3_reset_hw(struct tg3 *tp)
5609 u32 val, rdmac_mode;
5612 tg3_disable_ints(tp);
5616 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
/* If the chip was previously running, quiesce it before resetting. */
5618 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5619 tg3_abort_hw(tp, 1);
5622 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5625 err = tg3_chip_reset(tp);
5629 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5631 /* This works around an issue with Athlon chipsets on
5632 * B3 tigon3 silicon. This bit has no effect on any
5633 * other revision. But do not set this on PCI Express
5636 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5637 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5638 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* 5704-A0 PCI-X erratum: force retries to reuse the same DMA. */
5640 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5641 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5642 val = tr32(TG3PCI_PCISTATE);
5643 val |= PCISTATE_RETRY_SAME_DMA;
5644 tw32(TG3PCI_PCISTATE, val);
5647 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5648 /* Enable some hw fixes. */
5649 val = tr32(TG3PCI_MSI_DATA);
5650 val |= (1 << 26) | (1 << 28) | (1 << 29);
5651 tw32(TG3PCI_MSI_DATA, val);
5654 /* Descriptor ring init may make accesses to the
5655 * NIC SRAM area to setup the TX descriptors, so we
5656 * can only do this after the hardware has been
5657 * successfully reset.
5661 /* This value is determined during the probe time DMA
5662 * engine test, tg3_test_dma.
5664 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Rebuild grc_mode: host-resident send BDs plus the configured
 * pseudo-header checksum suppression flags. */
5666 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5667 GRC_MODE_4X_NIC_SEND_RINGS |
5668 GRC_MODE_NO_TX_PHDR_CSUM |
5669 GRC_MODE_NO_RX_PHDR_CSUM);
5670 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5671 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5672 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5673 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5674 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5678 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5680 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5681 val = tr32(GRC_MISC_CFG);
5683 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5684 tw32(GRC_MISC_CFG, val);
5686 /* Initialize MBUF/DESC pool. */
5687 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5689 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5690 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5692 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5694 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5695 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5696 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5698 #if TG3_TSO_SUPPORT != 0
/* 5705 with firmware TSO: the TSO image lives at the bottom of the
 * MBUF pool, so shrink and shift the pool past the firmware. */
5699 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5702 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5703 TG3_TSO5_FW_RODATA_LEN +
5704 TG3_TSO5_FW_DATA_LEN +
5705 TG3_TSO5_FW_SBSS_LEN +
5706 TG3_TSO5_FW_BSS_LEN);
/* Round firmware footprint up to a 128-byte boundary. */
5707 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5708 tw32(BUFMGR_MB_POOL_ADDR,
5709 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5710 tw32(BUFMGR_MB_POOL_SIZE,
5711 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Buffer manager watermarks: standard vs jumbo MTU profiles. */
5715 if (tp->dev->mtu <= ETH_DATA_LEN) {
5716 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5717 tp->bufmgr_config.mbuf_read_dma_low_water);
5718 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5719 tp->bufmgr_config.mbuf_mac_rx_low_water);
5720 tw32(BUFMGR_MB_HIGH_WATER,
5721 tp->bufmgr_config.mbuf_high_water);
5723 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5724 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5725 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5726 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5727 tw32(BUFMGR_MB_HIGH_WATER,
5728 tp->bufmgr_config.mbuf_high_water_jumbo);
5730 tw32(BUFMGR_DMA_LOW_WATER,
5731 tp->bufmgr_config.dma_low_water);
5732 tw32(BUFMGR_DMA_HIGH_WATER,
5733 tp->bufmgr_config.dma_high_water);
/* Enable the buffer manager and poll until it reports enabled. */
5735 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5736 for (i = 0; i < 2000; i++) {
5737 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5742 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5747 /* Setup replenish threshold. */
5748 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5750 /* Initialize TG3_BDINFO's at:
5751 * RCVDBDI_STD_BD: standard eth size rx ring
5752 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5753 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5756 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5757 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5758 * ring attribute flags
5759 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5761 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5762 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5764 * The size of each ring is fixed in the firmware, but the location is
5767 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5768 ((u64) tp->rx_std_mapping >> 32));
5769 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5770 ((u64) tp->rx_std_mapping & 0xffffffff));
5771 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5772 NIC_SRAM_RX_BUFFER_DESC);
5774 /* Don't even try to program the JUMBO/MINI buffer descriptor
5777 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5778 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5779 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5781 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5782 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
/* Mini ring is never used; mark it disabled. */
5784 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5785 BDINFO_FLAGS_DISABLED);
5787 /* Setup replenish threshold. */
5788 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5790 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5791 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5792 ((u64) tp->rx_jumbo_mapping >> 32));
5793 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5794 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5795 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5796 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5797 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5798 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5800 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5801 BDINFO_FLAGS_DISABLED);
5806 /* There is only one send ring on 5705/5750, no need to explicitly
5807 * disable the others.
5809 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5810 /* Clear out send RCB ring in SRAM. */
5811 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5812 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5813 BDINFO_FLAGS_DISABLED);
/* Zero the TX producer mailboxes before installing the send ring. */
5818 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5819 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5821 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5822 tp->tx_desc_mapping,
5823 (TG3_TX_RING_SIZE <<
5824 BDINFO_FLAGS_MAXLEN_SHIFT),
5825 NIC_SRAM_TX_BUFFER_DESC);
5827 /* There is only one receive return ring on 5705/5750, no need
5828 * to explicitly disable the others.
5830 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5831 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5832 i += TG3_BDINFO_SIZE) {
5833 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5834 BDINFO_FLAGS_DISABLED);
5839 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5841 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5843 (TG3_RX_RCB_RING_SIZE(tp) <<
5844 BDINFO_FLAGS_MAXLEN_SHIFT),
/* Prime the RX producer indices: standard always, jumbo only when the
 * jumbo ring is enabled. */
5847 tp->rx_std_ptr = tp->rx_pending;
5848 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5851 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5852 tp->rx_jumbo_pending : 0;
5853 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5856 /* Initialize MAC address and backoff seed. */
5857 __tg3_set_mac_addr(tp);
5859 /* MTU + ethernet header + FCS + optional VLAN tag */
5860 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5862 /* The slot time is changed by tg3_setup_phy if we
5863 * run at gigabit with half duplex.
5865 tw32(MAC_TX_LENGTHS,
5866 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5867 (6 << TX_LENGTHS_IPG_SHIFT) |
5868 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5870 /* Receive rules. */
5871 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5872 tw32(RCVLPC_CONFIG, 0x0181);
5874 /* Calculate RDMAC_MODE setting early, we need it to determine
5875 * the RCVLPC_STATE_ENABLE mask.
5877 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5878 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5879 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5880 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5881 RDMAC_MODE_LNGREAD_ENAB);
5882 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5883 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5885 /* If statement applies to 5705 and 5750 PCI devices only */
5886 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5887 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5888 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5889 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5890 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5891 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5892 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5893 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5894 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5895 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5899 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5900 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5902 #if TG3_TSO_SUPPORT != 0
5903 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5904 rdmac_mode |= (1 << 27);
5907 /* Receive/send statistics. */
5908 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5909 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5910 val = tr32(RCVLPC_STATS_ENABLE);
5911 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5912 tw32(RCVLPC_STATS_ENABLE, val);
5914 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5916 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5917 tw32(SNDDATAI_STATSENAB, 0xffffff);
5918 tw32(SNDDATAI_STATSCTRL,
5919 (SNDDATAI_SCTRL_ENABLE |
5920 SNDDATAI_SCTRL_FASTUPD));
5922 /* Setup host coalescing engine. */
5923 tw32(HOSTCC_MODE, 0);
5924 for (i = 0; i < 2000; i++) {
5925 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5930 __tg3_set_coalesce(tp, &tp->coal);
5932 /* set status block DMA address */
5933 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5934 ((u64) tp->status_mapping >> 32));
5935 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5936 ((u64) tp->status_mapping & 0xffffffff));
5938 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5939 /* Status/statistics block address. See tg3_timer,
5940 * the tg3_periodic_fetch_stats call there, and
5941 * tg3_get_stats to see how this works for 5705/5750 chips.
5943 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5944 ((u64) tp->stats_mapping >> 32));
5945 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5946 ((u64) tp->stats_mapping & 0xffffffff));
5947 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5948 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5951 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
/* Bring up receive completion/list-placement state machines. */
5953 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5954 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5955 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5956 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5958 /* Clear statistics/status block in chip, and status block in ram. */
5959 for (i = NIC_SRAM_STATS_BLK;
5960 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5962 tg3_write_mem(tp, i, 0);
5965 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5967 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5968 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5969 /* reset to prevent losing 1st rx packet intermittently */
5970 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5974 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5975 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5976 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5979 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5980 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5981 * register to preserve the GPIO settings for LOMs. The GPIOs,
5982 * whether used as inputs or outputs, are set by boot code after
5985 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5988 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5989 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5992 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5993 GRC_LCLCTRL_GPIO_OUTPUT3;
5995 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5997 /* GPIO1 must be driven high for eeprom write protect */
5998 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5999 GRC_LCLCTRL_GPIO_OUTPUT1);
6001 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6004 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6007 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6008 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
/* Assemble the write-DMA engine mode bits, mirroring the RDMAC
 * enable/attention flags above. */
6012 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6013 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6014 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6015 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6016 WDMAC_MODE_LNGREAD_ENAB);
6018 /* If statement applies to 5705 and 5750 PCI devices only */
6019 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6020 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
/* NOTE(review): TG3_FLG2_TSO_CAPABLE is a tg3_flags2 bit but is
 * tested against tp->tg3_flags here -- the parallel RDMAC test
 * earlier uses tg3_flags2.  Looks like a bug; confirm against the
 * flag definitions before changing (surrounding lines are elided). */
6022 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6023 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6024 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6026 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6027 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6028 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6029 val |= WDMAC_MODE_RX_ACCEL;
6033 /* Enable host coalescing bug fix */
6034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
6037 tw32_f(WDMAC_MODE, val);
/* PCI-X burst/split-transaction tuning for 5703/5704. */
6040 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6041 val = tr32(TG3PCI_X_CAPS);
6042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6043 val &= ~PCIX_CAPS_BURST_MASK;
6044 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6045 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6046 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6047 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6048 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6049 val |= (tp->split_mode_max_reqs <<
6050 PCIX_CAPS_SPLIT_SHIFT);
6052 tw32(TG3PCI_X_CAPS, val);
6055 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining internal send/receive state machines. */
6058 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6059 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6060 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6061 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6062 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6063 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6064 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6065 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6066 #if TG3_TSO_SUPPORT != 0
6067 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6068 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6070 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6071 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
/* Chip-bug firmware patch for 5701 A0 silicon. */
6073 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6074 err = tg3_load_5701_a0_firmware_fix(tp);
6079 #if TG3_TSO_SUPPORT != 0
6080 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6081 err = tg3_load_tso_firmware(tp);
6087 tp->tx_mode = TX_MODE_ENABLE;
6088 tw32_f(MAC_TX_MODE, tp->tx_mode);
6091 tp->rx_mode = RX_MODE_ENABLE;
6092 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Restore the link parameters saved when the PHY was powered down. */
6095 if (tp->link_config.phy_is_low_power) {
6096 tp->link_config.phy_is_low_power = 0;
6097 tp->link_config.speed = tp->link_config.orig_speed;
6098 tp->link_config.duplex = tp->link_config.orig_duplex;
6099 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6102 tp->mi_mode = MAC_MI_MODE_BASE;
6103 tw32_f(MAC_MI_MODE, tp->mi_mode);
6106 tw32(MAC_LED_CTRL, tp->led_ctrl);
6108 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6109 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6110 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6113 tw32_f(MAC_RX_MODE, tp->rx_mode);
6116 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6117 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6118 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6119 /* Set drive transmission level to 1.2V */
6120 /* only if the signal pre-emphasis bit is not set */
6121 val = tr32(MAC_SERDES_CFG);
6124 tw32(MAC_SERDES_CFG, val);
6126 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6127 tw32(MAC_SERDES_CFG, 0x616000);
6130 /* Prevent chip from dropping frames when flow control
6133 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6136 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6137 /* Use hardware link auto-negotiation */
6138 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
/* 5714 MII-serdes: use the internal signal-detect circuitry. */
6141 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6142 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6145 tmp = tr32(SERDES_RX_CTRL);
6146 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6147 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6148 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6149 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6152 err = tg3_setup_phy(tp, 1);
/* Copper PHYs: reset the CRC error counter (reg 0x1e/0x14 shadow). */
6156 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6159 /* Clear CRC stats. */
6160 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6161 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6162 tg3_readphy(tp, 0x14, &tmp);
6166 __tg3_set_rx_mode(tp->dev);
6168 /* Initialize receive rules. */
6169 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6170 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6171 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6172 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6174 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6175 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6179 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
/* Clear every unused receive rule slot (fallthrough by rule count --
 * the switch framing is elided in this excerpt). */
6183 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6185 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6187 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6189 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6191 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6193 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6195 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6197 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6199 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6201 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6203 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6205 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6207 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6209 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6217 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6222 /* Called at device open time to get the chip ready for
6223 * packet processing. Invoked with tp->lock held.
/* Sequence: force the chip into full-power D0, switch core clocks,
 * clear the PCI memory window base, then run the full reset/init in
 * tg3_reset_hw().  Returns 0 or a negative errno.
 * NOTE(review): the early-return-on-error lines and closing brace are
 * elided from this excerpt. */
6225 static int tg3_init_hw(struct tg3 *tp)
6229 /* Force the chip into D0. */
6230 err = tg3_set_power_state(tp, PCI_D0);
6234 tg3_switch_clocks(tp);
6236 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6238 err = tg3_reset_hw(tp);
/* Accumulate a 32-bit hardware counter register (read via tr32) into a
 * 64-bit software statistic kept as a {high,low} pair, carrying into
 * .high whenever .low wraps around.
 * NOTE(review): the trailing "} while (0)" of this macro is elided
 * from this excerpt. */
6244 #define TG3_STAT_ADD32(PSTAT, REG) \
6245 do { u32 __val = tr32(REG); \
6246 (PSTAT)->low += __val; \
6247 if ((PSTAT)->low < __val) \
6248 (PSTAT)->high += 1; \
/* Accumulate the chip's 32-bit MAC TX/RX statistics registers into the
 * 64-bit counters of the host statistics block (tp->hw_stats).  Used
 * on chips whose stats block is not DMA'd by hardware (see the comment
 * near HOSTCC_STATS_BLK programming in tg3_reset_hw).  Skipped while
 * the carrier is down (the early-exit body is elided here).
 * NOTE(review): function braces and the return after the carrier check
 * are elided from this excerpt. */
6251 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6253 struct tg3_hw_stats *sp = tp->hw_stats;
6255 if (!netif_carrier_ok(tp->dev))
/* Transmit-side MAC counters. */
6258 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6259 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6260 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6261 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6262 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6263 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6264 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6265 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6266 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6267 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6268 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6269 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6270 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side MAC counters. */
6272 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6273 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6274 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6275 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6276 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6277 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6278 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6279 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6280 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6281 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6282 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6283 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6284 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6285 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Driver heartbeat timer, run under tp->lock.  Each tick it
 * (a) works around the race-prone non-tagged IRQ status protocol by
 *     forcing an interrupt / coalescing-now when a status update is
 *     pending, and checks the write-DMA engine as a chip-hang
 *     watchdog, scheduling reset_task if it has stopped;
 * (b) once per second, fetches stats (5705+) and polls link state by
 *     whichever method the board needs (link-change register, serdes
 *     status polling, or MII-serdes parallel detection);
 * (c) every ~2 seconds, sends the ASF firmware heartbeat via the
 *     NIC SRAM mailbox registers; then re-arms itself.
 * NOTE(review): many original lines (returns, braces, udelays, parts
 * of the serdes branch) are elided from this excerpt. */
6288 static void tg3_timer(unsigned long __opaque)
6290 struct tg3 *tp = (struct tg3 *) __opaque;
6292 spin_lock(&tp->lock);
6294 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6295 /* All of this garbage is because when using non-tagged
6296 * IRQ status the mailbox/status_block protocol the chip
6297 * uses with the cpu is race prone.
6299 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6300 tw32(GRC_LOCAL_CTRL,
6301 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6303 tw32(HOSTCC_MODE, tp->coalesce_mode |
6304 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* WDMAC disabled unexpectedly => chip wedged; defer a full reset to
 * process context (reset_task) and skip re-arming here. */
6307 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6308 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6309 spin_unlock(&tp->lock);
6310 schedule_work(&tp->reset_task);
6315 /* This part only runs once per second. */
6316 if (!--tp->timer_counter) {
6317 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6318 tg3_periodic_fetch_stats(tp);
6320 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6324 mac_stat = tr32(MAC_STATUS);
6327 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6328 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6330 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6334 tg3_setup_phy(tp, 0);
6335 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6336 u32 mac_stat = tr32(MAC_STATUS);
6339 if (netif_carrier_ok(tp->dev) &&
6340 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6343 if (! netif_carrier_ok(tp->dev) &&
6344 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6345 MAC_STATUS_SIGNAL_DET))) {
6351 ~MAC_MODE_PORT_MODE_MASK));
6353 tw32_f(MAC_MODE, tp->mac_mode);
6355 tg3_setup_phy(tp, 0);
6357 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6358 tg3_serdes_parallel_detect(tp);
6360 tp->timer_counter = tp->timer_multiplier;
6363 /* Heartbeat is only sent once every 2 seconds. */
6364 if (!--tp->asf_counter) {
6365 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
/* Tell ASF firmware the driver is alive: command, length, and a
 * 5-second timeout value, then kick the RX CPU event register. */
6368 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6369 FWCMD_NICDRV_ALIVE2);
6370 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6371 /* 5 seconds timeout */
6372 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6373 val = tr32(GRC_RX_CPU_EVENT);
6375 tw32(GRC_RX_CPU_EVENT, val);
6377 tp->asf_counter = tp->asf_multiplier;
6380 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
6382 tp->timer.expires = jiffies + tp->timer_offset;
6383 add_timer(&tp->timer);
/* Generate and observe one test interrupt: swap the IRQ handler for
 * tg3_test_isr, enable interrupts, force a coalescing-now event, and
 * poll the interrupt mailbox up to 5 times for the acknowledgement.
 * Afterwards the normal handler (MSI, or shared INTx with or without
 * tagged status) is reinstalled.  Caller interprets the result.
 * NOTE(review): the return statements, the mailbox-hit break, and the
 * closing braces are elided from this excerpt. */
6386 static int tg3_test_interrupt(struct tg3 *tp)
6388 struct net_device *dev = tp->dev;
6392 if (!netif_running(dev))
6395 tg3_disable_ints(tp);
/* Temporarily replace the production ISR with the test ISR. */
6397 free_irq(tp->pdev->irq, dev);
6399 err = request_irq(tp->pdev->irq, tg3_test_isr,
6400 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6404 tp->hw_status->status &= ~SD_STATUS_UPDATED;
6405 tg3_enable_ints(tp);
/* HOSTCC_MODE_NOW forces the coalescing engine to fire immediately. */
6407 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6410 for (i = 0; i < 5; i++) {
6411 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6418 tg3_disable_ints(tp);
/* Restore the real interrupt handler. */
6420 free_irq(tp->pdev->irq, dev);
6422 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6423 err = request_irq(tp->pdev->irq, tg3_msi,
6424 SA_SAMPLE_RANDOM, dev->name, dev);
6426 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6427 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6428 fn = tg3_interrupt_tagged;
6429 err = request_irq(tp->pdev->irq, fn,
6430 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6442 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6443 * successfully restored
/* Runs tg3_test_interrupt() with SERR reporting masked (an MSI cycle
 * ending in Master Abort would otherwise raise SERR).  If no MSI
 * interrupt arrives, falls back to legacy INTx: disable MSI, install
 * the appropriate INTx handler, and reset/re-init the chip because
 * the failed MSI cycle may have terminated with Master Abort.
 * NOTE(review): returns, braces, and parts of the error paths are
 * elided from this excerpt. */
6445 static int tg3_test_msi(struct tg3 *tp)
6447 struct net_device *dev = tp->dev;
/* Nothing to test unless MSI is actually in use. */
6451 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6454 /* Turn off SERR reporting in case MSI terminates with Master
6457 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6458 pci_write_config_word(tp->pdev, PCI_COMMAND,
6459 pci_cmd & ~PCI_COMMAND_SERR);
6461 err = tg3_test_interrupt(tp);
/* Restore the original PCI command word before deciding the outcome. */
6463 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6468 /* other failures */
6472 /* MSI test failed, go back to INTx mode */
6473 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6474 "switching to INTx mode. Please report this failure to "
6475 "the PCI maintainer and include system chipset information.\n",
6478 free_irq(tp->pdev->irq, dev);
6479 pci_disable_msi(tp->pdev);
6481 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Re-request the line as shared INTx, tagged or plain per chip. */
6484 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6485 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6486 fn = tg3_interrupt_tagged;
6488 err = request_irq(tp->pdev->irq, fn,
6489 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6494 /* Need to reset the chip because the MSI cycle may have terminated
6495 * with Master Abort.
6497 tg3_full_lock(tp, 1);
6499 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6500 err = tg3_init_hw(tp);
6502 tg3_full_unlock(tp);
/* On re-init failure the IRQ is released again (error path elided). */
6505 free_irq(tp->pdev->irq, dev);
/* net_device open hook: power the chip to D0, allocate DMA rings,
 * pick MSI vs. INTx, register the IRQ handler, program the hardware,
 * start the driver timer and finally enable interrupts.
 * NOTE(review): this extract is line-sampled; error-handling lines and
 * some braces are missing.  Only comments are added.
 */
6510 static int tg3_open(struct net_device *dev)
6512 struct tg3 *tp = netdev_priv(dev);
6515 tg3_full_lock(tp, 0);
6517 err = tg3_set_power_state(tp, PCI_D0);
6521 tg3_disable_ints(tp);
6522 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6524 tg3_full_unlock(tp);
6526 /* The placement of this call is tied
6527 * to the setup and use of Host TX descriptors.
6529 err = tg3_alloc_consistent(tp);
/* MSI is used only on 5750+ parts, excluding the A0/B0 revisions and
 * single-port 5714s, where it is known to be unreliable. */
6533 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6534 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6535 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6536 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6537 (tp->pdev_peer == tp->pdev))) {
6538 /* All MSI supporting chips should support tagged
6539 * status. Assert that this is the case.
6541 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6542 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6543 "Not using MSI.\n", tp->dev->name);
6544 } else if (pci_enable_msi(tp->pdev) == 0) {
6547 msi_mode = tr32(MSGINT_MODE);
6548 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6549 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
/* MSI vector is exclusive; INTx falls back to a shared handler,
 * tagged-status chips getting their dedicated variant. */
6552 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6553 err = request_irq(tp->pdev->irq, tg3_msi,
6554 SA_SAMPLE_RANDOM, dev->name, dev);
6556 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6557 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6558 fn = tg3_interrupt_tagged;
6560 err = request_irq(tp->pdev->irq, fn,
6561 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
/* request_irq failure path: undo MSI and free the DMA rings. */
6565 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6566 pci_disable_msi(tp->pdev);
6567 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6569 tg3_free_consistent(tp);
6573 tg3_full_lock(tp, 0);
6575 err = tg3_init_hw(tp);
6577 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Tagged status needs only a 1 Hz watchdog; untagged polls at 10 Hz. */
6580 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6581 tp->timer_offset = HZ;
6583 tp->timer_offset = HZ / 10;
6585 BUG_ON(tp->timer_offset > HZ);
6586 tp->timer_counter = tp->timer_multiplier =
6587 (HZ / tp->timer_offset);
6588 tp->asf_counter = tp->asf_multiplier =
6589 ((HZ / tp->timer_offset) * 2);
6591 init_timer(&tp->timer);
6592 tp->timer.expires = jiffies + tp->timer_offset;
6593 tp->timer.data = (unsigned long) tp;
6594 tp->timer.function = tg3_timer;
6597 tg3_full_unlock(tp);
/* hw-init failure path: release IRQ, MSI and rings. */
6600 free_irq(tp->pdev->irq, dev);
6601 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6602 pci_disable_msi(tp->pdev);
6603 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6605 tg3_free_consistent(tp);
/* Verify MSI actually delivers before trusting it (see tg3_test_msi). */
6609 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6610 err = tg3_test_msi(tp);
6613 tg3_full_lock(tp, 0);
6615 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6616 pci_disable_msi(tp->pdev);
6617 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6619 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6621 tg3_free_consistent(tp);
6623 tg3_full_unlock(tp);
/* Success: arm the timer, mark init complete, unmask interrupts. */
6629 tg3_full_lock(tp, 0);
6631 add_timer(&tp->timer);
6632 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6633 tg3_enable_ints(tp);
6635 tg3_full_unlock(tp);
6637 netif_start_queue(dev);
/* Debug-only helper: dump PCI status, every MAC/DMA/ring control block,
 * the NIC-side SRAM control blocks and the host status/statistics blocks
 * to the console.  Purely reads; called by hand when diagnosing hangs
 * (hence the commented-out static).  NOTE(review): extract is
 * line-sampled; local declarations (u16 val16, int i, pointers) and some
 * braces are missing.  Only comments are added.
 */
6643 /*static*/ void tg3_dump_state(struct tg3 *tp)
6645 u32 val32, val32_2, val32_3, val32_4, val32_5;
6649 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6650 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6651 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6655 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6656 tr32(MAC_MODE), tr32(MAC_STATUS));
6657 printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6658 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6659 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6660 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6661 printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6662 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6664 /* Send data initiator control block */
6665 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6666 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6667 printk("       SNDDATAI_STATSCTRL[%08x]\n",
6668 tr32(SNDDATAI_STATSCTRL));
6670 /* Send data completion control block */
6671 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6673 /* Send BD ring selector block */
6674 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6675 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6677 /* Send BD initiator control block */
6678 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6679 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6681 /* Send BD completion control block */
6682 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6684 /* Receive list placement control block */
6685 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6686 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6687 printk("       RCVLPC_STATSCTRL[%08x]\n",
6688 tr32(RCVLPC_STATSCTRL));
6690 /* Receive data and receive BD initiator control block */
6691 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6692 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6694 /* Receive data completion control block */
6695 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6698 /* Receive BD initiator control block */
6699 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6700 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6702 /* Receive BD completion control block */
6703 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6704 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6706 /* Receive list selector control block */
6707 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6708 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6710 /* Mbuf cluster free block */
6711 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6712 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6714 /* Host coalescing control block */
6715 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6716 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6717 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6718 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6719 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6720 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6721 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6722 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6723 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6724 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6725 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6726 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6728 /* Memory arbiter control block */
6729 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6730 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6732 /* Buffer manager control block */
6733 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6734 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6735 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6736 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6737 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6738 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6739 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6740 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6742 /* Read DMA control block */
6743 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6744 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6746 /* Write DMA control block */
6747 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6748 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6750 /* DMA completion block */
6751 printk("DEBUG: DMAC_MODE[%08x]\n",
6755 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6756 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6757 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6758 tr32(GRC_LOCAL_CTRL));
/* On-chip receive BD ring control blocks (addr hi/lo, len, flags). */
6761 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6762 tr32(RCVDBDI_JUMBO_BD + 0x0),
6763 tr32(RCVDBDI_JUMBO_BD + 0x4),
6764 tr32(RCVDBDI_JUMBO_BD + 0x8),
6765 tr32(RCVDBDI_JUMBO_BD + 0xc));
6766 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6767 tr32(RCVDBDI_STD_BD + 0x0),
6768 tr32(RCVDBDI_STD_BD + 0x4),
6769 tr32(RCVDBDI_STD_BD + 0x8),
6770 tr32(RCVDBDI_STD_BD + 0xc));
6771 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6772 tr32(RCVDBDI_MINI_BD + 0x0),
6773 tr32(RCVDBDI_MINI_BD + 0x4),
6774 tr32(RCVDBDI_MINI_BD + 0x8),
6775 tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks stored in NIC SRAM, read via the memory window. */
6777 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6778 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6779 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6780 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6781 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6782 val32, val32_2, val32_3, val32_4);
6784 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6785 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6786 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6787 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6788 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6789 val32, val32_2, val32_3, val32_4);
6791 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6792 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6793 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6794 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6795 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6796 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6797 val32, val32_2, val32_3, val32_4, val32_5);
6799 /* SW status block */
6800 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6801 tp->hw_status->status,
6802 tp->hw_status->status_tag,
6803 tp->hw_status->rx_jumbo_consumer,
6804 tp->hw_status->rx_consumer,
6805 tp->hw_status->rx_mini_consumer,
6806 tp->hw_status->idx[0].rx_producer,
6807 tp->hw_status->idx[0].tx_consumer);
6809 /* SW statistics block */
6810 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6811 ((u32 *)tp->hw_stats)[0],
6812 ((u32 *)tp->hw_stats)[1],
6813 ((u32 *)tp->hw_stats)[2],
6814 ((u32 *)tp->hw_stats)[3]);
6817 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6818 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6819 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6820 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6821 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6823 /* NIC side send descriptors. */
6824 for (i = 0; i < 6; i++) {
6827 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6828 + (i * sizeof(struct tg3_tx_buffer_desc));
6829 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6831 readl(txd + 0x0), readl(txd + 0x4),
6832 readl(txd + 0x8), readl(txd + 0xc));
6835 /* NIC side RX descriptors. */
6836 for (i = 0; i < 6; i++) {
6839 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6840 + (i * sizeof(struct tg3_rx_buffer_desc));
6841 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6843 readl(rxd + 0x0), readl(rxd + 0x4),
6844 readl(rxd + 0x8), readl(rxd + 0xc));
6845 rxd += (4 * sizeof(u32));
6846 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6848 readl(rxd + 0x0), readl(rxd + 0x4),
6849 readl(rxd + 0x8), readl(rxd + 0xc));
6852 for (i = 0; i < 6; i++) {
6855 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6856 + (i * sizeof(struct tg3_rx_buffer_desc));
6857 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6859 readl(rxd + 0x0), readl(rxd + 0x4),
6860 readl(rxd + 0x8), readl(rxd + 0xc));
6861 rxd += (4 * sizeof(u32));
6862 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6864 readl(rxd + 0x0), readl(rxd + 0x4),
6865 readl(rxd + 0x8), readl(rxd + 0xc));
6870 static struct net_device_stats *tg3_get_stats(struct net_device *);
6871 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop hook: quiesce the chip, release IRQ/MSI resources,
 * preserve final counters, free DMA rings and drop to D3hot.
 * NOTE(review): extract is line-sampled; some lines (schedule_timeout
 * loop body, netif_tx_disable vs stop ordering) are missing.
 */
6873 static int tg3_close(struct net_device *dev)
6875 struct tg3 *tp = netdev_priv(dev);
6877 /* Calling flush_scheduled_work() may deadlock because
6878 * linkwatch_event() may be on the workqueue and it will try to get
6879 * the rtnl_lock which we are holding.
/* Busy-wait for any in-flight reset task instead of flushing it. */
6881 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6884 netif_stop_queue(dev);
6886 del_timer_sync(&tp->timer);
6888 tg3_full_lock(tp, 1);
6893 tg3_disable_ints(tp);
6895 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6898 ~(TG3_FLAG_INIT_COMPLETE |
6899 TG3_FLAG_GOT_SERDES_FLOWCTL);
6901 tg3_full_unlock(tp);
6903 free_irq(tp->pdev->irq, dev);
6904 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6905 pci_disable_msi(tp->pdev);
6906 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Snapshot counters before the rings (and hw_stats) are freed, so the
 * totals survive across an ifdown/ifup cycle. */
6909 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6910 sizeof(tp->net_stats_prev));
6911 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6912 sizeof(tp->estats_prev));
6914 tg3_free_consistent(tp);
6916 tg3_set_power_state(tp, PCI_D3hot);
6918 netif_carrier_off(tp->dev);
/* Fold one 64-bit big/little-half hardware counter into an unsigned
 * long.  NOTE(review): the 32-bit arm of the #if (which presumably
 * returns only val->low) is missing from this extract — confirm against
 * the full source.
 */
6923 static inline unsigned long get_stat64(tg3_stat64_t *val)
6927 #if (BITS_PER_LONG == 32)
6930 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  5700/5701 copper parts
 * keep this in a PHY shadow register that must be read (and thereby
 * cleared) via the expansion-register window; all other chips report it
 * in the hardware statistics block.
 */
6935 static unsigned long calc_crc_errors(struct tg3 *tp)
6937 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6939 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6940 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
/* 0x1e/0x14 are PHY shadow registers; the counter clears on read,
 * so accumulate into tp->phy_crc_errors. */
6944 spin_lock_bh(&tp->lock);
6945 if (!tg3_readphy(tp, 0x1e, &val)) {
6946 tg3_writephy(tp, 0x1e, val | 0x8000);
6947 tg3_readphy(tp, 0x14, &val);
6950 spin_unlock_bh(&tp->lock);
6952 tp->phy_crc_errors += val;
6954 return tp->phy_crc_errors;
6957 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD(member): textual macro — expects locals named estats,
 * old_estats and hw_stats in the expanding scope.  Adds the live
 * hardware counter to the value saved at the last tg3_close() so totals
 * are monotonic across ifdown/ifup.
 */
6960 #define ESTAT_ADD(member) \
6961 estats->member = old_estats->member + \
6962 get_stat64(&hw_stats->member)
/* Refresh tp->estats from the DMA'd hardware statistics block, field by
 * field, and return it.  Layout mirrors struct tg3_ethtool_stats.
 */
6964 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6966 struct tg3_ethtool_stats *estats = &tp->estats;
6967 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6968 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6973 ESTAT_ADD(rx_octets);
6974 ESTAT_ADD(rx_fragments);
6975 ESTAT_ADD(rx_ucast_packets);
6976 ESTAT_ADD(rx_mcast_packets);
6977 ESTAT_ADD(rx_bcast_packets);
6978 ESTAT_ADD(rx_fcs_errors);
6979 ESTAT_ADD(rx_align_errors);
6980 ESTAT_ADD(rx_xon_pause_rcvd);
6981 ESTAT_ADD(rx_xoff_pause_rcvd);
6982 ESTAT_ADD(rx_mac_ctrl_rcvd);
6983 ESTAT_ADD(rx_xoff_entered);
6984 ESTAT_ADD(rx_frame_too_long_errors);
6985 ESTAT_ADD(rx_jabbers);
6986 ESTAT_ADD(rx_undersize_packets);
6987 ESTAT_ADD(rx_in_length_errors);
6988 ESTAT_ADD(rx_out_length_errors);
6989 ESTAT_ADD(rx_64_or_less_octet_packets);
6990 ESTAT_ADD(rx_65_to_127_octet_packets);
6991 ESTAT_ADD(rx_128_to_255_octet_packets);
6992 ESTAT_ADD(rx_256_to_511_octet_packets);
6993 ESTAT_ADD(rx_512_to_1023_octet_packets);
6994 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6995 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6996 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6997 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6998 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7000 ESTAT_ADD(tx_octets);
7001 ESTAT_ADD(tx_collisions);
7002 ESTAT_ADD(tx_xon_sent);
7003 ESTAT_ADD(tx_xoff_sent);
7004 ESTAT_ADD(tx_flow_control);
7005 ESTAT_ADD(tx_mac_errors);
7006 ESTAT_ADD(tx_single_collisions);
7007 ESTAT_ADD(tx_mult_collisions);
7008 ESTAT_ADD(tx_deferred);
7009 ESTAT_ADD(tx_excessive_collisions);
7010 ESTAT_ADD(tx_late_collisions);
7011 ESTAT_ADD(tx_collide_2times);
7012 ESTAT_ADD(tx_collide_3times);
7013 ESTAT_ADD(tx_collide_4times);
7014 ESTAT_ADD(tx_collide_5times);
7015 ESTAT_ADD(tx_collide_6times);
7016 ESTAT_ADD(tx_collide_7times);
7017 ESTAT_ADD(tx_collide_8times);
7018 ESTAT_ADD(tx_collide_9times);
7019 ESTAT_ADD(tx_collide_10times);
7020 ESTAT_ADD(tx_collide_11times);
7021 ESTAT_ADD(tx_collide_12times);
7022 ESTAT_ADD(tx_collide_13times);
7023 ESTAT_ADD(tx_collide_14times);
7024 ESTAT_ADD(tx_collide_15times);
7025 ESTAT_ADD(tx_ucast_packets);
7026 ESTAT_ADD(tx_mcast_packets);
7027 ESTAT_ADD(tx_bcast_packets);
7028 ESTAT_ADD(tx_carrier_sense_errors);
7029 ESTAT_ADD(tx_discards);
7030 ESTAT_ADD(tx_errors);
7032 ESTAT_ADD(dma_writeq_full);
7033 ESTAT_ADD(dma_write_prioq_full);
7034 ESTAT_ADD(rxbds_empty);
7035 ESTAT_ADD(rx_discards);
7036 ESTAT_ADD(rx_errors);
7037 ESTAT_ADD(rx_threshold_hit);
7039 ESTAT_ADD(dma_readq_full);
7040 ESTAT_ADD(dma_read_prioq_full);
7041 ESTAT_ADD(tx_comp_queue_full);
7043 ESTAT_ADD(ring_set_send_prod_index);
7044 ESTAT_ADD(ring_status_update);
7045 ESTAT_ADD(nic_irqs);
7046 ESTAT_ADD(nic_avoided_irqs);
7047 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device get_stats hook: map the hardware statistics block onto
 * struct net_device_stats.  Each field is the pre-ifdown snapshot
 * (net_stats_prev, saved in tg3_close) plus the live hardware counter,
 * so totals survive interface restarts.
 */
7052 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7054 struct tg3 *tp = netdev_priv(dev);
7055 struct net_device_stats *stats = &tp->net_stats;
7056 struct net_device_stats *old_stats = &tp->net_stats_prev;
7057 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7062 stats->rx_packets = old_stats->rx_packets +
7063 get_stat64(&hw_stats->rx_ucast_packets) +
7064 get_stat64(&hw_stats->rx_mcast_packets) +
7065 get_stat64(&hw_stats->rx_bcast_packets);
7067 stats->tx_packets = old_stats->tx_packets +
7068 get_stat64(&hw_stats->tx_ucast_packets) +
7069 get_stat64(&hw_stats->tx_mcast_packets) +
7070 get_stat64(&hw_stats->tx_bcast_packets);
7072 stats->rx_bytes = old_stats->rx_bytes +
7073 get_stat64(&hw_stats->rx_octets);
7074 stats->tx_bytes = old_stats->tx_bytes +
7075 get_stat64(&hw_stats->tx_octets);
7077 stats->rx_errors = old_stats->rx_errors +
7078 get_stat64(&hw_stats->rx_errors);
7079 stats->tx_errors = old_stats->tx_errors +
7080 get_stat64(&hw_stats->tx_errors) +
7081 get_stat64(&hw_stats->tx_mac_errors) +
7082 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7083 get_stat64(&hw_stats->tx_discards);
7085 stats->multicast = old_stats->multicast +
7086 get_stat64(&hw_stats->rx_mcast_packets);
7087 stats->collisions = old_stats->collisions +
7088 get_stat64(&hw_stats->tx_collisions);
7090 stats->rx_length_errors = old_stats->rx_length_errors +
7091 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7092 get_stat64(&hw_stats->rx_undersize_packets);
7094 stats->rx_over_errors = old_stats->rx_over_errors +
7095 get_stat64(&hw_stats->rxbds_empty);
7096 stats->rx_frame_errors = old_stats->rx_frame_errors +
7097 get_stat64(&hw_stats->rx_align_errors);
7098 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7099 get_stat64(&hw_stats->tx_discards);
7100 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7101 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors need the PHY-quirk path on 5700/5701, see calc_crc_errors. */
7103 stats->rx_crc_errors = old_stats->rx_crc_errors +
7104 calc_crc_errors(tp);
7106 stats->rx_missed_errors = old_stats->rx_missed_errors +
7107 get_stat64(&hw_stats->rx_discards);
/* Bitwise CRC32 over buf, used for the 7-bit multicast hash filter and
 * the NVRAM self-test checksums.  NOTE(review): the loop bodies (the
 * actual shift/XOR reduction and the reg initialization) are missing
 * from this extract; only the loop headers survive.
 */
7112 static inline u32 calc_crc(unsigned char *buf, int len)
7120 for (j = 0; j < len; j++) {
7123 for (k = 0; k < 8; k++) {
7137 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7139 /* accept or reject all multicast frames */
7140 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7141 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7142 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7143 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute the MAC RX mode (promisc / VLAN-tag stripping) and the
 * multicast hash filter from dev->flags and the multicast list.  Caller
 * must hold the full lock.  NOTE(review): extract is line-sampled; the
 * #if/#else structure around VLAN handling and some braces are missing.
 */
7146 static void __tg3_set_rx_mode(struct net_device *dev)
7148 struct tg3 *tp = netdev_priv(dev);
7151 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7152 RX_MODE_KEEP_VLAN_TAG);
7154 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7157 #if TG3_VLAN_TAG_USED
7159 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7160 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7162 /* By definition, VLAN is disabled always in this
7165 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7166 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7169 if (dev->flags & IFF_PROMISC) {
7170 /* Promiscuous mode. */
7171 rx_mode |= RX_MODE_PROMISC;
7172 } else if (dev->flags & IFF_ALLMULTI) {
7173 /* Accept all multicast. */
7174 tg3_set_multi (tp, 1);
7175 } else if (dev->mc_count < 1) {
7176 /* Reject all multicast. */
7177 tg3_set_multi (tp, 0);
7179 /* Accept one or more multicast(s). */
7180 struct dev_mc_list *mclist;
7182 u32 mc_filter[4] = { 0, };
/* Hash each address into one bit of the 128-bit filter: the CRC
 * selects a register (top 2 bits) and a bit within it. */
7187 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7188 i++, mclist = mclist->next) {
7190 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7192 regidx = (bit & 0x60) >> 5;
7194 mc_filter[regidx] |= (1 << bit);
7197 tw32(MAC_HASH_REG_0, mc_filter[0]);
7198 tw32(MAC_HASH_REG_1, mc_filter[1]);
7199 tw32(MAC_HASH_REG_2, mc_filter[2]);
7200 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the (flushed) MAC register if the mode actually changed. */
7203 if (rx_mode != tp->rx_mode) {
7204 tp->rx_mode = rx_mode;
7205 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_multicast_list hook: apply the RX filter under the
 * full lock.  A device that is not up has no live RX state to program,
 * so bail out early in that case.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: the register dump is a fixed 32 KiB. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
/* ethtool get_regs hook: fill a 32 KiB buffer with selected register
 * ranges at their native offsets (unread gaps stay zero from the
 * memset).  NOTE(review): extract is line-sampled; locals (p, orig_p,
 * i), the low-power early return and the #undef of __GET_REG32 are
 * missing.
 */
7229 static void tg3_get_regs(struct net_device *dev,
7230 struct ethtool_regs *regs, void *_p)
7233 struct tg3 *tp = netdev_priv(dev);
7239 memset(p, 0, TG3_REGDUMP_LEN);
/* Cannot touch registers while the PHY is powered down. */
7241 if (tp->link_config.phy_is_low_power)
7244 tg3_full_lock(tp, 0);
/* Helpers position the output cursor at the register's own offset so
 * the dump is self-describing. */
7246 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7247 #define GET_REG32_LOOP(base,len) \
7248 do { p = (u32 *)(orig_p + (base)); \
7249 for (i = 0; i < len; i += 4) \
7250 __GET_REG32((base) + i); \
7252 #define GET_REG32_1(reg) \
7253 do { p = (u32 *)(orig_p + (reg)); \
7254 __GET_REG32((reg)); \
7257 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7258 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7259 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7260 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7261 GET_REG32_1(SNDDATAC_MODE);
7262 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7263 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7264 GET_REG32_1(SNDBDC_MODE);
7265 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7266 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7267 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7268 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7269 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7270 GET_REG32_1(RCVDCC_MODE);
7271 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7272 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7273 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7274 GET_REG32_1(MBFREE_MODE);
7275 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7276 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7277 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7278 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7279 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7280 GET_REG32_1(RX_CPU_MODE);
7281 GET_REG32_1(RX_CPU_STATE);
7282 GET_REG32_1(RX_CPU_PGMCTR);
7283 GET_REG32_1(RX_CPU_HWBKPT);
7284 GET_REG32_1(TX_CPU_MODE);
7285 GET_REG32_1(TX_CPU_STATE);
7286 GET_REG32_1(TX_CPU_PGMCTR);
7287 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7288 GET_REG32_LOOP(FTQ_RESET, 0x120);
7289 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7290 GET_REG32_1(DMAC_MODE);
7291 GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers only exist when the chip actually has NVRAM. */
7292 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7293 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7296 #undef GET_REG32_LOOP
7299 tg3_full_unlock(tp);
7302 static int tg3_get_eeprom_len(struct net_device *dev)
7304 struct tg3 *tp = netdev_priv(dev);
7306 return tp->nvram_size;
7309 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
/* ethtool get_eeprom hook: read an arbitrary byte range from NVRAM via
 * 32-bit word reads, handling unaligned head and tail fragments.
 * NOTE(review): extract is line-sampled; error returns and the
 * eeprom->len accounting between sections are partially missing.
 */
7311 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7313 struct tg3 *tp = netdev_priv(dev);
7316 u32 i, offset, len, val, b_offset, b_count;
/* NVRAM is inaccessible while the chip is in low-power state. */
7318 if (tp->link_config.phy_is_low_power)
7321 offset = eeprom->offset;
7325 eeprom->magic = TG3_EEPROM_MAGIC;
7328 /* adjustments to start on required 4 byte boundary */
7329 b_offset = offset & 3;
7330 b_count = 4 - b_offset;
7331 if (b_count > len) {
7332 /* i.e. offset=1 len=2 */
/* Head fragment: read the enclosing word, copy out the needed bytes.
 * cpu_to_le32 keeps byte order consistent with the NVRAM image. */
7335 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7338 val = cpu_to_le32(val);
7339 memcpy(data, ((char*)&val) + b_offset, b_count);
7342 eeprom->len += b_count;
7345 /* read bytes upto the last 4 byte boundary */
7346 pd = &data[eeprom->len];
7347 for (i = 0; i < (len - (len & 3)); i += 4) {
7348 ret = tg3_nvram_read(tp, offset + i, &val);
7353 val = cpu_to_le32(val);
7354 memcpy(pd + i, &val, 4);
7359 /* read last bytes not ending on 4 byte boundary */
7360 pd = &data[eeprom->len];
7362 b_offset = offset + len - b_count;
7363 ret = tg3_nvram_read(tp, b_offset, &val);
7366 val = cpu_to_le32(val);
7367 memcpy(pd, ((char*)&val), b_count);
7368 eeprom->len += b_count;
7373 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom hook: write a byte range to NVRAM.  Because NVRAM
 * writes are word-granular, unaligned head/tail bytes are handled by
 * read-modify-write: the surrounding words are fetched, merged into a
 * temporary buffer and written back as one aligned block.
 * NOTE(review): extract is line-sampled; several error returns, the
 * odd_len computation and kfree of buf are missing.
 */
7375 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7377 struct tg3 *tp = netdev_priv(dev);
7379 u32 offset, len, b_offset, odd_len, start, end;
7382 if (tp->link_config.phy_is_low_power)
/* Refuse writes unless the caller proves intent via the magic token. */
7385 if (eeprom->magic != TG3_EEPROM_MAGIC)
7388 offset = eeprom->offset;
7391 if ((b_offset = (offset & 3))) {
7392 /* adjustments to start on required 4 byte boundary */
7393 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7396 start = cpu_to_le32(start);
7405 /* adjustments to end on required 4 byte boundary */
7407 len = (len + 3) & ~3;
7408 ret = tg3_nvram_read(tp, offset+len-4, &end);
7411 end = cpu_to_le32(end);
/* Unaligned edges: build an aligned image in a temporary buffer. */
7415 if (b_offset || odd_len) {
7416 buf = kmalloc(len, GFP_KERNEL);
7420 memcpy(buf, &start, 4);
7422 memcpy(buf+len-4, &end, 4);
7423 memcpy(buf + b_offset, data, eeprom->len);
7426 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings hook: report supported/advertised link modes and
 * the active speed/duplex.  SerDes parts advertise FIBRE only; copper
 * parts add the 10/100 modes, and gigabit modes unless the chip is
 * 10/100-only.  NOTE(review): extract is line-sampled; the
 * !netif_running else-branch and the SUPPORTED_TP/MII lines are missing.
 */
7434 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7436 struct tg3 *tp = netdev_priv(dev);
7438 cmd->supported = (SUPPORTED_Autoneg);
7440 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7441 cmd->supported |= (SUPPORTED_1000baseT_Half |
7442 SUPPORTED_1000baseT_Full);
7444 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7445 cmd->supported |= (SUPPORTED_100baseT_Half |
7446 SUPPORTED_100baseT_Full |
7447 SUPPORTED_10baseT_Half |
7448 SUPPORTED_10baseT_Full |
7451 cmd->supported |= SUPPORTED_FIBRE;
7453 cmd->advertising = tp->link_config.advertising;
/* Active speed/duplex are only meaningful while the interface is up. */
7454 if (netif_running(dev)) {
7455 cmd->speed = tp->link_config.active_speed;
7456 cmd->duplex = tp->link_config.active_duplex;
7459 cmd->phy_address = PHY_ADDR;
7460 cmd->transceiver = 0;
7461 cmd->autoneg = tp->link_config.autoneg;
7467 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7469 struct tg3 *tp = netdev_priv(dev);
7471 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7472 /* These are the only valid advertisement bits allowed. */
7473 if (cmd->autoneg == AUTONEG_ENABLE &&
7474 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7475 ADVERTISED_1000baseT_Full |
7476 ADVERTISED_Autoneg |
7479 /* Fiber can only do SPEED_1000. */
7480 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7481 (cmd->speed != SPEED_1000))
7483 /* Copper cannot force SPEED_1000. */
7484 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7485 (cmd->speed == SPEED_1000))
7487 else if ((cmd->speed == SPEED_1000) &&
7488 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7491 tg3_full_lock(tp, 0);
7493 tp->link_config.autoneg = cmd->autoneg;
7494 if (cmd->autoneg == AUTONEG_ENABLE) {
7495 tp->link_config.advertising = cmd->advertising;
7496 tp->link_config.speed = SPEED_INVALID;
7497 tp->link_config.duplex = DUPLEX_INVALID;
7499 tp->link_config.advertising = 0;
7500 tp->link_config.speed = cmd->speed;
7501 tp->link_config.duplex = cmd->duplex;
7504 if (netif_running(dev))
7505 tg3_setup_phy(tp, 1);
7507 tg3_full_unlock(tp);
7512 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7514 struct tg3 *tp = netdev_priv(dev);
7516 strcpy(info->driver, DRV_MODULE_NAME);
7517 strcpy(info->version, DRV_MODULE_VERSION);
7518 strcpy(info->bus_info, pci_name(tp->pdev));
7521 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7523 struct tg3 *tp = netdev_priv(dev);
7525 wol->supported = WAKE_MAGIC;
7527 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7528 wol->wolopts = WAKE_MAGIC;
7529 memset(&wol->sopass, 0, sizeof(wol->sopass));
7532 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7534 struct tg3 *tp = netdev_priv(dev);
7536 if (wol->wolopts & ~WAKE_MAGIC)
7538 if ((wol->wolopts & WAKE_MAGIC) &&
7539 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7540 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7543 spin_lock_bh(&tp->lock);
7544 if (wol->wolopts & WAKE_MAGIC)
7545 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7547 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7548 spin_unlock_bh(&tp->lock);
7553 static u32 tg3_get_msglevel(struct net_device *dev)
7555 struct tg3 *tp = netdev_priv(dev);
7556 return tp->msg_enable;
7559 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7561 struct tg3 *tp = netdev_priv(dev);
7562 tp->msg_enable = value;
/* ethtool set_tso hook (compiled only when TSO support is built in).
 * Chips without TSO capability may only be asked to turn it off.
 * NOTE(review): extract is line-sampled; the inner "if (value) return
 * -EINVAL; return 0;" body and the closing #endif are missing.
 */
7565 #if TG3_TSO_SUPPORT != 0
7566 static int tg3_set_tso(struct net_device *dev, u32 value)
7568 struct tg3 *tp = netdev_priv(dev);
7570 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7575 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset hook: restart PHY autonegotiation.  Not applicable
 * to SerDes links or a down interface.  NOTE(review): extract is
 * line-sampled; locals (bmcr, r) and the return statements are missing.
 */
7579 static int tg3_nway_reset(struct net_device *dev)
7581 struct tg3 *tp = netdev_priv(dev);
7585 if (!netif_running(dev))
7588 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7591 spin_lock_bh(&tp->lock);
/* Deliberate double read: the first BMCR read is a dummy that flushes
 * stale latched state; only the second result is trusted. */
7593 tg3_readphy(tp, MII_BMCR, &bmcr);
7594 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7595 ((bmcr & BMCR_ANENABLE) ||
7596 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7597 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7601 spin_unlock_bh(&tp->lock);
7606 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7608 struct tg3 *tp = netdev_priv(dev);
7610 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7611 ering->rx_mini_max_pending = 0;
7612 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7613 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7615 ering->rx_jumbo_max_pending = 0;
7617 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7619 ering->rx_pending = tp->rx_pending;
7620 ering->rx_mini_pending = 0;
7621 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7622 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7624 ering->rx_jumbo_pending = 0;
7626 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam hook: validate and store new ring sizes; if the
 * interface is up, halt and re-init the chip so the new sizes take
 * effect.  NOTE(review): extract is line-sampled; the irq_sync
 * computation, tg3_netif_stop() call and tg3_init_hw() on restart are
 * missing.
 */
7629 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7631 struct tg3 *tp = netdev_priv(dev);
7634 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7635 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7636 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7639 if (netif_running(dev)) {
7644 tg3_full_lock(tp, irq_sync);
7646 tp->rx_pending = ering->rx_pending;
/* Some chips (MAX_RXPEND_64) cannot post more than 63 standard RX BDs. */
7648 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7649 tp->rx_pending > 63)
7650 tp->rx_pending = 63;
7651 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7652 tp->tx_pending = ering->tx_pending;
7654 if (netif_running(dev)) {
7655 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7657 tg3_netif_start(tp);
7660 tg3_full_unlock(tp);
7665 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7667 struct tg3 *tp = netdev_priv(dev);
7669 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7670 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7671 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam hook: latch the pause settings into tg3_flags
 * and, if the interface is up, restart the chip so they take effect.
 * NOTE(review): extract is line-sampled; the irq_sync setup,
 * tg3_netif_stop() and tg3_init_hw() calls are missing.
 */
7674 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7676 struct tg3 *tp = netdev_priv(dev);
7679 if (netif_running(dev)) {
7684 tg3_full_lock(tp, irq_sync);
7686 if (epause->autoneg)
7687 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7689 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7690 if (epause->rx_pause)
7691 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7693 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7694 if (epause->tx_pause)
7695 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7697 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
/* New pause config only takes effect after a halt + re-init cycle. */
7699 if (netif_running(dev)) {
7700 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7702 tg3_netif_start(tp);
7705 tg3_full_unlock(tp);
7710 static u32 tg3_get_rx_csum(struct net_device *dev)
7712 struct tg3 *tp = netdev_priv(dev);
7713 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7716 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7718 struct tg3 *tp = netdev_priv(dev);
7720 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7726 spin_lock_bh(&tp->lock);
7728 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7730 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7731 spin_unlock_bh(&tp->lock);
7736 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7738 struct tg3 *tp = netdev_priv(dev);
7740 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7747 dev->features |= NETIF_F_IP_CSUM;
7749 dev->features &= ~NETIF_F_IP_CSUM;
7754 static int tg3_get_stats_count (struct net_device *dev)
7756 return TG3_NUM_STATS;
7759 static int tg3_get_test_count (struct net_device *dev)
7761 return TG3_NUM_TEST;
7764 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7766 switch (stringset) {
7768 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
7771 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
7774 WARN_ON(1); /* we need a WARN() */
/* ethtool phys_id hook: blink the port LEDs for `data` seconds (forcing
 * all speed LEDs on for half a second, then off for half a second), so
 * the operator can locate the physical port.  Restores the configured
 * LED state afterward.  NOTE(review): extract is line-sampled; the
 * data==0 default and the even/odd-iteration if/else structure are
 * missing.
 */
7779 static int tg3_phys_id(struct net_device *dev, u32 data)
7781 struct tg3 *tp = netdev_priv(dev);
7784 if (!netif_running(tp->dev))
/* Two LED register writes per second of requested blink time. */
7790 for (i = 0; i < (data * 2); i++) {
7792 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7793 LED_CTRL_1000MBPS_ON |
7794 LED_CTRL_100MBPS_ON |
7795 LED_CTRL_10MBPS_ON |
7796 LED_CTRL_TRAFFIC_OVERRIDE |
7797 LED_CTRL_TRAFFIC_BLINK |
7798 LED_CTRL_TRAFFIC_LED);
7801 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7802 LED_CTRL_TRAFFIC_OVERRIDE);
/* A signal aborts the blink loop early. */
7804 if (msleep_interruptible(500))
7807 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool .get_ethtool_stats: snapshot the driver's estats block into the
 * caller-provided u64 array (sized by tg3_get_stats_count()).
 */
7811 static void tg3_get_ethtool_stats (struct net_device *dev,
7812 struct ethtool_stats *estats, u64 *tmp_stats)
7814 struct tg3 *tp = netdev_priv(dev);
7815 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7818 #define NVRAM_TEST_SIZE 0x100
7819 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
/* Self-test: validate NVRAM contents.  Reads the magic word to decide the
 * image format/size, buffers the image, then verifies either a simple
 * byte checksum (selfboot format) or two CRCs (bootstrap block and
 * manufacturing block).  Returns 0 on success, negative on failure.
 * NOTE(review): error-path and return lines are elided in this excerpt.
 */
7821 static int tg3_test_nvram(struct tg3 *tp)
7823 u32 *buf, csum, magic;
7824 int i, j, err = 0, size;
7826 if (tg3_nvram_read(tp, 0, &magic) != 0)
7829 magic = swab32(magic);
7830 if (magic == TG3_EEPROM_MAGIC)
7831 size = NVRAM_TEST_SIZE;
/* 0xa5xxxxxx marks a selfboot image; bits 23:21 == 001 is format 1. */
7832 else if ((magic & 0xff000000) == 0xa5000000) {
7833 if ((magic & 0xe00000) == 0x200000)
7834 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
7840 buf = kmalloc(size, GFP_KERNEL);
/* Read the whole image, one 32-bit word at a time. */
7845 for (i = 0, j = 0; i < size; i += 4, j++) {
7848 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7850 buf[j] = cpu_to_le32(val);
7855 /* Selfboot format */
7856 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
7857 u8 *buf8 = (u8 *) buf, csum8 = 0;
7859 for (i = 0; i < size; i++)
7867 /* Bootstrap checksum at offset 0x10 */
7868 csum = calc_crc((unsigned char *) buf, 0x10);
7869 if(csum != cpu_to_le32(buf[0x10/4]))
7872 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7873 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7874 if (csum != cpu_to_le32(buf[0xfc/4]))
7884 #define TG3_SERDES_TIMEOUT_SEC 2
7885 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: wait for link-up.  Polls netif_carrier_ok() once a second,
 * up to TG3_SERDES_TIMEOUT_SEC (serdes) or TG3_COPPER_TIMEOUT_SEC
 * (copper) seconds.  Fails immediately if the interface is down.
 */
7887 static int tg3_test_link(struct tg3 *tp)
7891 if (!netif_running(tp->dev))
7894 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7895 max = TG3_SERDES_TIMEOUT_SEC;
7897 max = TG3_COPPER_TIMEOUT_SEC;
7899 for (i = 0; i < max; i++) {
7900 if (netif_carrier_ok(tp->dev))
/* Abort the wait if a signal interrupts the sleep. */
7903 if (msleep_interruptible(1000))
7910 /* Only test the commonly used registers */
/* Self-test: register read/write test.  For each table entry, save the
 * register, write 0 and then (read_mask|write_mask), verifying after each
 * write that read-only bits kept their value and read/write bits took the
 * written value; finally restore the saved value.  Entries are filtered
 * by chip family via the TG3_FL_* flags.  The table terminates at
 * offset 0xffff.
 * NOTE(review): 'static const int' return type — the 'const' qualifier on
 * a by-value int return is meaningless and should be dropped (upstream
 * tg3.c uses plain 'static int').
 */
7911 static const int tg3_test_registers(struct tg3 *tp)
7914 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry flags: run only on 5705-class, only on non-5705, skip 5788. */
7918 #define TG3_FL_5705 0x1
7919 #define TG3_FL_NOT_5705 0x2
7920 #define TG3_FL_NOT_5788 0x4
7924 /* MAC Control Registers */
7925 { MAC_MODE, TG3_FL_NOT_5705,
7926 0x00000000, 0x00ef6f8c },
7927 { MAC_MODE, TG3_FL_5705,
7928 0x00000000, 0x01ef6b8c },
7929 { MAC_STATUS, TG3_FL_NOT_5705,
7930 0x03800107, 0x00000000 },
7931 { MAC_STATUS, TG3_FL_5705,
7932 0x03800100, 0x00000000 },
7933 { MAC_ADDR_0_HIGH, 0x0000,
7934 0x00000000, 0x0000ffff },
7935 { MAC_ADDR_0_LOW, 0x0000,
7936 0x00000000, 0xffffffff },
7937 { MAC_RX_MTU_SIZE, 0x0000,
7938 0x00000000, 0x0000ffff },
7939 { MAC_TX_MODE, 0x0000,
7940 0x00000000, 0x00000070 },
7941 { MAC_TX_LENGTHS, 0x0000,
7942 0x00000000, 0x00003fff },
7943 { MAC_RX_MODE, TG3_FL_NOT_5705,
7944 0x00000000, 0x000007fc },
7945 { MAC_RX_MODE, TG3_FL_5705,
7946 0x00000000, 0x000007dc },
7947 { MAC_HASH_REG_0, 0x0000,
7948 0x00000000, 0xffffffff },
7949 { MAC_HASH_REG_1, 0x0000,
7950 0x00000000, 0xffffffff },
7951 { MAC_HASH_REG_2, 0x0000,
7952 0x00000000, 0xffffffff },
7953 { MAC_HASH_REG_3, 0x0000,
7954 0x00000000, 0xffffffff },
7956 /* Receive Data and Receive BD Initiator Control Registers. */
7957 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7958 0x00000000, 0xffffffff },
7959 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7960 0x00000000, 0xffffffff },
7961 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7962 0x00000000, 0x00000003 },
7963 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7964 0x00000000, 0xffffffff },
7965 { RCVDBDI_STD_BD+0, 0x0000,
7966 0x00000000, 0xffffffff },
7967 { RCVDBDI_STD_BD+4, 0x0000,
7968 0x00000000, 0xffffffff },
7969 { RCVDBDI_STD_BD+8, 0x0000,
7970 0x00000000, 0xffff0002 },
7971 { RCVDBDI_STD_BD+0xc, 0x0000,
7972 0x00000000, 0xffffffff },
7974 /* Receive BD Initiator Control Registers. */
7975 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7976 0x00000000, 0xffffffff },
7977 { RCVBDI_STD_THRESH, TG3_FL_5705,
7978 0x00000000, 0x000003ff },
7979 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7980 0x00000000, 0xffffffff },
7982 /* Host Coalescing Control Registers. */
7983 { HOSTCC_MODE, TG3_FL_NOT_5705,
7984 0x00000000, 0x00000004 },
7985 { HOSTCC_MODE, TG3_FL_5705,
7986 0x00000000, 0x000000f6 },
7987 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7988 0x00000000, 0xffffffff },
7989 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7990 0x00000000, 0x000003ff },
7991 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7992 0x00000000, 0xffffffff },
7993 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7994 0x00000000, 0x000003ff },
7995 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7996 0x00000000, 0xffffffff },
7997 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7998 0x00000000, 0x000000ff },
7999 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8000 0x00000000, 0xffffffff },
8001 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8002 0x00000000, 0x000000ff },
8003 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8004 0x00000000, 0xffffffff },
8005 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8006 0x00000000, 0xffffffff },
8007 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8008 0x00000000, 0xffffffff },
8009 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8010 0x00000000, 0x000000ff },
8011 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8012 0x00000000, 0xffffffff },
8013 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8014 0x00000000, 0x000000ff },
8015 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8016 0x00000000, 0xffffffff },
8017 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8018 0x00000000, 0xffffffff },
8019 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8020 0x00000000, 0xffffffff },
8021 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8022 0x00000000, 0xffffffff },
8023 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8024 0x00000000, 0xffffffff },
8025 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8026 0xffffffff, 0x00000000 },
8027 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8028 0xffffffff, 0x00000000 },
8030 /* Buffer Manager Control Registers. */
8031 { BUFMGR_MB_POOL_ADDR, 0x0000,
8032 0x00000000, 0x007fff80 },
8033 { BUFMGR_MB_POOL_SIZE, 0x0000,
8034 0x00000000, 0x007fffff },
8035 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8036 0x00000000, 0x0000003f },
8037 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8038 0x00000000, 0x000001ff },
8039 { BUFMGR_MB_HIGH_WATER, 0x0000,
8040 0x00000000, 0x000001ff },
8041 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8042 0xffffffff, 0x00000000 },
8043 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8044 0xffffffff, 0x00000000 },
8046 /* Mailbox Registers */
8047 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8048 0x00000000, 0x000001ff },
8049 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8050 0x00000000, 0x000001ff },
8051 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8052 0x00000000, 0x000007ff },
8053 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8054 0x00000000, 0x000001ff },
/* Table terminator. */
8056 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8059 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8064 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
/* Skip entries that do not apply to this chip family. */
8065 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8068 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8071 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8072 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8075 offset = (u32) reg_tbl[i].offset;
8076 read_mask = reg_tbl[i].read_mask;
8077 write_mask = reg_tbl[i].write_mask;
8079 /* Save the original register content */
8080 save_val = tr32(offset);
8082 /* Determine the read-only value. */
8083 read_val = save_val & read_mask;
8085 /* Write zero to the register, then make sure the read-only bits
8086 * are not changed and the read/write bits are all zeros.
8092 /* Test the read-only and read/write bits. */
8093 if (((val & read_mask) != read_val) || (val & write_mask))
8096 /* Write ones to all the bits defined by RdMask and WrMask, then
8097 * make sure the read-only bits are not changed and the
8098 * read/write bits are all ones.
8100 tw32(offset, read_mask | write_mask);
8104 /* Test the read-only bits. */
8105 if ((val & read_mask) != read_val)
8108 /* Test the read/write bits. */
8109 if ((val & write_mask) != write_mask)
8112 tw32(offset, save_val);
/* Failure path (reached via elided goto): report and restore register. */
8118 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8119 tw32(offset, save_val);
/* Write each test pattern to every 32-bit word of on-chip memory in
 * [offset, offset+len) and read it back, failing on the first mismatch.
 */
8123 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8125 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8129 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8130 for (j = 0; j < len; j += 4) {
8133 tg3_write_mem(tp, offset + j, test_pattern[i]);
8134 tg3_read_mem(tp, offset + j, &val);
8135 if (val != test_pattern[i])
/* Self-test: walk a chip-specific table of internal memory regions and
 * run tg3_do_mem_test() on each.  5705-class chips use a different
 * (smaller, scattered) region table than the original 570x parts.
 * Each table is terminated by an offset of 0xffffffff.
 */
8142 static int tg3_test_memory(struct tg3 *tp)
8144 static struct mem_entry {
8147 } mem_tbl_570x[] = {
8148 { 0x00000000, 0x00b50},
8149 { 0x00002000, 0x1c000},
8150 { 0xffffffff, 0x00000}
8151 }, mem_tbl_5705[] = {
8152 { 0x00000100, 0x0000c},
8153 { 0x00000200, 0x00008},
8154 { 0x00004000, 0x00800},
8155 { 0x00006000, 0x01000},
8156 { 0x00008000, 0x02000},
8157 { 0x00010000, 0x0e000},
8158 { 0xffffffff, 0x00000}
8160 struct mem_entry *mem_tbl;
8164 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8165 mem_tbl = mem_tbl_5705;
8167 mem_tbl = mem_tbl_570x;
8169 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8170 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8171 mem_tbl[i].len)) != 0)
8178 #define TG3_MAC_LOOPBACK 0
8179 #define TG3_PHY_LOOPBACK 1
/* Run one loopback test (MAC-internal or PHY loopback): configure the
 * loopback mode, build a single test frame (dest = own MAC, incrementing
 * byte payload), transmit it, poll for TX completion and RX arrival, then
 * verify the received descriptor and payload byte-for-byte.
 * Returns 0 on success, nonzero on any failure (error paths elided here).
 */
8181 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8183 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8185 struct sk_buff *skb, *rx_skb;
8188 int num_pkts, tx_len, rx_len, i, err;
8189 struct tg3_rx_buffer_desc *desc;
8191 if (loopback_mode == TG3_MAC_LOOPBACK) {
8192 /* HW errata - mac loopback fails in some cases on 5780.
8193 * Normal traffic and PHY loopback are not affected by
8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8199 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8200 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8201 MAC_MODE_PORT_MODE_GMII;
8202 tw32(MAC_MODE, mac_mode);
8203 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8204 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8207 /* reset to prevent losing 1st rx packet intermittently */
8208 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8209 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8211 tw32_f(MAC_RX_MODE, tp->rx_mode);
8213 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8214 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
/* BCM5401 PHY needs link polarity cleared in loopback. */
8215 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8216 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8217 tw32(MAC_MODE, mac_mode);
/* NOTE(review): dev_alloc_skb() return value is not NULL-checked before
 * skb_put() — an allocation failure here would oops.  Upstream later
 * added a check; confirm against the full source.
 */
8225 skb = dev_alloc_skb(tx_len);
8226 tx_data = skb_put(skb, tx_len);
8227 memcpy(tx_data, tp->dev->dev_addr, 6);
8228 memset(tx_data + 6, 0x0, 8);
8230 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
/* Fill payload (after the 14-byte Ethernet header) with i & 0xff. */
8232 for (i = 14; i < tx_len; i++)
8233 tx_data[i] = (u8) (i & 0xff);
8235 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8237 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8242 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8246 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8251 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8253 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
/* Poll up to 10 times for the frame to loop back. */
8257 for (i = 0; i < 10; i++) {
8258 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8263 tx_idx = tp->hw_status->idx[0].tx_consumer;
8264 rx_idx = tp->hw_status->idx[0].rx_producer;
8265 if ((tx_idx == tp->tx_prod) &&
8266 (rx_idx == (rx_start_idx + num_pkts)))
8270 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8273 if (tx_idx != tp->tx_prod)
8276 if (rx_idx != rx_start_idx + num_pkts)
/* Validate the RX descriptor: ring, error bits, and length (minus FCS). */
8279 desc = &tp->rx_rcb[rx_start_idx];
8280 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8281 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8282 if (opaque_key != RXD_OPAQUE_RING_STD)
8285 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8286 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8289 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8290 if (rx_len != tx_len)
8293 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8295 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8296 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
/* Compare the looped-back payload against the generated pattern. */
8298 for (i = 14; i < tx_len; i++) {
8299 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8304 /* tg3_free_rings will unmap and free the rx_skb */
8309 #define TG3_MAC_LOOPBACK_FAILED 1
8310 #define TG3_PHY_LOOPBACK_FAILED 2
8311 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8312 TG3_PHY_LOOPBACK_FAILED)
/* Self-test driver for loopback: run MAC loopback always, PHY loopback
 * only on copper (not PHY_SERDES) devices.  Returns a bitmask of
 * TG3_*_LOOPBACK_FAILED flags; both bits if the interface is down.
 */
8314 static int tg3_test_loopback(struct tg3 *tp)
8318 if (!netif_running(tp->dev))
8319 return TG3_LOOPBACK_FAILED;
8323 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8324 err |= TG3_MAC_LOOPBACK_FAILED;
8325 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8326 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8327 err |= TG3_PHY_LOOPBACK_FAILED;
/* ethtool .self_test entry point.  Always runs the NVRAM and link tests;
 * when ETH_TEST_FL_OFFLINE is requested, additionally halts the chip and
 * runs the register, memory, loopback and interrupt tests, then resets
 * and restarts the device.  Per-test results go into data[], and any
 * failure sets ETH_TEST_FL_FAILED in etest->flags.  The device is
 * temporarily brought to D0 if it was in low-power state.
 */
8333 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8336 struct tg3 *tp = netdev_priv(dev);
8338 if (tp->link_config.phy_is_low_power)
8339 tg3_set_power_state(tp, PCI_D0);
8341 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8343 if (tg3_test_nvram(tp) != 0) {
8344 etest->flags |= ETH_TEST_FL_FAILED;
8347 if (tg3_test_link(tp) != 0) {
8348 etest->flags |= ETH_TEST_FL_FAILED;
8351 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8352 int err, irq_sync = 0;
8354 if (netif_running(dev)) {
8359 tg3_full_lock(tp, irq_sync);
/* Quiesce the hardware before destructive offline tests. */
8361 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8362 err = tg3_nvram_lock(tp);
8363 tg3_halt_cpu(tp, RX_CPU_BASE);
8364 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8365 tg3_halt_cpu(tp, TX_CPU_BASE);
8367 tg3_nvram_unlock(tp);
8369 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8372 if (tg3_test_registers(tp) != 0) {
8373 etest->flags |= ETH_TEST_FL_FAILED;
8376 if (tg3_test_memory(tp) != 0) {
8377 etest->flags |= ETH_TEST_FL_FAILED;
8380 if ((data[4] = tg3_test_loopback(tp)) != 0)
8381 etest->flags |= ETH_TEST_FL_FAILED;
/* Interrupt test runs unlocked (it needs interrupts enabled). */
8383 tg3_full_unlock(tp);
8385 if (tg3_test_interrupt(tp) != 0) {
8386 etest->flags |= ETH_TEST_FL_FAILED;
8390 tg3_full_lock(tp, 0);
/* Reset and, if the device was up, reinitialize and restart it. */
8392 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8393 if (netif_running(dev)) {
8394 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8396 tg3_netif_start(tp);
8399 tg3_full_unlock(tp);
8401 if (tp->link_config.phy_is_low_power)
8402 tg3_set_power_state(tp, PCI_D3hot);
/* net_device ioctl handler: implements the MII ioctls (the case labels,
 * presumably SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG, are elided in this
 * excerpt).  PHY register access is done under tp->lock; serdes devices
 * (no MDIO-accessible PHY) and low-power state are rejected up front.
 */
8406 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8408 struct mii_ioctl_data *data = if_mii(ifr);
8409 struct tg3 *tp = netdev_priv(dev);
8414 data->phy_id = PHY_ADDR;
8420 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8421 break; /* We have no PHY */
8423 if (tp->link_config.phy_is_low_power)
8426 spin_lock_bh(&tp->lock);
8427 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8428 spin_unlock_bh(&tp->lock);
8430 data->val_out = mii_regval;
8436 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8437 break; /* We have no PHY */
/* Writing PHY registers requires CAP_NET_ADMIN. */
8439 if (!capable(CAP_NET_ADMIN))
8442 if (tp->link_config.phy_is_low_power)
8445 spin_lock_bh(&tp->lock);
8446 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8447 spin_unlock_bh(&tp->lock);
8458 #if TG3_VLAN_TAG_USED
/* VLAN acceleration: record the new vlan_group (assignment line elided)
 * and reprogram the RX mode so the chip keeps/strips tags accordingly.
 * Runs under the full lock.
 */
8459 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8461 struct tg3 *tp = netdev_priv(dev);
8463 tg3_full_lock(tp, 0);
8467 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8468 __tg3_set_rx_mode(dev);
8470 tg3_full_unlock(tp);
/* VLAN acceleration: drop our reference to the net_device for VLAN id
 * 'vid' when the VLAN interface is removed.  Runs under the full lock.
 */
8473 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8475 struct tg3 *tp = netdev_priv(dev);
8477 tg3_full_lock(tp, 0);
8479 tp->vlgrp->vlan_devices[vid] = NULL;
8480 tg3_full_unlock(tp);
/* ethtool .get_coalesce: copy out the cached coalescing parameters. */
8484 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8486 struct tg3 *tp = netdev_priv(dev);
8488 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: validate the requested interrupt-coalescing
 * parameters against chip limits, reject combinations that would disable
 * interrupts entirely, copy the supported fields into tp->coal, and
 * program the hardware if the interface is running.
 */
8492 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8494 struct tg3 *tp = netdev_priv(dev);
/* 5705+ chips do not support the irq-tick or stats-tick knobs; keeping
 * the limits at 0 forces those fields to be rejected/unused below.
 */
8495 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8496 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8498 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8499 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8500 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8501 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8502 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* Range-check every parameter we honor. */
8505 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8506 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8507 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8508 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8509 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8510 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8511 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8512 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8513 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8514 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8517 /* No rx interrupts will be generated if both are zero */
8518 if ((ec->rx_coalesce_usecs == 0) &&
8519 (ec->rx_max_coalesced_frames == 0))
8522 /* No tx interrupts will be generated if both are zero */
8523 if ((ec->tx_coalesce_usecs == 0) &&
8524 (ec->tx_max_coalesced_frames == 0))
8527 /* Only copy relevant parameters, ignore all others. */
8528 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8529 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8530 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8531 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8532 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8533 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8534 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8535 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8536 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8538 if (netif_running(dev)) {
8539 tg3_full_lock(tp, 0);
8540 __tg3_set_coalesce(tp, &tp->coal);
8541 tg3_full_unlock(tp);
/* ethtool operations table for the tg3 driver, wired into the net_device
 * at probe time.  Generic ethtool_op_* helpers are used where the default
 * behavior suffices; everything else is implemented above.
 */
8546 static struct ethtool_ops tg3_ethtool_ops = {
8547 .get_settings = tg3_get_settings,
8548 .set_settings = tg3_set_settings,
8549 .get_drvinfo = tg3_get_drvinfo,
8550 .get_regs_len = tg3_get_regs_len,
8551 .get_regs = tg3_get_regs,
8552 .get_wol = tg3_get_wol,
8553 .set_wol = tg3_set_wol,
8554 .get_msglevel = tg3_get_msglevel,
8555 .set_msglevel = tg3_set_msglevel,
8556 .nway_reset = tg3_nway_reset,
8557 .get_link = ethtool_op_get_link,
8558 .get_eeprom_len = tg3_get_eeprom_len,
8559 .get_eeprom = tg3_get_eeprom,
8560 .set_eeprom = tg3_set_eeprom,
8561 .get_ringparam = tg3_get_ringparam,
8562 .set_ringparam = tg3_set_ringparam,
8563 .get_pauseparam = tg3_get_pauseparam,
8564 .set_pauseparam = tg3_set_pauseparam,
8565 .get_rx_csum = tg3_get_rx_csum,
8566 .set_rx_csum = tg3_set_rx_csum,
8567 .get_tx_csum = ethtool_op_get_tx_csum,
8568 .set_tx_csum = tg3_set_tx_csum,
8569 .get_sg = ethtool_op_get_sg,
8570 .set_sg = ethtool_op_set_sg,
/* TSO hooks only exist when the driver is built with TSO support. */
8571 #if TG3_TSO_SUPPORT != 0
8572 .get_tso = ethtool_op_get_tso,
8573 .set_tso = tg3_set_tso,
8575 .self_test_count = tg3_get_test_count,
8576 .self_test = tg3_self_test,
8577 .get_strings = tg3_get_strings,
8578 .phys_id = tg3_phys_id,
8579 .get_stats_count = tg3_get_stats_count,
8580 .get_ethtool_stats = tg3_get_ethtool_stats,
8581 .get_coalesce = tg3_get_coalesce,
8582 .set_coalesce = tg3_set_coalesce,
8583 .get_perm_addr = ethtool_op_get_perm_addr,
/* Probe-time helper: determine the EEPROM size by reading at doubling
 * offsets until the magic word read at offset 0 reappears (address
 * wrap-around), then record that offset as tp->nvram_size.  Bails out
 * (keeping the EEPROM_CHIP_SIZE default) if the magic is unrecognized.
 */
8586 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8588 u32 cursize, val, magic;
8590 tp->nvram_size = EEPROM_CHIP_SIZE;
8592 if (tg3_nvram_read(tp, 0, &val) != 0)
8595 magic = swab32(val)
8596 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8600 * Size the chip by reading offsets at increasing powers of two.
8601 * When we encounter our validation signature, we know the addressing
8602 * has wrapped around, and thus have our chip size.
8606 while (cursize < tp->nvram_size) {
8607 if (tg3_nvram_read(tp, cursize, &val) != 0)
8610 if (swab32(val) == magic)
8616 tp->nvram_size = cursize;
/* Probe-time helper: determine NVRAM size.  Selfboot images (no standard
 * magic at offset 0) are sized via tg3_get_eeprom_size(); otherwise the
 * size in KB is read from the directory word at offset 0xf0, falling
 * back to 128 KB (0x20000) if that word is unusable.
 */
8619 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8623 if (tg3_nvram_read(tp, 0, &val) != 0)
8626 /* Selfboot format */
8627 if (swab32(val) != TG3_EEPROM_MAGIC) {
8628 tg3_get_eeprom_size(tp);
8632 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
/* Upper 16 bits hold the size in kilobytes. */
8634 tp->nvram_size = (val >> 16) * 1024;
8638 tp->nvram_size = 0x20000;
/* Probe-time helper: decode NVRAM_CFG1 to identify the attached flash or
 * EEPROM part (vendor, page size, buffered-ness) on pre-5752 chips.
 * Non-flash parts have compat-bypass cleared; 5750/5780-class chips carry
 * a vendor field that selects the JEDEC id and page size.  Other chips
 * default to a buffered Atmel AT45DB0X1B (fall-through at 8688-8690).
 */
8641 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8645 nvcfg1 = tr32(NVRAM_CFG1);
8646 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8647 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8650 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8651 tw32(NVRAM_CFG1, nvcfg1);
8654 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8655 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8656 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8657 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8658 tp->nvram_jedecnum = JEDEC_ATMEL;
8659 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8660 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8662 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8663 tp->nvram_jedecnum = JEDEC_ATMEL;
8664 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8666 case FLASH_VENDOR_ATMEL_EEPROM:
8667 tp->nvram_jedecnum = JEDEC_ATMEL;
8668 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8669 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8671 case FLASH_VENDOR_ST:
8672 tp->nvram_jedecnum = JEDEC_ST;
8673 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8674 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8676 case FLASH_VENDOR_SAIFUN:
8677 tp->nvram_jedecnum = JEDEC_SAIFUN;
8678 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8680 case FLASH_VENDOR_SST_SMALL:
8681 case FLASH_VENDOR_SST_LARGE:
8682 tp->nvram_jedecnum = JEDEC_SST;
8683 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Default (non-5750-class) configuration. */
8688 tp->nvram_jedecnum = JEDEC_ATMEL;
8689 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8690 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* Probe-time helper: decode NVRAM_CFG1 on 5752-class chips.  Sets the
 * TPM-protection flag, identifies the vendor/part, and for flash parts
 * decodes the page size field; EEPROM parts use the maximum chip size as
 * the page size and have compat-bypass cleared.
 */
8694 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8698 nvcfg1 = tr32(NVRAM_CFG1);
8700 /* NVRAM protection for TPM */
8701 if (nvcfg1 & (1 << 27))
8702 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8704 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8705 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8706 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8707 tp->nvram_jedecnum = JEDEC_ATMEL;
8708 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8710 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8711 tp->nvram_jedecnum = JEDEC_ATMEL;
8712 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8713 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8715 case FLASH_5752VENDOR_ST_M45PE10:
8716 case FLASH_5752VENDOR_ST_M45PE20:
8717 case FLASH_5752VENDOR_ST_M45PE40:
8718 tp->nvram_jedecnum = JEDEC_ST;
8719 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8720 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8724 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8725 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8726 case FLASH_5752PAGE_SIZE_256:
8727 tp->nvram_pagesize = 256;
8729 case FLASH_5752PAGE_SIZE_512:
8730 tp->nvram_pagesize = 512;
8732 case FLASH_5752PAGE_SIZE_1K:
8733 tp->nvram_pagesize = 1024;
8735 case FLASH_5752PAGE_SIZE_2K:
8736 tp->nvram_pagesize = 2048;
8738 case FLASH_5752PAGE_SIZE_4K:
8739 tp->nvram_pagesize = 4096;
8741 case FLASH_5752PAGE_SIZE_264:
8742 tp->nvram_pagesize = 264;
8747 /* For eeprom, set pagesize to maximum eeprom size */
8748 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8750 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8751 tw32(NVRAM_CFG1, nvcfg1);
/* Probe-time helper: decode NVRAM_CFG1 on 5787-class chips.  EEPROM
 * variants use the full AT24C512 chip size as the page size (and clear
 * compat-bypass); Atmel flash uses 264-byte pages, ST flash 256-byte.
 */
8755 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8759 nvcfg1 = tr32(NVRAM_CFG1);
8761 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8762 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8763 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8764 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8765 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8766 tp->nvram_jedecnum = JEDEC_ATMEL;
8767 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8768 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8770 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8771 tw32(NVRAM_CFG1, nvcfg1);
8773 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8774 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8775 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8776 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8777 tp->nvram_jedecnum = JEDEC_ATMEL;
8778 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8779 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8780 tp->nvram_pagesize = 264;
8782 case FLASH_5752VENDOR_ST_M45PE10:
8783 case FLASH_5752VENDOR_ST_M45PE20:
8784 case FLASH_5752VENDOR_ST_M45PE40:
8785 tp->nvram_jedecnum = JEDEC_ST;
8786 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8787 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8788 tp->nvram_pagesize = 256;
8793 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe-time NVRAM initialization: reset the EEPROM state machine,
 * enable serial-EEPROM access, and on chips with a real NVRAM interface
 * (everything but 5700/5701 and Sun 570X) take the NVRAM lock, identify
 * the part via the chip-specific *_nvram_info() helper, and size it.
 * 5700/5701 fall back to direct EEPROM sizing (8839-8841).
 * NOTE(review): the printk at 8820 misspells "nvram" as "nvarm" — string
 * fix belongs in a code change, not this comment pass.
 */
8794 static void __devinit tg3_nvram_init(struct tg3 *tp)
8798 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8801 tw32_f(GRC_EEPROM_ADDR,
8802 (EEPROM_ADDR_FSM_RESET |
8803 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8804 EEPROM_ADDR_CLKPERD_SHIFT)));
8806 /* XXX schedule_timeout() ... */
8807 for (j = 0; j < 100; j++)
8810 /* Enable seeprom accesses. */
8811 tw32_f(GRC_LOCAL_CTRL,
8812 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8815 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8816 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8817 tp->tg3_flags |= TG3_FLAG_NVRAM;
8819 if (tg3_nvram_lock(tp)) {
8820 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
8821 "tg3_nvram_init failed.\n", tp->dev->name);
8824 tg3_enable_nvram_access(tp);
8826 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8827 tg3_get_5752_nvram_info(tp);
8828 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8829 tg3_get_5787_nvram_info(tp);
8831 tg3_get_nvram_info(tp);
8833 tg3_get_nvram_size(tp);
8835 tg3_disable_nvram_access(tp);
8836 tg3_nvram_unlock(tp);
8839 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8841 tg3_get_eeprom_size(tp);
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine: program address + READ + START, poll up to 10000 times
 * for EEPROM_ADDR_COMPLETE, then fetch the word from GRC_EEPROM_DATA.
 * Fails if the offset exceeds the address mask or completion times out.
 */
8845 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8846 u32 offset, u32 *val)
8851 if (offset > EEPROM_ADDR_ADDR_MASK ||
/* Preserve non-address bits of the EEPROM control register. */
8855 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8856 EEPROM_ADDR_DEVID_MASK |
8858 tw32(GRC_EEPROM_ADDR,
8860 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8861 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8862 EEPROM_ADDR_ADDR_MASK) |
8863 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8865 for (i = 0; i < 10000; i++) {
8866 tmp = tr32(GRC_EEPROM_ADDR);
8868 if (tmp & EEPROM_ADDR_COMPLETE)
8872 if (!(tmp & EEPROM_ADDR_COMPLETE))
8875 *val = tr32(GRC_EEPROM_DATA);
8879 #define NVRAM_CMD_TIMEOUT 10000
/* Issue a command to the NVRAM controller and poll (up to
 * NVRAM_CMD_TIMEOUT iterations) for NVRAM_CMD_DONE.  The timeout error
 * path at 8893 (body elided) fires when polling exhausts the budget.
 */
8881 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8885 tw32(NVRAM_CMD, nvram_cmd);
8886 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8888 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8893 if (i == NVRAM_CMD_TIMEOUT) {
/* Read one 32-bit word from NVRAM at 'offset'.  Sun 570X boards have no
 * NVRAM; chips without the NVRAM interface delegate to the EEPROM path.
 * Buffered Atmel flash needs the linear offset translated into a
 * page-number/page-offset address.  The actual read runs under the NVRAM
 * lock with access enabled, and the data register is byte-swapped.
 */
8899 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8903 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8904 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8908 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8909 return tg3_nvram_read_using_eeprom(tp, offset, val);
8911 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8912 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8913 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
/* AT45DB0X1B: page number goes in the high bits, byte offset in low. */
8915 offset = ((offset / tp->nvram_pagesize) <<
8916 ATMEL_AT45DB0X1B_PAGE_POS) +
8917 (offset % tp->nvram_pagesize);
8920 if (offset > NVRAM_ADDR_MSK)
8923 ret = tg3_nvram_lock(tp);
8927 tg3_enable_nvram_access(tp);
8929 tw32(NVRAM_ADDR, offset);
8930 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8931 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8934 *val = swab32(tr32(NVRAM_RDDATA));
8936 tg3_disable_nvram_access(tp);
8938 tg3_nvram_unlock(tp);
/* Write 'len' bytes (dword-aligned) to the serial EEPROM, one 32-bit
 * word at a time: load GRC_EEPROM_DATA, clear the COMPLETE latch,
 * program address + START, then poll up to 10000 times for completion.
 * Fails on the first word that does not complete.
 */
8943 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8944 u32 offset, u32 len, u8 *buf)
8949 for (i = 0; i < len; i += 4) {
8954 memcpy(&data, buf + i, 4);
8956 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8958 val = tr32(GRC_EEPROM_ADDR);
/* Writing COMPLETE back clears the (write-one-to-clear) latch. */
8959 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8961 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8963 tw32(GRC_EEPROM_ADDR, val |
8964 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8965 (addr & EEPROM_ADDR_ADDR_MASK) |
8969 for (j = 0; j < 10000; j++) {
8970 val = tr32(GRC_EEPROM_ADDR);
8972 if (val & EEPROM_ADDR_COMPLETE)
8976 if (!(val & EEPROM_ADDR_COMPLETE)) {
8985 /* offset and length are dword aligned */
/* Write to unbuffered flash using read-modify-write at page granularity:
 * for each affected page, read the whole page into a scratch buffer,
 * overlay the caller's data, issue WREN + page ERASE, then WREN and
 * program the page word-by-word (FIRST on the first word, LAST on the
 * final word), finishing with a WRDI (write-disable) command.
 */
8986 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8990 u32 pagesize = tp->nvram_pagesize;
8991 u32 pagemask = pagesize - 1;
8995 tmp = kmalloc(pagesize, GFP_KERNEL);
9001 u32 phy_addr, page_off, size;
/* Start of the flash page containing 'offset'. */
9003 phy_addr = offset & ~pagemask;
9005 for (j = 0; j < pagesize; j += 4) {
9006 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9007 (u32 *) (tmp + j))))
9013 page_off = offset & pagemask;
9020 memcpy(tmp + page_off, buf, size);
/* Advance to the start of the next page. */
9022 offset = offset + (pagesize - page_off);
9024 tg3_enable_nvram_access(tp);
9027 * Before we can erase the flash page, we need
9028 * to issue a special "write enable" command.
9030 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9032 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9035 /* Erase the target page */
9036 tw32(NVRAM_ADDR, phy_addr);
9038 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9039 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9041 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9044 /* Issue another write enable to start the write. */
9045 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9047 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9050 for (j = 0; j < pagesize; j += 4) {
9053 data = *((u32 *) (tmp + j));
9054 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9056 tw32(NVRAM_ADDR, phy_addr + j);
9058 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9062 nvram_cmd |= NVRAM_CMD_FIRST;
9063 else if (j == (pagesize - 4))
9064 nvram_cmd |= NVRAM_CMD_LAST;
9066 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Leave the part write-disabled when finished. */
9073 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9074 tg3_nvram_exec_cmd(tp, nvram_cmd);
9081 /* offset and length are dword aligned */
/* Write to buffered flash/EEPROM one 32-bit word at a time.  Atmel
 * buffered flash needs the linear offset re-encoded as page/offset;
 * FIRST/LAST command bits bracket each flash page (and the whole
 * transfer), ST parts additionally need a WREN before each FIRST, and
 * plain EEPROM always writes complete FIRST|LAST words.
 */
9082 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9087 for (i = 0; i < len; i += 4, offset += 4) {
9088 u32 data, page_off, phy_addr, nvram_cmd;
9090 memcpy(&data, buf + i, 4);
9091 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9093 page_off = offset % tp->nvram_pagesize;
9095 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9096 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
9098 phy_addr = ((offset / tp->nvram_pagesize) <<
9099 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
9105 tw32(NVRAM_ADDR, phy_addr);
9107 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9109 if ((page_off == 0) || (i == 0))
9110 nvram_cmd |= NVRAM_CMD_FIRST;
9111 else if (page_off == (tp->nvram_pagesize - 4))
9112 nvram_cmd |= NVRAM_CMD_LAST;
/* Elided condition: the final word of the transfer is also LAST. */
9115 nvram_cmd |= NVRAM_CMD_LAST;
9117 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9118 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9119 (tp->nvram_jedecnum == JEDEC_ST) &&
9120 (nvram_cmd & NVRAM_CMD_FIRST)) {
9122 if ((ret = tg3_nvram_exec_cmd(tp,
9123 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9128 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9129 /* We always do complete word writes to eeprom. */
9130 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9133 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9139 /* offset and length are dword aligned */
/* Top-level NVRAM write: reject Sun 570X boards, temporarily drop the
 * hardware write-protect GPIO if set, then dispatch to the EEPROM,
 * buffered, or unbuffered write path.  NVRAM-interface writes run under
 * the NVRAM lock with access and GRC write-enable asserted, all of which
 * are restored afterwards (as is the write-protect GPIO).
 */
9140 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9144 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9145 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
/* Deassert GPIO_OUTPUT1 to lift the external write protect. */
9149 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9150 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9151 ~GRC_LCLCTRL_GPIO_OUTPUT1);
9155 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9156 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9161 ret = tg3_nvram_lock(tp);
9165 tg3_enable_nvram_access(tp);
9166 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9167 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9168 tw32(NVRAM_WRITE1, 0x406);
9170 grc_mode = tr32(GRC_MODE);
9171 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9173 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9174 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9176 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9180 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9184 grc_mode = tr32(GRC_MODE);
9185 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9187 tg3_disable_nvram_access(tp);
9188 tg3_nvram_unlock(tp);
/* Re-assert the write-protect GPIO if it was originally set. */
9191 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9192 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/*
 * Hardcoded mapping from PCI subsystem (vendor, device) IDs to the PHY
 * chip used on that board.  Consulted by lookup_by_subsys() when the
 * eeprom carries no usable PHY ID.  A phy_id of 0 means "serdes/fiber
 * board, no copper PHY" -- TODO(review) confirm; the struct's phy_id
 * member line is missing from this extraction.
 */
9199 struct subsys_tbl_ent {
9200 u16 subsys_vendor, subsys_devid;
9204 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9205 /* Broadcom boards. */
9206 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9207 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9208 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9209 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9210 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9211 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9212 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9213 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9214 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9215 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9216 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
/* 3com boards. */
9219 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9220 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9221 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9222 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9223 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
/* DELL boards. */
9226 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9227 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9228 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9229 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9231 /* Compaq boards. */
9232 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9233 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9234 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9235 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9236 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
/* IBM boards. */
9239 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/*
 * lookup_by_subsys(): linear search of subsys_id_to_phy_id[] for an
 * entry matching this device's PCI subsystem vendor and device IDs.
 * Returns a pointer to the matching entry; the "no match" return path
 * (presumably NULL) is outside this extraction -- verify upstream.
 */
9242 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9246 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9247 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9248 tp->pdev->subsystem_vendor) &&
9249 (subsys_id_to_phy_id[i].subsys_devid ==
9250 tp->pdev->subsystem_device))
9251 return &subsys_id_to_phy_id[i];
9256 /* Since this function may be called in D3-hot power state during
9257 * tg3_init_one(), only config cycles are allowed.
/*
 * tg3_get_eeprom_hw_cfg(): read the bootcode-provided NIC configuration
 * out of NIC SRAM and translate it into tp->phy_id, tp->led_ctrl, and
 * various tg3_flags/tg3_flags2 capability bits (write protect, ASF,
 * serdes mode, WOL, pre-emphasis).
 *
 * NOTE(review): extraction is missing intermediate lines (numbering
 * jumps); switch/brace structure must be confirmed against upstream.
 */
9259 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9263 /* Make sure register accesses (indirect or otherwise)
9264 * will function correctly.
9266 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9267 tp->misc_host_ctrl);
/* Defaults used when no valid SRAM signature is found. */
9269 tp->phy_id = PHY_ID_INVALID;
9270 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9272 /* Do not even try poking around in here on Sun parts. */
9273 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
/* Only trust the SRAM config area if the bootcode signature checks out. */
9276 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9277 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9278 u32 nic_cfg, led_cfg;
9279 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9280 int eeprom_phy_serdes = 0;
9282 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9283 tp->nic_sram_data_cfg = nic_cfg;
/* The CFG_2 word only exists on newer chips / bootcode versions. */
9285 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9286 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9287 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9288 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9289 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9290 (ver > 0) && (ver < 0x100))
9291 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9293 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9294 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9295 eeprom_phy_serdes = 1;
/* Reassemble the PHY ID from the two packed SRAM halves into the
 * same layout tg3_phy_probe() builds from MII_PHYSID1/2. */
9297 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9298 if (nic_phy_id != 0) {
9299 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9300 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9302 eeprom_phy_id = (id1 >> 16) << 10;
9303 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9304 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9308 tp->phy_id = eeprom_phy_id;
9309 if (eeprom_phy_serdes) {
9310 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9311 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9313 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* 5750+ (Shasta) keeps extended LED mode bits in CFG_2. */
9316 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9317 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9318 SHASTA_EXT_LED_MODE_MASK);
9320 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9324 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9325 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9328 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9329 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9332 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9333 tp->led_ctrl = LED_CTRL_MODE_MAC;
9335 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9336 * read on some older 5700/5701 bootcode.
9338 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9340 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9342 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
/* Early 5750 A0/A1 need the PHY LED bits forced on in shared mode. */
9346 case SHASTA_EXT_LED_SHARED:
9347 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9348 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9349 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9350 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9351 LED_CTRL_MODE_PHY_2);
9354 case SHASTA_EXT_LED_MAC:
9355 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9358 case SHASTA_EXT_LED_COMBO:
9359 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9360 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9361 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9362 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards wire the LEDs for PHY_2 mode regardless. */
9367 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9369 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9370 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9372 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9373 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9374 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9375 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9377 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9378 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9379 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9380 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9382 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9383 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9385 if (cfg2 & (1 << 17))
9386 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9388 /* serdes signal pre-emphasis in register 0x590 set by */
9389 /* bootcode if bit 18 is set */
9390 if (cfg2 & (1 << 18))
9391 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
/*
 * tg3_phy_probe(): determine the PHY attached to this chip and record it
 * in tp->phy_id.  Order of trust: live MII PHYSID registers (unless ASF
 * firmware owns the PHY), then the value tg3_get_eeprom_hw_cfg() found,
 * then the hardcoded subsystem-ID table.  For copper PHYs, also makes
 * sure autonegotiation advertises all supported modes.
 *
 * NOTE(review): extraction is missing intermediate lines (numbering
 * jumps); error/label structure must be confirmed against upstream.
 */
9395 static int __devinit tg3_phy_probe(struct tg3 *tp)
9397 u32 hw_phy_id_1, hw_phy_id_2;
9398 u32 hw_phy_id, hw_phy_id_masked;
9401 /* Reading the PHY ID register can conflict with ASF
9402 * firwmare access to the PHY hardware.
9405 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9406 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9408 /* Now read the physical PHY_ID from the chip and verify
9409 * that it is sane. If it doesn't look good, we fall back
9410 * to either the hard-coded table based PHY_ID and failing
9411 * that the value found in the eeprom area.
9413 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9414 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack PHYSID1/2 into the driver's internal PHY-ID layout (same
 * packing as the SRAM path in tg3_get_eeprom_hw_cfg()). */
9416 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9417 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9418 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9420 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
/* Hardware gave us a recognizable PHY: trust it. */
9423 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9424 tp->phy_id = hw_phy_id;
9425 if (hw_phy_id_masked == PHY_ID_BCM8002)
9426 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9428 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9430 if (tp->phy_id != PHY_ID_INVALID) {
9431 /* Do nothing, phy ID already set up in
9432 * tg3_get_eeprom_hw_cfg().
9435 struct subsys_tbl_ent *p;
9437 /* No eeprom signature? Try the hardcoded
9438 * subsys device table.
9440 p = lookup_by_subsys(tp);
9444 tp->phy_id = p->phy_id;
9446 tp->phy_id == PHY_ID_BCM8002)
9447 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY, not ASF-managed: make sure the link is either up or
 * the PHY is reset and advertising everything it supports. */
9451 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9452 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9453 u32 bmsr, adv_reg, tg3_ctrl;
/* BMSR latches link-down; read twice so the second read reflects
 * the current link state. */
9455 tg3_readphy(tp, MII_BMSR, &bmsr);
9456 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9457 (bmsr & BMSR_LSTATUS))
9458 goto skip_phy_reset;
9460 err = tg3_phy_reset(tp);
9464 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9465 ADVERTISE_100HALF | ADVERTISE_100FULL |
9466 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9468 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9469 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9470 MII_TG3_CTRL_ADV_1000_FULL);
/* 5701 A0/B0 erratum: force master mode for gigabit. */
9471 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9472 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9473 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9474 MII_TG3_CTRL_ENABLE_AS_MASTER);
/* Only rewrite advertisement and restart autoneg if the PHY is
 * not already advertising the full set. */
9477 if (!tg3_copper_is_advertising_all(tp)) {
9478 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9480 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9481 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9483 tg3_writephy(tp, MII_BMCR,
9484 BMCR_ANENABLE | BMCR_ANRESTART);
9486 tg3_phy_set_wirespeed(tp);
9488 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9489 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9490 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP coefficients loaded after reset. */
9494 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9495 err = tg3_init_5401phy_dsp(tp);
9500 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9501 err = tg3_init_5401phy_dsp(tp);
9504 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9505 tp->link_config.advertising =
9506 (ADVERTISED_1000baseT_Half |
9507 ADVERTISED_1000baseT_Full |
9508 ADVERTISED_Autoneg |
9510 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9511 tp->link_config.advertising &=
9512 ~(ADVERTISED_1000baseT_Half |
9513 ADVERTISED_1000baseT_Full);
/*
 * tg3_read_partno(): fetch the board part number string from the VPD
 * area into tp->board_part_number.  VPD data is pulled either directly
 * from NVRAM (when the eeprom magic matches) or via the PCI VPD
 * capability, then parsed for the "PN" keyword.  Falls back to "none"
 * (or "Sun 570X" on Sun parts) when no part number is found.
 *
 * NOTE(review): extraction is missing intermediate lines (numbering
 * jumps), including loop increments and goto labels; confirm upstream.
 */
9518 static void __devinit tg3_read_partno(struct tg3 *tp)
9520 unsigned char vpd_data[256];
9524 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9525 /* Sun decided not to put the necessary bits in the
9526 * NVRAM of their onboard tg3 parts :(
9528 strcpy(tp->board_part_number, "Sun 570X");
9532 if (tg3_nvram_read(tp, 0x0, &magic))
/* NVRAM path: copy 256 bytes of VPD starting at offset 0x100,
 * unpacking each little-endian dword into bytes. */
9535 if (swab32(magic) == TG3_EEPROM_MAGIC) {
9536 for (i = 0; i < 256; i += 4) {
9539 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9542 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9543 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9544 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9545 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
/* PCI VPD capability path: write the address register, poll for
 * completion, then read the data register dword by dword. */
9550 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9551 for (i = 0; i < 256; i += 4) {
9555 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9558 pci_read_config_word(tp->pdev, vpd_cap +
9559 PCI_VPD_ADDR, &tmp16);
9564 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9566 tmp = cpu_to_le32(tmp);
9567 memcpy(&vpd_data[i], &tmp, 4);
9571 /* Now parse and find the part number. */
9572 for (i = 0; i < 256; ) {
9573 unsigned char val = vpd_data[i];
/* 0x82 = identifier-string tag, 0x91 = read-only VPD tag; both
 * carry a 16-bit little-endian length after the tag byte. */
9576 if (val == 0x82 || val == 0x91) {
9579 (vpd_data[i + 2] << 8)));
9586 block_end = (i + 3 +
9588 (vpd_data[i + 2] << 8)));
/* Scan keyword entries inside the block for "PN" (part number). */
9590 while (i < block_end) {
9591 if (vpd_data[i + 0] == 'P' &&
9592 vpd_data[i + 1] == 'N') {
9593 int partno_len = vpd_data[i + 2];
/* board_part_number is a fixed buffer; cap the copy length. */
9595 if (partno_len > 24)
9598 memcpy(tp->board_part_number,
9607 /* Part number not found. */
9612 strcpy(tp->board_part_number, "none");
9615 #ifdef CONFIG_SPARC64
/*
 * tg3_is_sun_570X(): SPARC64-only probe deciding whether this device is
 * a Sun onboard 570X part (which lacks usable NVRAM).  Checks the OBP
 * "subsystem-vendor-id" property, then falls back to the firmware node
 * name, per the comment below.  Returns nonzero for Sun parts
 * (presumably -- the return statements are outside this extraction).
 */
9616 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9618 struct pci_dev *pdev = tp->pdev;
9619 struct pcidev_cookie *pcp = pdev->sysdata;
9622 int node = pcp->prom_node;
9626 err = prom_getproperty(node, "subsystem-vendor-id",
9627 (char *) &venid, sizeof(venid));
/* prom_getproperty() returns 0 or -1 on failure. */
9628 if (err == 0 || err == -1)
9630 if (venid == PCI_VENDOR_ID_SUN)
9633 /* TG3 chips onboard the SunBlade-2500 don't have the
9634 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9635 * are distinguishable from non-Sun variants by being
9636 * named "network" by the firmware. Non-Sun cards will
9637 * show up as being named "ethernet".
9639 if (!strcmp(pcp->prom_name, "network"))
/*
 * tg3_get_invariants(): one-time probe-path discovery of everything
 * fixed about this chip: chip revision, host-bridge workarounds, PCI /
 * PCI-X / PCI Express mode, register access method selection, eeprom
 * config, GPIO setup, power state, and the long list of per-revision
 * hardware bug flags.  Must run before any fast-path register access.
 *
 * NOTE(review): extraction is missing intermediate lines (numbering
 * jumps); statement ordering here is deliberate and order-sensitive --
 * do not restructure without the full upstream source.
 */
9646 static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Host bridges known to reorder posted writes to mailbox registers. */
9648 static struct pci_device_id write_reorder_chipsets[] = {
9649 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9650 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9651 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9652 PCI_DEVICE_ID_VIA_8385_0) },
9656 u32 cacheline_sz_reg;
9657 u32 pci_state_reg, grc_misc_cfg;
9662 #ifdef CONFIG_SPARC64
9663 if (tg3_is_sun_570X(tp))
9664 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9667 /* Force memory write invalidate off. If we leave it on,
9668 * then on 5700_BX chips we have to enable a workaround.
9669 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9670 * to match the cacheline size. The Broadcom driver have this
9671 * workaround but turns MWI off all the times so never uses
9672 * it. This seems to suggest that the workaround is insufficient.
9674 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9675 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9676 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9678 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9679 * has the register indirect write enable bit set before
9680 * we try to access any of the MMIO registers. It is also
9681 * critical that the PCI-X hw workaround situation is decided
9682 * before that as well.
9684 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9687 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9688 MISC_HOST_CTRL_CHIPREV_SHIFT);
9690 /* Wrong chip ID in 5752 A0. This code can be removed later
9691 * as A0 is not in production.
9693 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9694 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9696 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9697 * we need to disable memory and use config. cycles
9698 * only to access all registers. The 5702/03 chips
9699 * can mistakenly decode the special cycles from the
9700 * ICH chipsets as memory write cycles, causing corruption
9701 * of register and memory space. Only certain ICH bridges
9702 * will drive special cycles with non-zero data during the
9703 * address phase which can fall within the 5703's address
9704 * range. This is not an ICH bug as the PCI spec allows
9705 * non-zero address during special cycles. However, only
9706 * these ICH bridges are known to drive non-zero addresses
9707 * during special cycles.
9709 * Since special cycles do not cross PCI bridges, we only
9710 * enable this workaround if the 5703 is on the secondary
9711 * bus of these ICH bridges.
9713 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9714 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9715 static struct tg3_dev_id {
9719 } ich_chipsets[] = {
9720 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9722 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9724 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9726 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9730 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9731 struct pci_dev *bridge = NULL;
/* Walk the ICH table; for each bridge present, check revision and
 * whether this tg3 sits on its secondary bus. */
9733 while (pci_id->vendor != 0) {
9734 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9740 if (pci_id->rev != PCI_ANY_ID) {
9743 pci_read_config_byte(bridge, PCI_REVISION_ID,
9745 if (rev > pci_id->rev)
9748 if (bridge->subordinate &&
9749 (bridge->subordinate->number ==
9750 tp->pdev->bus->number)) {
9752 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9753 pci_dev_put(bridge);
9759 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9760 * DMA addresses > 40-bit. This bridge may have other additional
9761 * 57xx devices behind it in some 4-port NIC designs for example.
9762 * Any tg3 device found behind the bridge will also need the 40-bit
9765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9767 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9768 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9769 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9772 struct pci_dev *bridge = NULL;
/* Other devices behind a ServerWorks EPB bridge inherit the
 * 40-bit DMA limitation too. */
9775 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9776 PCI_DEVICE_ID_SERVERWORKS_EPB,
9778 if (bridge && bridge->subordinate &&
9779 (bridge->subordinate->number <=
9780 tp->pdev->bus->number) &&
9781 (bridge->subordinate->subordinate >=
9782 tp->pdev->bus->number)) {
9783 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9784 pci_dev_put(bridge);
9790 /* Initialize misc host control in PCI block. */
9791 tp->misc_host_ctrl |= (misc_ctrl_reg &
9792 MISC_HOST_CTRL_CHIPREV);
9793 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9794 tp->misc_host_ctrl);
/* Unpack the cacheline-size register: one byte per field. */
9796 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9799 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9800 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9801 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9802 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
/* Derive the chip-generation flags used throughout the driver. */
9804 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9807 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9808 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9810 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9811 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9812 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9814 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9815 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9817 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9818 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9819 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
9820 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
9821 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9823 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9824 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9826 /* If we have an AMD 762 or VIA K8T800 chipset, write
9827 * reordering to the mailbox registers done by the host
9828 * controller can cause major troubles. We read back from
9829 * every mailbox register write to force the writes to be
9830 * posted to the chip in order.
9832 if (pci_dev_present(write_reorder_chipsets) &&
9833 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9834 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
/* 5703 wants a minimum PCI latency timer of 64; rebuild and write
 * back the packed cacheline-size register if we changed it. */
9836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9837 tp->pci_lat_timer < 64) {
9838 tp->pci_lat_timer = 64;
9840 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9841 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9842 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9843 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9845 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9849 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
/* Not in conventional PCI mode => the bus is running PCI-X. */
9852 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9853 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9855 /* If this is a 5700 BX chipset, and we are in PCI-X
9856 * mode, enable register write workaround.
9858 * The workaround is to use indirect register accesses
9859 * for all chip writes not to mailbox registers.
9861 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9865 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9867 /* The chip can have it's power management PCI config
9868 * space registers clobbered due to this bug.
9869 * So explicitly force the chip into D0 here.
9871 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9873 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9874 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9875 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9878 /* Also, force SERR#/PERR# in PCI command. */
9879 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9880 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9881 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9885 /* 5700 BX chips need to have their TX producer index mailboxes
9886 * written twice to workaround a bug.
9888 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9889 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9891 /* Back to back register writes can cause problems on this chip,
9892 * the workaround is to read back all reg writes except those to
9893 * mailbox regs. See tg3_write_indirect_reg32().
9895 * PCI Express 5750_A0 rev chips need this workaround too.
9897 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9898 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9899 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9900 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9902 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9903 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9904 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9905 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9907 /* Chip-specific fixup from Broadcom driver */
9908 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9909 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9910 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9911 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9914 /* Default fast path register access methods */
9915 tp->read32 = tg3_read32;
9916 tp->write32 = tg3_write32;
9917 tp->read32_mbox = tg3_read32;
9918 tp->write32_mbox = tg3_write32;
9919 tp->write32_tx_mbox = tg3_write32;
9920 tp->write32_rx_mbox = tg3_write32;
9922 /* Various workaround register access methods */
9923 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9924 tp->write32 = tg3_write_indirect_reg32;
9925 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9926 tp->write32 = tg3_write_flush_reg32;
9928 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9929 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9930 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9931 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9932 tp->write32_rx_mbox = tg3_write_flush_reg32;
/* ICH workaround: all register and mailbox access goes through
 * PCI config-space indirect cycles; MMIO is unsafe. */
9935 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9936 tp->read32 = tg3_read_indirect_reg32;
9937 tp->write32 = tg3_write_indirect_reg32;
9938 tp->read32_mbox = tg3_read_indirect_mbox;
9939 tp->write32_mbox = tg3_write_indirect_mbox;
9940 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9941 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9946 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9947 pci_cmd &= ~PCI_COMMAND_MEMORY;
9948 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9951 /* Get eeprom hw config before calling tg3_set_power_state().
9952 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9953 * determined before calling tg3_set_power_state() so that
9954 * we know whether or not to switch out of Vaux power.
9955 * When the flag is set, it means that GPIO1 is used for eeprom
9956 * write protect and also implies that it is a LOM where GPIOs
9957 * are not used to switch power.
9959 tg3_get_eeprom_hw_cfg(tp);
9961 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9962 * GPIO1 driven high will bring 5700's external PHY out of reset.
9963 * It is also used as eeprom write protect on LOMs.
9965 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9966 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9967 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9968 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9969 GRC_LCLCTRL_GPIO_OUTPUT1);
9970 /* Unused GPIO3 must be driven as output on 5752 because there
9971 * are no pull-up resistors on unused GPIO pins.
9973 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9974 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9976 /* Force the chip into D0. */
9977 err = tg3_set_power_state(tp, PCI_D0);
9979 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9980 pci_name(tp->pdev));
9984 /* 5700 B0 chips do not support checksumming correctly due
9987 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9988 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9990 /* Pseudo-header checksum is done by hardware logic and not
9991 * the offload processers, so make the chip do the pseudo-
9992 * header checksums on receive. For transmit it is more
9993 * convenient to do the pseudo-header checksum in software
9994 * as Linux does that on transmit for us in all cases.
9996 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9997 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9999 /* Derive initial jumbo mode from MTU assigned in
10000 * ether_setup() via the alloc_etherdev() call
10002 if (tp->dev->mtu > ETH_DATA_LEN &&
10003 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10004 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10006 /* Determine WakeOnLan speed to use. */
10007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10008 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10009 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10010 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10011 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10013 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10016 /* A few boards don't want Ethernet@WireSpeed phy feature */
10017 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10018 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10019 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10020 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10021 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10022 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
/* Per-revision PHY erratum flags. */
10024 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10025 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10026 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10027 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10028 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10030 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10031 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10032 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10034 tp->coalesce_mode = 0;
10035 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10036 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10037 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10039 /* Initialize MAC MI mode, polling disabled. */
10040 tw32_f(MAC_MI_MODE, tp->mi_mode);
10043 /* Initialize data/descriptor byte/word swapping. */
10044 val = tr32(GRC_MODE);
10045 val &= GRC_MODE_HOST_STACKUP;
10046 tw32(GRC_MODE, val | tp->grc_mode);
10048 tg3_switch_clocks(tp);
10050 /* Clear this out for sanity. */
10051 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10053 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10055 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10056 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10057 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10059 if (chiprevid == CHIPREV_ID_5701_A0 ||
10060 chiprevid == CHIPREV_ID_5701_B0 ||
10061 chiprevid == CHIPREV_ID_5701_B2 ||
10062 chiprevid == CHIPREV_ID_5701_B5) {
10063 void __iomem *sram_base;
10065 /* Write some dummy words into the SRAM status block
10066 * area, see if it reads back correctly. If the return
10067 * value is bad, force enable the PCIX workaround.
10069 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10071 writel(0x00000000, sram_base);
10072 writel(0x00000000, sram_base + 4);
10073 writel(0xffffffff, sram_base + 4);
10074 if (readl(sram_base) != 0x00000000)
10075 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10080 tg3_nvram_init(tp);
10082 grc_misc_cfg = tr32(GRC_MISC_CFG);
10083 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10085 /* Broadcom's driver says that CIOBE multisplit has a bug */
10087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10088 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10089 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10090 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10094 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10095 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10096 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
/* 5788 boards cannot use tagged status; everyone else (except
 * 5700) gets it, plus the matching coalescing/host-ctrl bits. */
10098 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10099 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10100 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10101 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10102 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10103 HOSTCC_MODE_CLRTICK_TXBD);
10105 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10106 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10107 tp->misc_host_ctrl);
10110 /* these are limited to 10/100 only */
10111 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10112 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10113 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10114 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10115 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10116 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10117 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10118 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10119 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10120 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10121 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10123 err = tg3_phy_probe(tp);
10125 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10126 pci_name(tp->pdev), err);
10127 /* ... but do not return immediately ... */
10130 tg3_read_partno(tp);
10132 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10133 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10136 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10138 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10141 /* 5700 {AX,BX} chips have a broken status block link
10142 * change bit implementation, so we must use the
10143 * status register in those cases.
10145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10146 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10148 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10150 /* The led_ctrl is set during tg3_phy_probe, here we might
10151 * have to force the link status polling mechanism based
10152 * upon subsystem IDs.
10154 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10155 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10156 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10157 TG3_FLAG_USE_LINKCHG_REG);
10160 /* For all SERDES we poll the MAC status register. */
10161 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10162 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10164 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10166 /* It seems all chips can get confused if TX buffers
10167 * straddle the 4GB address boundary in some cases.
10169 tp->dev->hard_start_xmit = tg3_start_xmit;
10172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10173 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10176 /* By default, disable wake-on-lan. User can change this
10177 * using ETHTOOL_SWOL.
10179 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10184 #ifdef CONFIG_SPARC64
/*
 * tg3_get_macaddr_sparc(): SPARC64-only MAC address lookup from the OBP
 * "local-mac-address" property of this device's firmware node.  On
 * success the address lands in dev->dev_addr/perm_addr; the return
 * values themselves are outside this extraction -- confirm upstream
 * (callers treat 0 as success, see tg3_get_device_address()).
 */
10185 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10187 struct net_device *dev = tp->dev;
10188 struct pci_dev *pdev = tp->pdev;
10189 struct pcidev_cookie *pcp = pdev->sysdata;
10192 int node = pcp->prom_node;
/* Only accept the property if it is exactly 6 bytes (an Ethernet MAC). */
10194 if (prom_getproplen(node, "local-mac-address") == 6) {
10195 prom_getproperty(node, "local-mac-address",
10197 memcpy(dev->perm_addr, dev->dev_addr, 6);
/*
 * tg3_get_default_macaddr_sparc(): last-resort SPARC64 MAC source --
 * copy the system IDPROM Ethernet address into both dev_addr and
 * perm_addr.
 */
10204 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10206 struct net_device *dev = tp->dev;
10208 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10209 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/*
 * tg3_get_device_address(): populate dev->dev_addr, trying sources in
 * order: SPARC OBP property, the NIC SRAM MAC mailbox (bootcode),
 * NVRAM, and finally the live MAC address registers.  Falls back to the
 * SPARC IDPROM if the result is not a valid Ethernet address.
 *
 * NOTE(review): extraction is missing intermediate lines (numbering
 * jumps), including mac_offset selection for dual-MAC parts.
 */
10214 static int __devinit tg3_get_device_address(struct tg3 *tp)
10216 struct net_device *dev = tp->dev;
10217 u32 hi, lo, mac_offset;
10219 #ifdef CONFIG_SPARC64
10220 if (!tg3_get_macaddr_sparc(tp))
/* Dual-MAC parts (5704/5780-class): the second function uses a
 * different NVRAM mac_offset; reset NVRAM state while we hold the lock. */
10225 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10226 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
10227 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10228 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10230 if (tg3_nvram_lock(tp))
10231 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10233 tg3_nvram_unlock(tp);
10236 /* First try to get it from MAC address mailbox. */
10237 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b == "HK": bootcode signature marking a valid mailbox entry. */
10238 if ((hi >> 16) == 0x484b) {
10239 dev->dev_addr[0] = (hi >> 8) & 0xff;
10240 dev->dev_addr[1] = (hi >> 0) & 0xff;
10242 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10243 dev->dev_addr[2] = (lo >> 24) & 0xff;
10244 dev->dev_addr[3] = (lo >> 16) & 0xff;
10245 dev->dev_addr[4] = (lo >> 8) & 0xff;
10246 dev->dev_addr[5] = (lo >> 0) & 0xff;
10248 /* Next, try NVRAM. */
10249 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
10250 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10251 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10252 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10253 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10254 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10255 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10256 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10257 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10259 /* Finally just fetch it out of the MAC control regs. */
10261 hi = tr32(MAC_ADDR_0_HIGH);
10262 lo = tr32(MAC_ADDR_0_LOW);
10264 dev->dev_addr[5] = lo & 0xff;
10265 dev->dev_addr[4] = (lo >> 8) & 0xff;
10266 dev->dev_addr[3] = (lo >> 16) & 0xff;
10267 dev->dev_addr[2] = (lo >> 24) & 0xff;
10268 dev->dev_addr[1] = hi & 0xff;
10269 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Reject multicast/zero addresses; on SPARC fall back to the IDPROM. */
10272 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10273 #ifdef CONFIG_SPARC64
10274 if (!tg3_get_default_macaddr_sparc(tp))
10279 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* DMA-boundary policy selectors (0/absent = let the chip burst freely). */
10283 #define BOUNDARY_SINGLE_CACHELINE 1
10284 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA_RWCTRL read/write boundary bits to OR into 'val',
 * based on the host cache line size, the bus type (PCI / PCI-X / PCIE)
 * and an arch-dependent policy goal.
 * NOTE(review): many case labels, default arms and closing braces are
 * missing from this extract; the structure commented here is inferred
 * and must be checked against the full source.
 */
10286 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10288 int cacheline_size;
10292 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero cache-line-size register is presumably treated as "unknown",
 * mapped to a large 1024-byte value — TODO confirm. */
10294 cacheline_size = 1024;
/* PCI_CACHE_LINE_SIZE is in 32-bit words; convert to bytes. */
10296 cacheline_size = (int) byte * 4;
10298 /* On 5703 and later chips, the boundary bits have no
10301 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10302 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10303 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Per-arch policy: PPC64/IA64/PARISC tolerate multi-cacheline bursts;
 * sparc64/alpha want bursts confined to a single cache line. */
10306 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10307 goal = BOUNDARY_MULTI_CACHELINE;
10309 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10310 goal = BOUNDARY_SINGLE_CACHELINE;
10319 /* PCI controllers on most RISC systems tend to disconnect
10320 * when a device tries to burst across a cache-line boundary.
10321 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10323 * Unfortunately, for PCI-E there are only limited
10324 * write-side controls for this, and thus for reads
10325 * we will still get the disconnects. We'll also waste
10326 * these PCI cycles for both read and write for chips
10327 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary encodings differ from plain PCI. */
10330 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10331 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10332 switch (cacheline_size) {
10337 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10338 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10339 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10341 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10342 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10347 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10348 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10352 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10353 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only the write-side boundary is controllable. */
10356 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10357 switch (cacheline_size) {
10361 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10362 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10363 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10369 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10370 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: boundary matches the cache line size when the goal
 * is single-cacheline. */
10374 switch (cacheline_size) {
10376 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10377 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10378 DMA_RWCTRL_WRITE_BNDRY_16);
10383 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10384 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10385 DMA_RWCTRL_WRITE_BNDRY_32);
10390 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10391 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10392 DMA_RWCTRL_WRITE_BNDRY_64);
10397 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10398 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10399 DMA_RWCTRL_WRITE_BNDRY_128);
10404 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10405 DMA_RWCTRL_WRITE_BNDRY_256);
10408 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10409 DMA_RWCTRL_WRITE_BNDRY_512);
10413 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10414 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one DMA transfer through the chip's internal DMA engine to or
 * from the host buffer at buf/buf_dma, using a hand-built internal
 * buffer descriptor placed in NIC SRAM, then poll the completion FIFO.
 * to_device != 0 selects a read-DMA (host -> NIC), 0 a write-DMA.
 * NOTE(review): error paths, the polling udelay and return values are
 * missing from this extract.
 */
10423 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10425 struct tg3_internal_buffer_desc test_desc;
10426 u32 sram_dma_descs;
10429 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA completion FIFOs and status registers first. */
10431 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10432 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10433 tw32(RDMAC_STATUS, 0);
10434 tw32(WDMAC_STATUS, 0);
10436 tw32(BUFMGR_MODE, 0);
10437 tw32(FTQ_RESET, 0);
/* Descriptor points at the host buffer; 0x2100 is the NIC-side mbuf
 * address used as the on-chip staging area. */
10439 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10440 test_desc.addr_lo = buf_dma & 0xffffffff;
10441 test_desc.nic_mbuf = 0x00002100;
10442 test_desc.len = size;
10445 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10446 * the *second* time the tg3 driver was getting loaded after an
10449 * Broadcom tells me:
10450 * ...the DMA engine is connected to the GRC block and a DMA
10451 * reset may affect the GRC block in some unpredictable way...
10452 * The behavior of resets to individual blocks has not been tested.
10454 * Broadcom noted the GRC reset will also reset all sub-components.
/* cqid_sqid selects the completion/submission queue pair for the
 * direction under test; the enabled DMA engine matches it. */
10457 test_desc.cqid_sqid = (13 << 8) | 2;
10459 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10462 test_desc.cqid_sqid = (16 << 8) | 7;
10464 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10467 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM via the PCI memory
 * window (indirect register access). */
10469 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10472 val = *(((u32 *)&test_desc) + i);
10473 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10474 sram_dma_descs + (i * sizeof(u32)));
10475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10477 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the appropriate read/write DMA FIFO with the descriptor addr. */
10480 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10482 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded to 40 iterations) until the completion FIFO echoes our
 * descriptor address back. */
10486 for (i = 0; i < 40; i++) {
10490 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10492 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10493 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the consistent-DMA scratch buffer used by tg3_test_dma(). */
10504 #define TEST_BUFFER_SIZE 0x2000
/* Probe-time DMA self-test: compute a chip/bus-appropriate value for
 * tp->dma_rwctrl, then (on 5700/5701 only) write a test pattern out to
 * the card and read it back, tightening the write boundary to 16 bytes
 * if corruption is observed.
 * NOTE(review): several branches, the retry loop and early-return
 * lines are missing from this extract; comments describe only what is
 * visible.
 */
10506 static int __devinit tg3_test_dma(struct tg3 *tp)
10508 dma_addr_t buf_dma;
10509 u32 *buf, saved_dma_rwctrl;
10512 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Base value: PCI write/read command codes 0x7/0x6. */
10518 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10519 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10521 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Watermark / workaround bits are bus- and ASIC-specific magic from
 * Broadcom; values below match the vendor-recommended settings. */
10523 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10524 /* DMA read watermark not used on PCIE */
10525 tp->dma_rwctrl |= 0x00180000;
10526 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10529 tp->dma_rwctrl |= 0x003f0000;
10531 tp->dma_rwctrl |= 0x003f000f;
10533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10535 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10537 /* If the 5704 is behind the EPB bridge, we can
10538 * do the less restrictive ONE_DMA workaround for
10539 * better performance.
10541 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10543 tp->dma_rwctrl |= 0x8000;
10544 else if (ccval == 0x6 || ccval == 0x7)
10545 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10547 /* Set bit 23 to enable PCIX hw bug fix */
10548 tp->dma_rwctrl |= 0x009f0000;
10549 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10550 /* 5780 always in PCIX mode */
10551 tp->dma_rwctrl |= 0x00144000;
10552 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10553 /* 5714 always in PCIX mode */
10554 tp->dma_rwctrl |= 0x00148000;
10556 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: clear the low nibble (boundary bits repurposed there). */
10560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10562 tp->dma_rwctrl &= 0xfffffff0;
10564 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10566 /* Remove this if it causes problems for some boards. */
10567 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10569 /* On 5700/5701 chips, we need to set this bit.
10570 * Otherwise the chip will issue cacheline transactions
10571 * to streamable DMA memory with not all the byte
10572 * enables turned on. This is an error on several
10573 * RISC PCI controllers, in particular sparc64.
10575 * On 5703/5704 chips, this bit has been reassigned
10576 * a different meaning. In particular, it is used
10577 * on those chips to enable a PCI-X workaround.
10579 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10582 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10585 /* Unneeded, already done by tg3_get_invariants. */
10586 tg3_switch_clocks(tp);
/* The write-DMA-bug loopback test only applies to 5700/5701. */
10590 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10591 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10594 /* It is best to perform DMA test with maximum write burst size
10595 * to expose the 5700/5701 write DMA bug.
10597 saved_dma_rwctrl = tp->dma_rwctrl;
10598 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10599 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a test pattern (pattern line not visible). */
10604 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10607 /* Send the buffer to the chip. */
10608 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10610 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10615 /* validate data reached card RAM correctly. */
10616 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10618 tg3_read_mem(tp, 0x2100 + (i*4), &val);
10619 if (le32_to_cpu(val) != p[i]) {
10620 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
10621 /* ret = -ENODEV here? */
10626 /* Now read it back. */
10627 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10629 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10635 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* On readback mismatch: tighten the write boundary to 16 bytes and
 * presumably retry (retry control flow not visible here). */
10639 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10640 DMA_RWCTRL_WRITE_BNDRY_16) {
10641 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10642 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10643 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10646 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop ran to completion => every word verified. */
10652 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10658 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10659 DMA_RWCTRL_WRITE_BNDRY_16) {
/* Known host bridges that need the 16-byte boundary even though the
 * self-test passes (Apple UniNorth PCI). */
10660 static struct pci_device_id dma_wait_state_chipsets[] = {
10661 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10662 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10666 /* DMA test passed without adjusting DMA boundary,
10667 * now look for chipsets that are known to expose the
10668 * DMA bug without failing the test.
10670 if (pci_dev_present(dma_wait_state_chipsets)) {
10671 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10672 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10675 /* Safe to use the calculated DMA boundary. */
10676 tp->dma_rwctrl = saved_dma_rwctrl;
10678 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10682 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Initialize tp->link_config to "autonegotiate everything" defaults:
 * advertise all 10/100/1000 modes, mark speed/duplex as INVALID until
 * negotiation completes, and start with the carrier off.
 */
10687 static void __devinit tg3_init_link_config(struct tg3 *tp)
10689 tp->link_config.advertising =
10690 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10691 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10692 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10693 ADVERTISED_Autoneg | ADVERTISED_MII);
10694 tp->link_config.speed = SPEED_INVALID;
10695 tp->link_config.duplex = DUPLEX_INVALID;
10696 tp->link_config.autoneg = AUTONEG_ENABLE;
10697 netif_carrier_off(tp->dev);
10698 tp->link_config.active_speed = SPEED_INVALID;
10699 tp->link_config.active_duplex = DUPLEX_INVALID;
10700 tp->link_config.phy_is_low_power = 0;
/* orig_* fields hold pre-suspend settings; invalid until first save. */
10701 tp->link_config.orig_speed = SPEED_INVALID;
10702 tp->link_config.orig_duplex = DUPLEX_INVALID;
10703 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Select buffer-manager watermark defaults: the 5705+ family uses its
 * own reduced mbuf watermarks (with 5780-class jumbo values), older
 * chips use the original defaults.  DMA watermarks are common.
 */
10706 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10708 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10709 tp->bufmgr_config.mbuf_read_dma_low_water =
10710 DEFAULT_MB_RDMA_LOW_WATER_5705;
10711 tp->bufmgr_config.mbuf_mac_rx_low_water =
10712 DEFAULT_MB_MACRX_LOW_WATER_5705;
10713 tp->bufmgr_config.mbuf_high_water =
10714 DEFAULT_MB_HIGH_WATER_5705;
10716 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10717 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10718 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10719 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10720 tp->bufmgr_config.mbuf_high_water_jumbo =
10721 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10723 tp->bufmgr_config.mbuf_read_dma_low_water =
10724 DEFAULT_MB_RDMA_LOW_WATER;
10725 tp->bufmgr_config.mbuf_mac_rx_low_water =
10726 DEFAULT_MB_MACRX_LOW_WATER;
10727 tp->bufmgr_config.mbuf_high_water =
10728 DEFAULT_MB_HIGH_WATER;
10730 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10731 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10732 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10733 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10734 tp->bufmgr_config.mbuf_high_water_jumbo =
10735 DEFAULT_MB_HIGH_WATER_JUMBO;
10738 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10739 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id to a human-readable name for the probe
 * banner; id 0 means an on-board serdes with no MII PHY.
 */
10742 static char * __devinit tg3_phy_string(struct tg3 *tp)
10744 switch (tp->phy_id & PHY_ID_MASK) {
10745 case PHY_ID_BCM5400: return "5400";
10746 case PHY_ID_BCM5401: return "5401";
10747 case PHY_ID_BCM5411: return "5411";
10748 case PHY_ID_BCM5701: return "5701";
10749 case PHY_ID_BCM5703: return "5703";
10750 case PHY_ID_BCM5704: return "5704";
10751 case PHY_ID_BCM5705: return "5705";
10752 case PHY_ID_BCM5750: return "5750";
10753 case PHY_ID_BCM5752: return "5752";
10754 case PHY_ID_BCM5714: return "5714";
10755 case PHY_ID_BCM5780: return "5780";
10756 case PHY_ID_BCM5787: return "5787";
10757 case PHY_ID_BCM8002: return "8002/serdes";
10758 case 0: return "serdes";
10759 default: return "unknown";
/* Format a description of the bus the NIC sits on ("PCI Express",
 * "PCIX:<speed>", or "PCI:<speed>:<width>") into the caller-supplied
 * buffer 'str' and return it.  PCI-X speed is decoded from the low
 * bits of TG3PCI_CLOCK_CTRL.
 * NOTE(review): 'str' size is not checked here — caller must provide
 * a sufficiently large buffer.
 */
10763 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10765 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10766 strcpy(str, "PCI Express");
10768 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10769 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10771 strcpy(str, "PCIX:");
/* The 5704 CIOBE board id also implies 133MHz regardless of the
 * clock_ctrl encoding. */
10773 if ((clock_ctrl == 7) ||
10774 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10775 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10776 strcat(str, "133MHz");
10777 else if (clock_ctrl == 0)
10778 strcat(str, "33MHz");
10779 else if (clock_ctrl == 2)
10780 strcat(str, "50MHz");
10781 else if (clock_ctrl == 4)
10782 strcat(str, "66MHz");
10783 else if (clock_ctrl == 6)
10784 strcat(str, "100MHz");
10786 strcpy(str, "PCI:");
10787 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10788 strcat(str, "66MHz");
10790 strcat(str, "33MHz");
10792 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10793 strcat(str, ":32-bit");
10795 strcat(str, ":64-bit");
/* Locate the other PCI function of a dual-port 5704/5714 (same slot,
 * different function number).  Falls back to tp->pdev for single-port
 * configurations (fallback assignment not visible in this extract).
 */
10799 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10801 struct pci_dev *peer;
/* Mask off the function bits of devfn to get function 0's address. */
10802 unsigned int func, devnr = tp->pdev->devfn & ~7;
10804 for (func = 0; func < 8; func++) {
10805 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10806 if (peer && peer != tp->pdev)
10810 /* 5704 can be configured in single-port mode, set peer to
10811 * tp->pdev in that case.
10819 * We don't need to keep the refcount elevated; there's no way
10820 * to remove one half of this device without removing the other
/* Populate tp->coal with the driver's default interrupt-coalescing
 * parameters, adjusting for the CLRTICK host-coalescing mode and for
 * 5705+ chips, which do not support the per-interrupt fields.
 */
10827 static void __devinit tg3_init_coal(struct tg3 *tp)
10829 struct ethtool_coalesce *ec = &tp->coal;
10831 memset(ec, 0, sizeof(*ec));
10832 ec->cmd = ETHTOOL_GCOALESCE;
10833 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10834 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10835 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10836 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10837 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10838 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10839 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10840 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10841 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK mode uses distinct tick values. */
10843 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10844 HOSTCC_MODE_CLRTICK_TXBD)) {
10845 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10846 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10847 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10848 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ silicon lacks the *_irq and stats-block coalescing knobs. */
10851 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10852 ec->rx_coalesce_usecs_irq = 0;
10853 ec->tx_coalesce_usecs_irq = 0;
10854 ec->stats_block_coalesce_usecs = 0;
/* PCI probe entry point: enable and map the device, allocate the
 * net_device, read chip invariants, configure the DMA mask, run the
 * DMA self-test, obtain the MAC address and register the netdev.
 * Returns 0 on success; on failure unwinds via the err_out_* labels.
 * NOTE(review): this extract omits many lines (error-branch bodies,
 * closing braces, the success return) — comments describe only the
 * visible structure.
 */
10858 static int __devinit tg3_init_one(struct pci_dev *pdev,
10859 const struct pci_device_id *ent)
10861 static int tg3_version_printed = 0;
10862 unsigned long tg3reg_base, tg3reg_len;
10863 struct net_device *dev;
10865 int i, err, pm_cap;
10867 u64 dma_mask, persist_dma_mask;
/* Print the driver banner once, on the first probed device. */
10869 if (tg3_version_printed++ == 0)
10870 printk(KERN_INFO "%s", version);
10872 err = pci_enable_device(pdev);
10874 printk(KERN_ERR PFX "Cannot enable PCI device, "
/* BAR 0 must be a memory-mapped register window. */
10879 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10880 printk(KERN_ERR PFX "Cannot find proper PCI device "
10881 "base address, aborting.\n");
10883 goto err_out_disable_pdev;
10886 err = pci_request_regions(pdev, DRV_MODULE_NAME);
10888 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10890 goto err_out_disable_pdev;
10893 pci_set_master(pdev);
10895 /* Find power-management capability. */
10896 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10898 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10901 goto err_out_free_res;
10904 tg3reg_base = pci_resource_start(pdev, 0);
10905 tg3reg_len = pci_resource_len(pdev, 0);
10907 dev = alloc_etherdev(sizeof(*tp));
10909 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10911 goto err_out_free_res;
10914 SET_MODULE_OWNER(dev);
10915 SET_NETDEV_DEV(dev, &pdev->dev);
/* LLTX: driver does its own TX locking. */
10917 dev->features |= NETIF_F_LLTX;
10918 #if TG3_VLAN_TAG_USED
10919 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10920 dev->vlan_rx_register = tg3_vlan_rx_register;
10921 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10924 tp = netdev_priv(dev);
10927 tp->pm_cap = pm_cap;
10928 tp->mac_mode = TG3_DEF_MAC_MODE;
10929 tp->rx_mode = TG3_DEF_RX_MODE;
10930 tp->tx_mode = TG3_DEF_TX_MODE;
10931 tp->mi_mode = MAC_MI_MODE_BASE;
/* Module parameter tg3_debug overrides the default message mask. */
10933 tp->msg_enable = tg3_debug;
10935 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10937 /* The word/byte swap controls here control register access byte
10938 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10941 tp->misc_host_ctrl =
10942 MISC_HOST_CTRL_MASK_PCI_INT |
10943 MISC_HOST_CTRL_WORD_SWAP |
10944 MISC_HOST_CTRL_INDIR_ACCESS |
10945 MISC_HOST_CTRL_PCISTATE_RW;
10947 /* The NONFRM (non-frame) byte/word swap controls take effect
10948 * on descriptor entries, anything which isn't packet data.
10950 * The StrongARM chips on the board (one for tx, one for rx)
10951 * are running in big-endian mode.
10953 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10954 GRC_MODE_WSWAP_NONFRM_DATA);
10955 #ifdef __BIG_ENDIAN
10956 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10958 spin_lock_init(&tp->lock);
10959 spin_lock_init(&tp->tx_lock);
10960 spin_lock_init(&tp->indirect_lock);
10961 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10963 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10964 if (tp->regs == 0UL) {
10965 printk(KERN_ERR PFX "Cannot map device registers, "
10968 goto err_out_free_dev;
10971 tg3_init_link_config(tp);
10973 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10974 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10975 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up the pre-net_device_ops method pointers. */
10977 dev->open = tg3_open;
10978 dev->stop = tg3_close;
10979 dev->get_stats = tg3_get_stats;
10980 dev->set_multicast_list = tg3_set_rx_mode;
10981 dev->set_mac_address = tg3_set_mac_addr;
10982 dev->do_ioctl = tg3_ioctl;
10983 dev->tx_timeout = tg3_tx_timeout;
10984 dev->poll = tg3_poll;
10985 dev->ethtool_ops = &tg3_ethtool_ops;
10987 dev->watchdog_timeo = TG3_TX_TIMEOUT;
10988 dev->change_mtu = tg3_change_mtu;
10989 dev->irq = pdev->irq;
10990 #ifdef CONFIG_NET_POLL_CONTROLLER
10991 dev->poll_controller = tg3_poll_controller;
10994 err = tg3_get_invariants(tp);
10996 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10998 goto err_out_iounmap;
11001 /* The EPB bridge inside 5714, 5715, and 5780 and any
11002 * device behind the EPB cannot support DMA addresses > 40-bit.
11003 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11004 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11005 * do DMA address check in tg3_start_xmit().
11007 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11008 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11009 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11010 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11011 #ifdef CONFIG_HIGHMEM
11012 dma_mask = DMA_64BIT_MASK;
11015 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11017 /* Configure DMA attributes. */
11018 if (dma_mask > DMA_32BIT_MASK) {
11019 err = pci_set_dma_mask(pdev, dma_mask);
11021 dev->features |= NETIF_F_HIGHDMA;
11022 err = pci_set_consistent_dma_mask(pdev,
11025 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11026 "DMA for consistent allocations\n");
11027 goto err_out_iounmap;
/* Fall back to a 32-bit mask if the wide mask was refused. */
11031 if (err || dma_mask == DMA_32BIT_MASK) {
11032 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11034 printk(KERN_ERR PFX "No usable DMA configuration, "
11036 goto err_out_iounmap;
11040 tg3_init_bufmgr_config(tp);
11042 #if TG3_TSO_SUPPORT != 0
/* TSO capability: hardware TSO always qualifies; firmware TSO is
 * disabled on 5700/5701/5705-A0 and when ASF firmware is active. */
11043 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11044 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11046 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11048 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11049 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11050 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11052 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11055 /* TSO is on by default on chips that support hardware TSO.
11056 * Firmware TSO on older chips gives lower performance, so it
11057 * is off by default, but can be enabled using ethtool.
11059 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11060 dev->features |= NETIF_F_TSO;
/* 5705-A1 without TSO on a slow bus: shrink the RX ring to 64. */
11064 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11065 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11066 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11067 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11068 tp->rx_pending = 63;
11071 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11072 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11073 tp->pdev_peer = tg3_find_peer(tp);
11075 err = tg3_get_device_address(tp);
11077 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11079 goto err_out_iounmap;
11083 * Reset chip in case UNDI or EFI driver did not shutdown
11084 * DMA self test will enable WDMAC and we'll see (spurious)
11085 * pending DMA on the PCI bus at that point.
11087 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11088 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11089 pci_save_state(tp->pdev);
11090 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11091 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11094 err = tg3_test_dma(tp);
11096 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11097 goto err_out_iounmap;
11100 /* Tigon3 can do ipv4 only... and some chips have buggy
11103 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11104 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
11105 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11107 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11109 /* flow control autonegotiation is default behavior */
11110 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11114 /* Now that we have fully setup the chip, save away a snapshot
11115 * of the PCI config space. We need to restore this after
11116 * GRC_MISC_CFG core clock resets and some resume events.
11118 pci_save_state(tp->pdev);
11120 err = register_netdev(dev);
11122 printk(KERN_ERR PFX "Cannot register net device, "
11124 goto err_out_iounmap;
11127 pci_set_drvdata(pdev, dev);
/* Probe banner: part number, chip rev, PHY, bus, speeds, MAC. */
11129 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11131 tp->board_part_number,
11132 tp->pci_chip_rev_id,
11133 tg3_phy_string(tp),
11134 tg3_bus_string(tp, str),
11135 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11137 for (i = 0; i < 6; i++)
11138 printk("%2.2x%c", dev->dev_addr[i],
11139 i == 5 ? '\n' : ':');
11141 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11142 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11145 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11146 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11147 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11148 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11149 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11150 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11151 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11152 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11153 dev->name, tp->dma_rwctrl,
11154 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11155 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
/* Error unwind labels (intermediate labels not visible here). */
11169 pci_release_regions(pdev);
11171 err_out_disable_pdev:
11172 pci_disable_device(pdev);
11173 pci_set_drvdata(pdev, NULL);
/* PCI remove: flush pending reset work, unregister the netdev, then
 * release PCI resources (iounmap/free_netdev lines not visible in
 * this extract) and clear the drvdata pointer.
 */
11177 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11179 struct net_device *dev = pci_get_drvdata(pdev);
11182 struct tg3 *tp = netdev_priv(dev);
/* Ensure tg3_reset_task is not running before teardown. */
11184 flush_scheduled_work();
11185 unregister_netdev(dev);
11191 pci_release_regions(pdev);
11192 pci_disable_device(pdev);
11193 pci_set_drvdata(pdev, NULL);
/* PCI suspend: stop the interface, halt the chip, and enter the
 * requested low-power state.  If tg3_set_power_state() fails, the
 * visible tail re-initializes and restarts the device so it keeps
 * working (the error-branch condition itself is not visible in this
 * extract).  No-op if the interface is down.
 */
11197 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11199 struct net_device *dev = pci_get_drvdata(pdev);
11200 struct tg3 *tp = netdev_priv(dev);
11203 if (!netif_running(dev))
11206 flush_scheduled_work();
11207 tg3_netif_stop(tp);
11209 del_timer_sync(&tp->timer);
11211 tg3_full_lock(tp, 1);
11212 tg3_disable_ints(tp);
11213 tg3_full_unlock(tp);
11215 netif_device_detach(dev);
11217 tg3_full_lock(tp, 0);
11218 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11219 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11220 tg3_full_unlock(tp);
11222 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Recovery path: bring the device back up after a failed power
 * transition. */
11224 tg3_full_lock(tp, 0);
11226 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11229 tp->timer.expires = jiffies + tp->timer_offset;
11230 add_timer(&tp->timer);
11232 netif_device_attach(dev);
11233 tg3_netif_start(tp);
11235 tg3_full_unlock(tp);
/* PCI resume: restore config space, return to D0, re-attach and
 * restart the interface with its timer.  No-op if the interface was
 * down at suspend time.  (Chip re-init call between the lock and the
 * timer restart is not visible in this extract.)
 */
11241 static int tg3_resume(struct pci_dev *pdev)
11243 struct net_device *dev = pci_get_drvdata(pdev);
11244 struct tg3 *tp = netdev_priv(dev);
11247 if (!netif_running(dev))
11250 pci_restore_state(tp->pdev);
11252 err = tg3_set_power_state(tp, PCI_D0);
11256 netif_device_attach(dev);
11258 tg3_full_lock(tp, 0);
11260 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11263 tp->timer.expires = jiffies + tp->timer_offset;
11264 add_timer(&tp->timer);
11266 tg3_netif_start(tp);
11268 tg3_full_unlock(tp);
/* PCI driver descriptor tying the tg3 ID table to the probe/remove
 * and power-management entry points above. */
11273 static struct pci_driver tg3_driver = {
11274 .name = DRV_MODULE_NAME,
11275 .id_table = tg3_pci_tbl,
11276 .probe = tg3_init_one,
11277 .remove = __devexit_p(tg3_remove_one),
11278 .suspend = tg3_suspend,
11279 .resume = tg3_resume
/* Module entry/exit: register and unregister the PCI driver.
 * pci_module_init() is the pre-2.6.22 spelling of
 * pci_register_driver() for module init context. */
11282 static int __init tg3_init(void)
11284 return pci_module_init(&tg3_driver);
11287 static void __exit tg3_cleanup(void)
11289 pci_unregister_driver(&tg3_driver);
11292 module_init(tg3_init);
11293 module_exit(tg3_cleanup);